From 7a7918f65aae9c66cdae300f55cadb6e52868275 Mon Sep 17 00:00:00 2001 From: citation-bot Date: Tue, 14 Nov 2023 00:52:57 +0000 Subject: [PATCH] [Citation-Bot] update citation automatically --- .github/citation/citation.json | 2 +- README.md | 22 +++++++++++----------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/.github/citation/citation.json b/.github/citation/citation.json index 09a3230..0ebaf11 100644 --- a/.github/citation/citation.json +++ b/.github/citation/citation.json @@ -1 +1 @@ -{"AWB-GCN: A Graph Convolutional Network Accelerator with Runtime Workload Rebalancing": {"citation": 207, "last update": "2023-11-08"}, "GNNAdvisor: An Adaptive and Efficient Runtime System for GNN Acceleration on GPUs": {"citation": 105, "last update": "2023-11-08"}, "DyGNN: Algorithm and Architecture Support of vertex Dynamic Pruning for Graph Neural Networks": {"citation": 12, "last update": "2023-11-08"}, "BGL: GPU-Efficient GNN Training by Optimizing Graph Data I/O and Preprocessing": {"citation": 25, "last update": "2023-11-08"}, "EnGN: A High-Throughput and Energy-Efficient Accelerator for Large Graph Neural Networks": {"citation": 137, "last update": "2023-11-08"}, "Reducing Communication in Graph Neural Network Training": {"citation": 78, "last update": "2023-11-08"}, "fuseGNN: Accelerating Graph Convolutional Neural Network Training on GPGPU": {"citation": 15, "last update": "2023-11-08"}, "Deep Graph Library: A Graph-Centric, Highly-Performant Package for Graph Neural Networks": {"citation": 857, "last update": "2023-11-08"}, "Fast Graph Representation Learning with PyTorch Geometric": {"citation": 3223, "last update": "2023-11-08"}, "Relational Inductive Biases, Deep Learning, and Graph Networks": {"citation": 3039, "last update": "2023-11-08"}, "FlexGraph: A Flexible and Efficient Distributed Framework for GNN Training": {"citation": 37, "last update": "2023-11-08"}, "TigerGraph: A Native MPP Graph Database": {"citation": 55, "last update": "2023-11-08"}, "Degree-Quant: Quantization-Aware Training for Graph Neural Networks": {"citation": 117, "last update": "2023-11-08"}, "BNS-GCN: Efficient Full-Graph Training of Graph Convolutional Networks with Partition-Parallelism and Random Boundary Node Sampling": {"citation": 29, "last update": "2023-11-08"}, "Binarized Graph Neural Network": {"citation": 24, "last update": "2023-11-08"}, "GenGNN: A Generic FPGA Framework for Graph Neural Network Acceleration": {"citation": 7, "last update": "2023-11-08"}, "GNNLab: A Factored System for Sample-based GNN Training over GPUs": {"citation": 30, "last update": "2023-11-08"}, "StreamGCN: Accelerating Graph Convolutional Networks with Streaming Processing": {"citation": 2, "last update": "2023-11-08"}, "FusedMM: A Unified SDDMM-SpMM Kernel for Graph Embedding and Graph Neural Networks": {"citation": 28, "last update": "2023-11-08"}, "$P^3$: Distributed Deep Graph Learning at Scale": {"citation": 89, "last update": "2023-11-08"}, "G$^3$: When Graph Neural Networks Meet Parallel Graph Processing Systems on GPUs": {"citation": 36, "last update": "2023-11-08"}, "PipeGCN: Efficient Full-Graph Training of Graph Convolutional Networks with Pipelined Feature Communication": {"citation": 31, "last update": "2023-11-08"}, "EPQuant: A Graph Neural Network Compression Approach Based on Product Quantization": {"citation": 6, "last update": "2023-11-10"}, "Graph Neural Networks in TensorFlow and Keras with Spektral": {"citation": 218, "last update": "2023-11-10"}, "Efficient Scaling of Dynamic Graph 
Neural Networks": {"citation": 10, "last update": "2023-11-10"}, "Accelerating Training and Inference of Graph Neural Networks with Fast Sampling and Pipelining": {"citation": 23, "last update": "2023-11-10"}, "QGTC: Accelerating Quantized Graph Neural Networks via GPU Tensor Core": {"citation": 26, "last update": "2023-11-10"}, "BlockGNN: Towards Efficient GNN Acceleration Using Block-Circulant Weight Matrices": {"citation": 25, "last update": "2023-11-10"}, "Learned Low Precision Graph Neural Networks": {"citation": 25, "last update": "2023-11-10"}, "2PGraph: Accelerating GNN Training over Large Graphs on GPU Clusters": {"citation": 11, "last update": "2023-11-10"}, "Improving the Accuracy, Scalability, and Performance of Graph Neural Networks with Roc": {"citation": 167, "last update": "2023-11-10"}, "Large Graph Convolutional Network Training with GPU-Oriented Data Communication Architecture": {"citation": 44, "last update": "2023-11-10"}, "GraphFM: Improving Large-Scale GNN Training via Feature Momentum": {"citation": 15, "last update": "2023-11-10"}, "Dorylus: Affordable, Scalable, and Accurate GNN Training with Distributed CPU Servers and Serverless Threads": {"citation": 87, "last update": "2023-11-10"}, "PaGraph: Scaling GNN Training on Large Graphs via Computation-aware Caching": {"citation": 94, "last update": "2023-11-10"}, "NeuGraph: Parallel Deep Neural Network Computation on Large Graphs": {"citation": 211, "last update": "2023-11-10"}, "Computing Graph Neural Networks: A Survey from Algorithms to Accelerators": {"citation": 153, "last update": "2023-11-10"}, "ZIPPER: Exploiting Tile- and Operator-level Parallelism for General and Scalable Graph Neural Network Acceleration": {"citation": 4, "last update": "2023-11-10"}, "GCNAX: A Flexible and Energy-efficient Accelerator for Graph Convolutional Neural Networks": {"citation": 91, "last update": "2023-11-10"}, "DistDGL: Distributed Graph Neural Network Training for Billion-Scale Graphs": {"citation": 86, "last update": "2023-11-10"}, "SGQuant: Squeezing the Last Bit on Graph Neural Networks with Specialized Quantization": {"citation": 30, "last update": "2023-11-10"}, "CogDL: A Toolkit for Deep Learning on Graphs": {"citation": 14, "last update": "2023-11-10"}, "AliGraph: A Comprehensive Graph Neural Network Platform": {"citation": 231, "last update": "2023-11-10"}, "Hardware Acceleration of Large Scale GCN Inference": {"citation": 64, "last update": "2023-11-10"}, "Learn Locally, Correct Globally: A Distributed Algorithm for Training Graph Neural Networks": {"citation": 21, "last update": "2023-11-12"}, "TARe: Task-Adaptive in-situ ReRAM Computing for Graph Learning": {"citation": 10, "last update": "2023-11-12"}, "GCoD: Graph Convolutional Network Acceleration via Dedicated Algorithm and Accelerator Co-Design": {"citation": 23, "last update": "2023-11-12"}, "Hyperscale FPGA-as-a-service architecture for large-scale distributed graph neural network": {"citation": 13, "last update": "2023-11-12"}, "Understanding GNN Computational Graph: A Coordinated Computation, IO, and Memory Perspective": {"citation": 28, "last update": "2023-11-12"}, "DRGN: a dynamically reconfigurable accelerator for graph neural networks": {"citation": 1, "last update": "2023-11-12"}, "Global Neighbor Sampling for Mixed CPU-GPU Training on Giant Graphs": {"citation": 24, "last update": "2023-11-12"}, "Efficient Data Loader for Fast Sampling-Based GNN Training on Large Graphs": {"citation": 21, "last update": "2023-11-12"}, "Graphite: Optimizing Graph 
Neural Networks on CPUs Through Cooperative Software-Hardware Techniques": {"citation": 11, "last update": "2023-11-12"}, "Rubik: A Hierarchical Architecture for Efficient Graph Learning": {"citation": 11, "last update": "2023-11-12"}, "HyGCN: A GCN Accelerator with Hybrid Architecture": {"citation": 254, "last update": "2023-11-12"}, "GNNIE: GNN Inference Engine with Load-balancing and Graph-specific Caching": {"citation": 10, "last update": "2023-11-12"}, "FeatGraph: A Flexible and Efficient Backend for Graph Neural Network Systems": {"citation": 64, "last update": "2023-11-12"}, "EXACT: Scalable Graph Neural Networks Training via Extreme Activation Compression": {"citation": 40, "last update": "2023-11-12"}, "G-CoS: GNN-Accelerator Co-Search Towards Both Better Accuracy and Efficiency": {"citation": 18, "last update": "2023-11-12"}, "Marius++: Large-Scale Training of Graph Neural Networks on a Single Machine": {"citation": 0, "last update": "2023-11-12"}, "GRIP: A Graph Neural Network Accelerator Architecture": {"citation": 70, "last update": "2023-11-12"}, "GCNear: A Hybrid Architecture for Efficient GCN Training with Near-Memory Processing": {"citation": 8, "last update": "2023-11-12"}, "FlowGNN: A Dataflow Architecture for Universal Graph Neural Network Inference via Multi-Queue Streaming": {"citation": 9, "last update": "2023-11-12"}, "ReGraphX: NoC-enabled 3D Heterogeneous ReRAM Architecture for Training Graph Neural Networks": {"citation": 25, "last update": "2023-11-12"}, "GIST: Distributed Training for Large-Scale Graph Convolutional Networks": {"citation": 9, "last update": "2023-11-13"}, "Binary Graph Neural Networks": {"citation": 42, "last update": "2023-11-13"}, "Parallel and Distributed Graph Neural Networks: An In-Depth Concurrency Analysis": {"citation": 21, "last update": "2023-11-13"}, "GraphACT: Accelerating GCN Training on CPU-FPGA Heterogeneous Platforms": {"citation": 121, "last update": "2023-11-13"}, "Seastar: Vertex-Centric Programming for Graph Neural Networks": {"citation": 36, "last update": "2023-11-13"}, "PaSca: A Graph Neural Architecture Search System under the Scalable Paradigm": {"citation": 28, "last update": "2023-11-13"}, "AGL: A Scalable System for Industrial-purpose Graph Machine Learning": {"citation": 88, "last update": "2023-11-13"}, "GE-SpMM: General-purpose Sparse Matrix-Matrix Multiplication on GPUs for Graph Neural Networks": {"citation": 78, "last update": "2023-11-13"}, "Graphiler: Optimizing Graph Neural Networks with Message Passing Data Flow Graph": {"citation": 16, "last update": "2023-11-13"}, "DistGNN: Scalable Distributed Training for Large-Scale Graph Neural Networks": {"citation": 78, "last update": "2023-11-13"}, "DIG: A Turnkey Library for Diving into Graph Deep Learning Research": {"citation": 67, "last update": "2023-11-13"}, "Accelerating Graph Convolutional Networks Using Crossbar-based Processing-In-Memory Architectures": {"citation": 19, "last update": "2023-11-13"}, "Understanding the Design Space of Sparse/Dense Multiphase Dataflows for Mapping Graph Neural Networks on Spatial Accelerators": {"citation": 5, "last update": "2023-11-13"}, "Understanding and Bridging the Gaps in Current GNN Performance Optimizations": {"citation": 52, "last update": "2023-11-13"}, "MGG: Accelerating Graph Neural Networks with Fine-Grained Intra-Kernel Communication-Computation Pipelining on Multi-GPU Platforms": {"citation": 2, "last update": "2023-11-13"}, "GNNPipe: Accelerating Distributed Full-Graph GNN Training with Pipelined Model 
Parallelism": {"citation": 0, "last update": "2023-11-13"}, "GNN-PIM: A Processing-in-Memory Architecture for Graph Neural Networks": {"citation": 21, "last update": "2023-11-13"}, "Sancus: Staleness-Aware Communication-Avoiding Full-Graph Decentralized Training in Large-Scale Graph Neural Networks": {"citation": 24, "last update": "2023-11-13"}, "Distributed Graph Neural Network Training: A Survey": {"citation": 11, "last update": "2023-11-13"}, "Bi-GCN: Binary Graph Convolutional Network": {"citation": 36, "last update": "2023-11-13"}, "Sequential Aggregation and Rematerialization: Distributed Full-batch Training of Graph Neural Networks on Large Graphs": {"citation": 14, "last update": "2023-11-13"}, "PCGCN: Partition-Centric Processing for Accelerating Graph Convolutional Network": {"citation": 40, "last update": "2023-11-13"}, "Hardware Acceleration of Graph Neural Networks": {"citation": 103, "last update": "2023-11-13"}, "Distributed Hybrid CPU and GPU training for Graph Neural Networks on Billion-Scale Graphs": {"citation": 13, "last update": "2023-11-13"}, "DGCL: An Efficient Communication Library for Distributed GNN Training": {"citation": 47, "last update": "2023-11-13"}, "TLPGNN: A Lightweight Two-Level Parallelism Paradigm for Graph Neural Network Computation on GPU": {"citation": 8, "last update": "2023-11-13"}, "GNNAutoScale: Scalable and Expressive Graph Neural Networks via Historical Embeddings": {"citation": 95, "last update": "2023-11-13"}, "I-GCN: A Graph Convolutional Network Accelerator with Runtime Locality Enhancement through Islandization": {"citation": 65, "last update": "2023-11-13"}, "ByteGNN: Efficient Graph Neural Network Training at Large Scale": {"citation": 32, "last update": "2023-11-13"}} \ No newline at end of file +{"QGTC: Accelerating Quantized Graph Neural Networks via GPU Tensor Core": {"citation": 26, "last update": "2023-11-10"}, "BlockGNN: Towards Efficient GNN Acceleration Using Block-Circulant Weight Matrices": {"citation": 25, "last update": "2023-11-10"}, "Learned Low Precision Graph Neural Networks": {"citation": 25, "last update": "2023-11-10"}, "2PGraph: Accelerating GNN Training over Large Graphs on GPU Clusters": {"citation": 11, "last update": "2023-11-10"}, "Improving the Accuracy, Scalability, and Performance of Graph Neural Networks with Roc": {"citation": 167, "last update": "2023-11-10"}, "Large Graph Convolutional Network Training with GPU-Oriented Data Communication Architecture": {"citation": 44, "last update": "2023-11-10"}, "GraphFM: Improving Large-Scale GNN Training via Feature Momentum": {"citation": 15, "last update": "2023-11-10"}, "Dorylus: Affordable, Scalable, and Accurate GNN Training with Distributed CPU Servers and Serverless Threads": {"citation": 87, "last update": "2023-11-10"}, "PaGraph: Scaling GNN Training on Large Graphs via Computation-aware Caching": {"citation": 94, "last update": "2023-11-10"}, "NeuGraph: Parallel Deep Neural Network Computation on Large Graphs": {"citation": 211, "last update": "2023-11-10"}, "Computing Graph Neural Networks: A Survey from Algorithms to Accelerators": {"citation": 153, "last update": "2023-11-10"}, "ZIPPER: Exploiting Tile- and Operator-level Parallelism for General and Scalable Graph Neural Network Acceleration": {"citation": 4, "last update": "2023-11-10"}, "GCNAX: A Flexible and Energy-efficient Accelerator for Graph Convolutional Neural Networks": {"citation": 91, "last update": "2023-11-10"}, "DistDGL: Distributed Graph Neural Network Training for Billion-Scale 
Graphs": {"citation": 86, "last update": "2023-11-10"}, "SGQuant: Squeezing the Last Bit on Graph Neural Networks with Specialized Quantization": {"citation": 30, "last update": "2023-11-10"}, "CogDL: A Toolkit for Deep Learning on Graphs": {"citation": 14, "last update": "2023-11-10"}, "AliGraph: A Comprehensive Graph Neural Network Platform": {"citation": 231, "last update": "2023-11-10"}, "Hardware Acceleration of Large Scale GCN Inference": {"citation": 64, "last update": "2023-11-10"}, "Learn Locally, Correct Globally: A Distributed Algorithm for Training Graph Neural Networks": {"citation": 21, "last update": "2023-11-12"}, "TARe: Task-Adaptive in-situ ReRAM Computing for Graph Learning": {"citation": 10, "last update": "2023-11-12"}, "GCoD: Graph Convolutional Network Acceleration via Dedicated Algorithm and Accelerator Co-Design": {"citation": 23, "last update": "2023-11-12"}, "Hyperscale FPGA-as-a-service architecture for large-scale distributed graph neural network": {"citation": 13, "last update": "2023-11-12"}, "Understanding GNN Computational Graph: A Coordinated Computation, IO, and Memory Perspective": {"citation": 28, "last update": "2023-11-12"}, "DRGN: a dynamically reconfigurable accelerator for graph neural networks": {"citation": 1, "last update": "2023-11-12"}, "Global Neighbor Sampling for Mixed CPU-GPU Training on Giant Graphs": {"citation": 24, "last update": "2023-11-12"}, "Efficient Data Loader for Fast Sampling-Based GNN Training on Large Graphs": {"citation": 21, "last update": "2023-11-12"}, "Graphite: Optimizing Graph Neural Networks on CPUs Through Cooperative Software-Hardware Techniques": {"citation": 11, "last update": "2023-11-12"}, "Rubik: A Hierarchical Architecture for Efficient Graph Learning": {"citation": 11, "last update": "2023-11-12"}, "HyGCN: A GCN Accelerator with Hybrid Architecture": {"citation": 254, "last update": "2023-11-12"}, "GNNIE: GNN Inference Engine with Load-balancing and Graph-specific Caching": {"citation": 10, "last update": "2023-11-12"}, "FeatGraph: A Flexible and Efficient Backend for Graph Neural Network Systems": {"citation": 64, "last update": "2023-11-12"}, "EXACT: Scalable Graph Neural Networks Training via Extreme Activation Compression": {"citation": 40, "last update": "2023-11-12"}, "G-CoS: GNN-Accelerator Co-Search Towards Both Better Accuracy and Efficiency": {"citation": 18, "last update": "2023-11-12"}, "Marius++: Large-Scale Training of Graph Neural Networks on a Single Machine": {"citation": 0, "last update": "2023-11-12"}, "GRIP: A Graph Neural Network Accelerator Architecture": {"citation": 70, "last update": "2023-11-12"}, "GCNear: A Hybrid Architecture for Efficient GCN Training with Near-Memory Processing": {"citation": 8, "last update": "2023-11-12"}, "FlowGNN: A Dataflow Architecture for Universal Graph Neural Network Inference via Multi-Queue Streaming": {"citation": 9, "last update": "2023-11-12"}, "ReGraphX: NoC-enabled 3D Heterogeneous ReRAM Architecture for Training Graph Neural Networks": {"citation": 25, "last update": "2023-11-12"}, "GIST: Distributed Training for Large-Scale Graph Convolutional Networks": {"citation": 9, "last update": "2023-11-13"}, "Binary Graph Neural Networks": {"citation": 42, "last update": "2023-11-13"}, "Parallel and Distributed Graph Neural Networks: An In-Depth Concurrency Analysis": {"citation": 21, "last update": "2023-11-13"}, "GraphACT: Accelerating GCN Training on CPU-FPGA Heterogeneous Platforms": {"citation": 121, "last update": "2023-11-13"}, "Seastar: 
Vertex-Centric Programming for Graph Neural Networks": {"citation": 36, "last update": "2023-11-13"}, "PaSca: A Graph Neural Architecture Search System under the Scalable Paradigm": {"citation": 28, "last update": "2023-11-13"}, "AGL: A Scalable System for Industrial-purpose Graph Machine Learning": {"citation": 88, "last update": "2023-11-13"}, "GE-SpMM: General-purpose Sparse Matrix-Matrix Multiplication on GPUs for Graph Neural Networks": {"citation": 78, "last update": "2023-11-13"}, "Graphiler: Optimizing Graph Neural Networks with Message Passing Data Flow Graph": {"citation": 16, "last update": "2023-11-13"}, "DistGNN: Scalable Distributed Training for Large-Scale Graph Neural Networks": {"citation": 78, "last update": "2023-11-13"}, "DIG: A Turnkey Library for Diving into Graph Deep Learning Research": {"citation": 67, "last update": "2023-11-13"}, "Accelerating Graph Convolutional Networks Using Crossbar-based Processing-In-Memory Architectures": {"citation": 19, "last update": "2023-11-13"}, "Understanding the Design Space of Sparse/Dense Multiphase Dataflows for Mapping Graph Neural Networks on Spatial Accelerators": {"citation": 5, "last update": "2023-11-13"}, "Understanding and Bridging the Gaps in Current GNN Performance Optimizations": {"citation": 52, "last update": "2023-11-13"}, "MGG: Accelerating Graph Neural Networks with Fine-Grained Intra-Kernel Communication-Computation Pipelining on Multi-GPU Platforms": {"citation": 2, "last update": "2023-11-13"}, "GNNPipe: Accelerating Distributed Full-Graph GNN Training with Pipelined Model Parallelism": {"citation": 0, "last update": "2023-11-13"}, "GNN-PIM: A Processing-in-Memory Architecture for Graph Neural Networks": {"citation": 21, "last update": "2023-11-13"}, "Sancus: Staleness-Aware Communication-Avoiding Full-Graph Decentralized Training in Large-Scale Graph Neural Networks": {"citation": 24, "last update": "2023-11-13"}, "Distributed Graph Neural Network Training: A Survey": {"citation": 11, "last update": "2023-11-13"}, "Bi-GCN: Binary Graph Convolutional Network": {"citation": 36, "last update": "2023-11-13"}, "Sequential Aggregation and Rematerialization: Distributed Full-batch Training of Graph Neural Networks on Large Graphs": {"citation": 14, "last update": "2023-11-13"}, "PCGCN: Partition-Centric Processing for Accelerating Graph Convolutional Network": {"citation": 40, "last update": "2023-11-13"}, "Hardware Acceleration of Graph Neural Networks": {"citation": 103, "last update": "2023-11-13"}, "Distributed Hybrid CPU and GPU training for Graph Neural Networks on Billion-Scale Graphs": {"citation": 13, "last update": "2023-11-13"}, "DGCL: An Efficient Communication Library for Distributed GNN Training": {"citation": 47, "last update": "2023-11-13"}, "TLPGNN: A Lightweight Two-Level Parallelism Paradigm for Graph Neural Network Computation on GPU": {"citation": 8, "last update": "2023-11-13"}, "GNNAutoScale: Scalable and Expressive Graph Neural Networks via Historical Embeddings": {"citation": 95, "last update": "2023-11-13"}, "I-GCN: A Graph Convolutional Network Accelerator with Runtime Locality Enhancement through Islandization": {"citation": 65, "last update": "2023-11-13"}, "ByteGNN: Efficient Graph Neural Network Training at Large Scale": {"citation": 32, "last update": "2023-11-13"}, "AWB-GCN: A Graph Convolutional Network Accelerator with Runtime Workload Rebalancing": {"citation": 211, "last update": "2023-11-14"}, "GNNAdvisor: An Adaptive and Efficient Runtime System for GNN Acceleration on GPUs": 
{"citation": 105, "last update": "2023-11-14"}, "DyGNN: Algorithm and Architecture Support of vertex Dynamic Pruning for Graph Neural Networks": {"citation": 12, "last update": "2023-11-14"}, "BGL: GPU-Efficient GNN Training by Optimizing Graph Data I/O and Preprocessing": {"citation": 25, "last update": "2023-11-14"}, "EnGN: A High-Throughput and Energy-Efficient Accelerator for Large Graph Neural Networks": {"citation": 139, "last update": "2023-11-14"}, "Reducing Communication in Graph Neural Network Training": {"citation": 79, "last update": "2023-11-14"}, "fuseGNN: Accelerating Graph Convolutional Neural Network Training on GPGPU": {"citation": 15, "last update": "2023-11-14"}, "Deep Graph Library: A Graph-Centric, Highly-Performant Package for Graph Neural Networks": {"citation": 863, "last update": "2023-11-14"}, "Fast Graph Representation Learning with PyTorch Geometric": {"citation": 3246, "last update": "2023-11-14"}, "Relational Inductive Biases, Deep Learning, and Graph Networks": {"citation": 3051, "last update": "2023-11-14"}, "FlexGraph: A Flexible and Efficient Distributed Framework for GNN Training": {"citation": 37, "last update": "2023-11-14"}, "TigerGraph: A Native MPP Graph Database": {"citation": 55, "last update": "2023-11-14"}, "Degree-Quant: Quantization-Aware Training for Graph Neural Networks": {"citation": 117, "last update": "2023-11-14"}, "BNS-GCN: Efficient Full-Graph Training of Graph Convolutional Networks with Partition-Parallelism and Random Boundary Node Sampling": {"citation": 30, "last update": "2023-11-14"}, "Binarized Graph Neural Network": {"citation": 24, "last update": "2023-11-14"}, "GenGNN: A Generic FPGA Framework for Graph Neural Network Acceleration": {"citation": 7, "last update": "2023-11-14"}, "GNNLab: A Factored System for Sample-based GNN Training over GPUs": {"citation": 31, "last update": "2023-11-14"}, "StreamGCN: Accelerating Graph Convolutional Networks with Streaming Processing": {"citation": 2, "last update": "2023-11-14"}, "FusedMM: A Unified SDDMM-SpMM Kernel for Graph Embedding and Graph Neural Networks": {"citation": 28, "last update": "2023-11-14"}, "$P^3$: Distributed Deep Graph Learning at Scale": {"citation": 91, "last update": "2023-11-14"}, "G$^3$: When Graph Neural Networks Meet Parallel Graph Processing Systems on GPUs": {"citation": 37, "last update": "2023-11-14"}, "PipeGCN: Efficient Full-Graph Training of Graph Convolutional Networks with Pipelined Feature Communication": {"citation": 33, "last update": "2023-11-14"}, "EPQuant: A Graph Neural Network Compression Approach Based on Product Quantization": {"citation": 6, "last update": "2023-11-14"}, "Graph Neural Networks in TensorFlow and Keras with Spektral": {"citation": 219, "last update": "2023-11-14"}, "Efficient Scaling of Dynamic Graph Neural Networks": {"citation": 10, "last update": "2023-11-14"}, "Accelerating Training and Inference of Graph Neural Networks with Fast Sampling and Pipelining": {"citation": 23, "last update": "2023-11-14"}} \ No newline at end of file diff --git a/README.md b/README.md index 7276131..61bba14 100644 --- a/README.md +++ b/README.md @@ -40,11 +40,11 @@ A list of awesome systems for graph neural network (GNN). 
If you have any commen | :---: | :---: | :---------: | :---: | :----: | |JMLR 2021|DIG: A Turnkey Library for Diving into Graph Deep Learning Research|TAMU| [[paper]](https://arxiv.org/abs/2103.12608)![Scholar citations](https://img.shields.io/badge/Citations-67-_.svg?logo=google-scholar&labelColor=4f4f4f&color=3388ee)| [[code]](https://github.com/divelab/DIG)![GitHub stars](https://img.shields.io/github/stars/divelab/DIG.svg?logo=github&label=Stars)| |arXiv 2021|CogDL: A Toolkit for Deep Learning on Graphs|THU| [[paper]](https://arxiv.org/abs/2103.00959)![Scholar citations](https://img.shields.io/badge/Citations-14-_.svg?logo=google-scholar&labelColor=4f4f4f&color=3388ee)| [[code]](https://github.com/THUDM/cogdl)![GitHub stars](https://img.shields.io/github/stars/THUDM/cogdl.svg?logo=github&label=Stars)| -|CIM 2021|Graph Neural Networks in TensorFlow and Keras with Spektral|Università della Svizzera italiana| [[paper]](https://arxiv.org/abs/2006.12138)![Scholar citations](https://img.shields.io/badge/Citations-218-_.svg?logo=google-scholar&labelColor=4f4f4f&color=3388ee)| [[code]](https://github.com/danielegrattarola/spektral)![GitHub stars](https://img.shields.io/github/stars/danielegrattarola/spektral.svg?logo=github&label=Stars)| -|arXiv 2019|Deep Graph Library: A Graph-Centric, Highly-Performant Package for Graph Neural Networks|AWS| [[paper]](https://arxiv.org/abs/1909.01315)![Scholar citations](https://img.shields.io/badge/Citations-857-_.svg?logo=google-scholar&labelColor=4f4f4f&color=3388ee)| [[code]](https://github.com/dmlc/dgl)![GitHub stars](https://img.shields.io/github/stars/dmlc/dgl.svg?logo=github&label=Stars)| +|CIM 2021|Graph Neural Networks in TensorFlow and Keras with Spektral|Università della Svizzera italiana| [[paper]](https://arxiv.org/abs/2006.12138)![Scholar citations](https://img.shields.io/badge/Citations-219-_.svg?logo=google-scholar&labelColor=4f4f4f&color=3388ee)| [[code]](https://github.com/danielegrattarola/spektral)![GitHub stars](https://img.shields.io/github/stars/danielegrattarola/spektral.svg?logo=github&label=Stars)| +|arXiv 2019|Deep Graph Library: A Graph-Centric, Highly-Performant Package for Graph Neural Networks|AWS| [[paper]](https://arxiv.org/abs/1909.01315)![Scholar citations](https://img.shields.io/badge/Citations-863-_.svg?logo=google-scholar&labelColor=4f4f4f&color=3388ee)| [[code]](https://github.com/dmlc/dgl)![GitHub stars](https://img.shields.io/github/stars/dmlc/dgl.svg?logo=github&label=Stars)| |VLDB 2019|AliGraph: A Comprehensive Graph Neural Network Platform|Alibaba| [[paper]](https://dl.acm.org/doi/pdf/10.14778/3352063.3352127)![Scholar citations](https://img.shields.io/badge/Citations-231-_.svg?logo=google-scholar&labelColor=4f4f4f&color=3388ee)| [[code]](https://github.com/alibaba/graph-learn)![GitHub stars](https://img.shields.io/github/stars/alibaba/graph-learn.svg?logo=github&label=Stars)| |arXiv 2019|Fast Graph Representation Learning with PyTorch Geometric|TU Dortmund University| [[paper]](https://arxiv.org/abs/1903.02428)![Scholar citations](https://img.shields.io/badge/Citations-3.2k-_.svg?logo=google-scholar&labelColor=4f4f4f&color=3388ee)| [[code]](https://github.com/rusty1s/pytorch_geometric)![GitHub stars](https://img.shields.io/github/stars/rusty1s/pytorch_geometric.svg?logo=github&label=Stars)| -|arXiv 2018|Relational Inductive Biases, Deep Learning, and Graph Networks|DeepMind| [[paper]](https://arxiv.org/abs/1806.01261)![Scholar 
citations](https://img.shields.io/badge/Citations-3.0k-_.svg?logo=google-scholar&labelColor=4f4f4f&color=3388ee)| [[code]](https://github.com/deepmind/graph_nets)![GitHub stars](https://img.shields.io/github/stars/deepmind/graph_nets.svg?logo=github&label=Stars)| +|arXiv 2018|Relational Inductive Biases, Deep Learning, and Graph Networks|DeepMind| [[paper]](https://arxiv.org/abs/1806.01261)![Scholar citations](https://img.shields.io/badge/Citations-3.1k-_.svg?logo=google-scholar&labelColor=4f4f4f&color=3388ee)| [[code]](https://github.com/deepmind/graph_nets)![GitHub stars](https://img.shields.io/github/stars/deepmind/graph_nets.svg?logo=github&label=Stars)| ### GNN Kernels | Venue | Title | Affiliation |       Link       |   Source   | | :---: | :---: | :---------: | :---: | :----: | @@ -66,22 +66,22 @@ A list of awesome systems for graph neural network (GNN). If you have any commen |arXiv 2023|GNNPipe: Accelerating Distributed Full-Graph GNN Training with Pipelined Model Parallelism|Purdue| [[paper]](https://arxiv.org/pdf/2308.10087.pdf)![Scholar citations](https://img.shields.io/badge/Citations-0-_.svg?logo=google-scholar&labelColor=4f4f4f&color=3388ee)|| |OSDI 2023|MGG: Accelerating Graph Neural Networks with Fine-Grained Intra-Kernel Communication-Computation Pipelining on Multi-GPU Platforms|UCSB| [[paper]](https://www.usenix.org/system/files/osdi23-wang-yuke.pdf)![Scholar citations](https://img.shields.io/badge/Citations-2-_.svg?logo=google-scholar&labelColor=4f4f4f&color=3388ee)| [[code]](https://github.com/YukeWang96/MGG_OSDI23)![GitHub stars](https://img.shields.io/github/stars/YukeWang96/MGG_OSDI23.svg?logo=github&label=Stars)| |VLDB 2022|Sancus: Staleness-Aware Communication-Avoiding Full-Graph Decentralized Training in Large-Scale Graph Neural Networks|HKUST| [[paper]](https://www.vldb.org/pvldb/vol15/p1937-peng.pdf)![Scholar citations](https://img.shields.io/badge/Citations-24-_.svg?logo=google-scholar&labelColor=4f4f4f&color=3388ee)| [[code]](https://github.com/chenzhao/light-dist-gnn)![GitHub stars](https://img.shields.io/github/stars/chenzhao/light-dist-gnn.svg?logo=github&label=Stars)| -|MLSys 2022|BNS-GCN: Efficient Full-Graph Training of Graph Convolutional Networks with Partition-Parallelism and Random Boundary Node Sampling|Rice, UIUC| [[paper]](https://proceedings.mlsys.org/paper/2022/file/d1fe173d08e959397adf34b1d77e88d7-Paper.pdf)![Scholar citations](https://img.shields.io/badge/Citations-29-_.svg?logo=google-scholar&labelColor=4f4f4f&color=3388ee)| [[code]](https://github.com/RICE-EIC/BNS-GCN)![GitHub stars](https://img.shields.io/github/stars/RICE-EIC/BNS-GCN.svg?logo=github&label=Stars)| +|MLSys 2022|BNS-GCN: Efficient Full-Graph Training of Graph Convolutional Networks with Partition-Parallelism and Random Boundary Node Sampling|Rice, UIUC| [[paper]](https://proceedings.mlsys.org/paper/2022/file/d1fe173d08e959397adf34b1d77e88d7-Paper.pdf)![Scholar citations](https://img.shields.io/badge/Citations-30-_.svg?logo=google-scholar&labelColor=4f4f4f&color=3388ee)| [[code]](https://github.com/RICE-EIC/BNS-GCN)![GitHub stars](https://img.shields.io/github/stars/RICE-EIC/BNS-GCN.svg?logo=github&label=Stars)| |MLSys 2022|Sequential Aggregation and Rematerialization: Distributed Full-batch Training of Graph Neural Networks on Large Graphs|Intel| [[paper]](https://arxiv.org/abs/2111.06483)![Scholar citations](https://img.shields.io/badge/Citations-14-_.svg?logo=google-scholar&labelColor=4f4f4f&color=3388ee)| [[code]](https://github.com/IntelLabs/SAR)![GitHub 
stars](https://img.shields.io/github/stars/IntelLabs/SAR.svg?logo=github&label=Stars)| |WWW 2022|PaSca: A Graph Neural Architecture Search System under the Scalable Paradigm|PKU| [[paper]](https://dl.acm.org/doi/abs/10.1145/3485447.3511986)![Scholar citations](https://img.shields.io/badge/Citations-28-_.svg?logo=google-scholar&labelColor=4f4f4f&color=3388ee)|| -|ICLR 2022|PipeGCN: Efficient Full-Graph Training of Graph Convolutional Networks with Pipelined Feature Communication|Rice| [[paper]](https://openreview.net/pdf?id=kSwqMH0zn1F)![Scholar citations](https://img.shields.io/badge/Citations-31-_.svg?logo=google-scholar&labelColor=4f4f4f&color=3388ee)| [[code]](https://github.com/RICE-EIC/PipeGCN)![GitHub stars](https://img.shields.io/github/stars/RICE-EIC/PipeGCN.svg?logo=github&label=Stars)| +|ICLR 2022|PipeGCN: Efficient Full-Graph Training of Graph Convolutional Networks with Pipelined Feature Communication|Rice| [[paper]](https://openreview.net/pdf?id=kSwqMH0zn1F)![Scholar citations](https://img.shields.io/badge/Citations-33-_.svg?logo=google-scholar&labelColor=4f4f4f&color=3388ee)| [[code]](https://github.com/RICE-EIC/PipeGCN)![GitHub stars](https://img.shields.io/github/stars/RICE-EIC/PipeGCN.svg?logo=github&label=Stars)| |ICLR 2022|Learn Locally, Correct Globally: A Distributed Algorithm for Training Graph Neural Networks|PSU| [[paper]](https://openreview.net/pdf?id=FndDxSz3LxQ)![Scholar citations](https://img.shields.io/badge/Citations-21-_.svg?logo=google-scholar&labelColor=4f4f4f&color=3388ee)| [[code]](https://github.com/MortezaRamezani/llcg)![GitHub stars](https://img.shields.io/github/stars/MortezaRamezani/llcg.svg?logo=github&label=Stars)| |arXiv 2021|Distributed Hybrid CPU and GPU training for Graph Neural Networks on Billion-Scale Graphs|AWS| [[paper]](https://arxiv.org/pdf/2112.15345.pdf)![Scholar citations](https://img.shields.io/badge/Citations-13-_.svg?logo=google-scholar&labelColor=4f4f4f&color=3388ee)|| |SC 2021|DistGNN: Scalable Distributed Training for Large-Scale Graph Neural Networks|Intel| [[paper]](https://dl.acm.org/doi/pdf/10.1145/3458817.3480856)![Scholar citations](https://img.shields.io/badge/Citations-78-_.svg?logo=google-scholar&labelColor=4f4f4f&color=3388ee)| [[code]](https://github.com/dmlc/dgl/pull/3024)| |SC 2021|Efficient Scaling of Dynamic Graph Neural Networks|IBM| [[paper]](https://dl.acm.org/doi/pdf/10.1145/3458817.3480858)![Scholar citations](https://img.shields.io/badge/Citations-10-_.svg?logo=google-scholar&labelColor=4f4f4f&color=3388ee)|| |CLUSTER 2021|2PGraph: Accelerating GNN Training over Large Graphs on GPU Clusters|NUDT| [[paper]](https://ieeexplore.ieee.org/abstract/document/9556026)![Scholar citations](https://img.shields.io/badge/Citations-11-_.svg?logo=google-scholar&labelColor=4f4f4f&color=3388ee)|| -|OSDI 2021|$P^3$: Distributed Deep Graph Learning at Scale|MSR| [[paper]](https://www.usenix.org/system/files/osdi21-gandhi.pdf)![Scholar citations](https://img.shields.io/badge/Citations-89-_.svg?logo=google-scholar&labelColor=4f4f4f&color=3388ee)|| +|OSDI 2021|$P^3$: Distributed Deep Graph Learning at Scale|MSR| [[paper]](https://www.usenix.org/system/files/osdi21-gandhi.pdf)![Scholar citations](https://img.shields.io/badge/Citations-91-_.svg?logo=google-scholar&labelColor=4f4f4f&color=3388ee)|| |OSDI 2021|Dorylus: Affordable, Scalable, and Accurate GNN Training with Distributed CPU Servers and Serverless Threads|UCLA| [[paper]](http://web.cs.ucla.edu/~harryxu/papers/dorylus-osdi21.pdf)![Scholar 
citations](https://img.shields.io/badge/Citations-87-_.svg?logo=google-scholar&labelColor=4f4f4f&color=3388ee)| [[code]](https://github.com/uclasystem/dorylus)![GitHub stars](https://img.shields.io/github/stars/uclasystem/dorylus.svg?logo=github&label=Stars)| |arXiv 2021|GIST: Distributed Training for Large-Scale Graph Convolutional Networks|Rice| [[paper]](https://arxiv.org/abs/2102.10424)![Scholar citations](https://img.shields.io/badge/Citations-9-_.svg?logo=google-scholar&labelColor=4f4f4f&color=3388ee)|| |EuroSys 2021|FlexGraph: A Flexible and Efficient Distributed Framework for GNN Training|Alibaba| [[paper]](https://dl.acm.org/doi/pdf/10.1145/3447786.3456229)![Scholar citations](https://img.shields.io/badge/Citations-37-_.svg?logo=google-scholar&labelColor=4f4f4f&color=3388ee)|| |EuroSys 2021|DGCL: An Efficient Communication Library for Distributed GNN Training|CUHK| [[paper]](https://dl.acm.org/doi/abs/10.1145/3447786.3456233)![Scholar citations](https://img.shields.io/badge/Citations-47-_.svg?logo=google-scholar&labelColor=4f4f4f&color=3388ee)| [[code]](https://github.com/czkkkkkk/ragdoll)![GitHub stars](https://img.shields.io/github/stars/czkkkkkk/ragdoll.svg?logo=github&label=Stars)| -|SC 2020|Reducing Communication in Graph Neural Network Training|UC Berkeley| [[paper]](https://arxiv.org/pdf/2005.03300.pdf)![Scholar citations](https://img.shields.io/badge/Citations-78-_.svg?logo=google-scholar&labelColor=4f4f4f&color=3388ee)| [[code]](https://github.com/PASSIONLab/CAGNET)![GitHub stars](https://img.shields.io/github/stars/PASSIONLab/CAGNET.svg?logo=github&label=Stars)| -|VLDB 2020|G$^3$: When Graph Neural Networks Meet Parallel Graph Processing Systems on GPUs|NUS| [[paper]](http://www.vldb.org/pvldb/vol13/p2813-liu.pdf)![Scholar citations](https://img.shields.io/badge/Citations-36-_.svg?logo=google-scholar&labelColor=4f4f4f&color=3388ee)| [[code]](https://github.com/Xtra-Computing/G3)![GitHub stars](https://img.shields.io/github/stars/Xtra-Computing/G3.svg?logo=github&label=Stars)| +|SC 2020|Reducing Communication in Graph Neural Network Training|UC Berkeley| [[paper]](https://arxiv.org/pdf/2005.03300.pdf)![Scholar citations](https://img.shields.io/badge/Citations-79-_.svg?logo=google-scholar&labelColor=4f4f4f&color=3388ee)| [[code]](https://github.com/PASSIONLab/CAGNET)![GitHub stars](https://img.shields.io/github/stars/PASSIONLab/CAGNET.svg?logo=github&label=Stars)| +|VLDB 2020|G$^3$: When Graph Neural Networks Meet Parallel Graph Processing Systems on GPUs|NUS| [[paper]](http://www.vldb.org/pvldb/vol13/p2813-liu.pdf)![Scholar citations](https://img.shields.io/badge/Citations-37-_.svg?logo=google-scholar&labelColor=4f4f4f&color=3388ee)| [[code]](https://github.com/Xtra-Computing/G3)![GitHub stars](https://img.shields.io/github/stars/Xtra-Computing/G3.svg?logo=github&label=Stars)| |IA3 2020|DistDGL: Distributed Graph Neural Network Training for Billion-Scale Graphs|AWS| [[paper]](https://arxiv.org/pdf/2010.05337.pdf)![Scholar citations](https://img.shields.io/badge/Citations-86-_.svg?logo=google-scholar&labelColor=4f4f4f&color=3388ee)| [[code]](https://github.com/dmlc/dgl/tree/master/python/dgl/distributed)| |MLSys 2020|Improving the Accuracy, Scalability, and Performance of Graph Neural Networks with Roc|Stanford| [[paper]](https://proceedings.mlsys.org/paper/2020/file/fe9fc289c3ff0af142b6d3bead98a923-Paper.pdf)![Scholar citations](https://img.shields.io/badge/Citations-167-_.svg?logo=google-scholar&labelColor=4f4f4f&color=3388ee)| 
[[code]](https://github.com/jiazhihao/ROC)![GitHub stars](https://img.shields.io/github/stars/jiazhihao/ROC.svg?logo=github&label=Stars)| |arXiv 2020|AGL: A Scalable System for Industrial-purpose Graph Machine Learning|Ant Financial Services Group| [[paper]](https://arxiv.org/abs/2003.02454)![Scholar citations](https://img.shields.io/badge/Citations-88-_.svg?logo=google-scholar&labelColor=4f4f4f&color=3388ee)|| @@ -111,7 +111,7 @@ A list of awesome systems for graph neural network (GNN). If you have any commen | :---: | :---: | :---------: | :---: | :----: | |NSDI 2023|BGL: GPU-Efficient GNN Training by Optimizing Graph Data I/O and Preprocessing|ByteDance| [[paper]](https://arxiv.org/abs/2112.08541)![Scholar citations](https://img.shields.io/badge/Citations-25-_.svg?logo=google-scholar&labelColor=4f4f4f&color=3388ee)|| |MLSys 2022|Accelerating Training and Inference of Graph Neural Networks with Fast Sampling and Pipelining|MIT| [[paper]](https://proceedings.mlsys.org/paper/2022/file/35f4a8d465e6e1edc05f3d8ab658c551-Paper.pdf)![Scholar citations](https://img.shields.io/badge/Citations-23-_.svg?logo=google-scholar&labelColor=4f4f4f&color=3388ee)| [[code]](https://github.com/MITIBMxGraph/SALIENT)![GitHub stars](https://img.shields.io/github/stars/MITIBMxGraph/SALIENT.svg?logo=github&label=Stars)| -|EuroSys 2022|GNNLab: A Factored System for Sample-based GNN Training over GPUs|SJTU| [[paper]](https://dl.acm.org/doi/abs/10.1145/3492321.3519557)![Scholar citations](https://img.shields.io/badge/Citations-30-_.svg?logo=google-scholar&labelColor=4f4f4f&color=3388ee)| [[code]](https://github.com/SJTU-IPADS/gnnlab)![GitHub stars](https://img.shields.io/github/stars/SJTU-IPADS/gnnlab.svg?logo=github&label=Stars)| +|EuroSys 2022|GNNLab: A Factored System for Sample-based GNN Training over GPUs|SJTU| [[paper]](https://dl.acm.org/doi/abs/10.1145/3492321.3519557)![Scholar citations](https://img.shields.io/badge/Citations-31-_.svg?logo=google-scholar&labelColor=4f4f4f&color=3388ee)| [[code]](https://github.com/SJTU-IPADS/gnnlab)![GitHub stars](https://img.shields.io/github/stars/SJTU-IPADS/gnnlab.svg?logo=github&label=Stars)| |KDD 2021|Global Neighbor Sampling for Mixed CPU-GPU Training on Giant Graphs|UCLA| [[paper]](https://arxiv.org/pdf/2106.06150.pdf)![Scholar citations](https://img.shields.io/badge/Citations-24-_.svg?logo=google-scholar&labelColor=4f4f4f&color=3388ee)|| |PPoPP 2021|Understanding and Bridging the Gaps in Current GNN Performance Optimizations|THU| [[paper]](https://dl.acm.org/doi/pdf/10.1145/3437801.3441585)![Scholar citations](https://img.shields.io/badge/Citations-52-_.svg?logo=google-scholar&labelColor=4f4f4f&color=3388ee)| [[code]](https://github.com/xxcclong/GNN-Computing)![GitHub stars](https://img.shields.io/github/stars/xxcclong/GNN-Computing.svg?logo=github&label=Stars)| |VLDB 2021|Large Graph Convolutional Network Training with GPU-Oriented Data Communication Architecture|UIUC| [[paper]](https://arxiv.org/abs/2103.03330)![Scholar citations](https://img.shields.io/badge/Citations-44-_.svg?logo=google-scholar&labelColor=4f4f4f&color=3388ee)| [[code]](https://github.com/K-Wu/pytorch-direct_dgl)![GitHub stars](https://img.shields.io/github/stars/K-Wu/pytorch-direct_dgl.svg?logo=github&label=Stars)| @@ -144,12 +144,12 @@ A list of awesome systems for graph neural network (GNN). 
If you have any commen |ICCAD 2021|G-CoS: GNN-Accelerator Co-Search Towards Both Better Accuracy and Efficiency|Rice| [[paper]](https://arxiv.org/abs/2109.08983)![Scholar citations](https://img.shields.io/badge/Citations-18-_.svg?logo=google-scholar&labelColor=4f4f4f&color=3388ee)|| |MICRO 2021|I-GCN: A Graph Convolutional Network Accelerator with Runtime Locality Enhancement through Islandization|PNNL| [[paper]](https://dl.acm.org/doi/pdf/10.1145/3466752.3480113)![Scholar citations](https://img.shields.io/badge/Citations-65-_.svg?logo=google-scholar&labelColor=4f4f4f&color=3388ee)|| |arXiv 2021|ZIPPER: Exploiting Tile- and Operator-level Parallelism for General and Scalable Graph Neural Network Acceleration|SJTU| [[paper]](https://arxiv.org/abs/2107.08709)![Scholar citations](https://img.shields.io/badge/Citations-4-_.svg?logo=google-scholar&labelColor=4f4f4f&color=3388ee)|| -|TComp 2021|EnGN: A High-Throughput and Energy-Efficient Accelerator for Large Graph Neural Networks|Chinese Academy of Sciences| [[paper]](https://arxiv.org/abs/1909.00155)![Scholar citations](https://img.shields.io/badge/Citations-137-_.svg?logo=google-scholar&labelColor=4f4f4f&color=3388ee)|| +|TComp 2021|EnGN: A High-Throughput and Energy-Efficient Accelerator for Large Graph Neural Networks|Chinese Academy of Sciences| [[paper]](https://arxiv.org/abs/1909.00155)![Scholar citations](https://img.shields.io/badge/Citations-139-_.svg?logo=google-scholar&labelColor=4f4f4f&color=3388ee)|| |HPCA 2021|GCNAX: A Flexible and Energy-efficient Accelerator for Graph Convolutional Neural Networks|GWU| [[paper]](https://ieeexplore.ieee.org/abstract/document/9407104)![Scholar citations](https://img.shields.io/badge/Citations-91-_.svg?logo=google-scholar&labelColor=4f4f4f&color=3388ee)|| |APA 2020|GNN-PIM: A Processing-in-Memory Architecture for Graph Neural Networks|PKU| [[paper]](http://115.27.240.201/docs/20200915165942122459.pdf)![Scholar citations](https://img.shields.io/badge/Citations-21-_.svg?logo=google-scholar&labelColor=4f4f4f&color=3388ee)|| |ASAP 2020|Hardware Acceleration of Large Scale GCN Inference|USC| [[paper]](https://ieeexplore.ieee.org/document/9153263)![Scholar citations](https://img.shields.io/badge/Citations-64-_.svg?logo=google-scholar&labelColor=4f4f4f&color=3388ee)|| |DAC 2020|Hardware Acceleration of Graph Neural Networks|UIUC| [[paper]](http://rakeshk.web.engr.illinois.edu/dac20.pdf)![Scholar citations](https://img.shields.io/badge/Citations-103-_.svg?logo=google-scholar&labelColor=4f4f4f&color=3388ee)|| -|MICRO 2020|AWB-GCN: A Graph Convolutional Network Accelerator with Runtime Workload Rebalancing|PNNL| [[paper]](https://ieeexplore.ieee.org/abstract/document/9252000)![Scholar citations](https://img.shields.io/badge/Citations-207-_.svg?logo=google-scholar&labelColor=4f4f4f&color=3388ee)|| +|MICRO 2020|AWB-GCN: A Graph Convolutional Network Accelerator with Runtime Workload Rebalancing|PNNL| [[paper]](https://ieeexplore.ieee.org/abstract/document/9252000)![Scholar citations](https://img.shields.io/badge/Citations-211-_.svg?logo=google-scholar&labelColor=4f4f4f&color=3388ee)|| |arXiv 2020|GRIP: A Graph Neural Network Accelerator Architecture|Stanford| [[paper]](https://arxiv.org/pdf/2007.13828.pdf)![Scholar citations](https://img.shields.io/badge/Citations-70-_.svg?logo=google-scholar&labelColor=4f4f4f&color=3388ee)|| |HPCA 2020|HyGCN: A GCN Accelerator with Hybrid Architecture|UCSB| [[paper]](https://arxiv.org/pdf/2001.02514.pdf)![Scholar 
citations](https://img.shields.io/badge/Citations-254-_.svg?logo=google-scholar&labelColor=4f4f4f&color=3388ee)|| ## Contribute
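---

Note for reviewers unfamiliar with this bot's pipeline: `.github/citation/citation.json` maps each paper title to its Scholar citation count and the date that count was last refreshed, and the `Citations-…` segment of each shields.io badge in `README.md` is regenerated from that file. The sketch below is illustrative only — the bot's actual source is not part of this patch. The rounding rule (counts of 1000+ abbreviated to one decimal plus `k`, e.g. 3246 → `3.2k`, 3051 → `3.1k`), the substring title matching, and the propagation logic are all inferred from the diff, and the fetch step that refreshes counts and stamps per-entry `last update` dates is outside its scope.

```python
# Illustrative sketch of the JSON -> README badge propagation step.
# Assumption: this is NOT the citation-bot's real implementation; file
# paths match the repo, but the logic is reconstructed from the diff.
import json
import re

CITATION_JSON = ".github/citation/citation.json"
README = "README.md"

def format_count(count: int) -> str:
    # The diff suggests counts of 1000+ are abbreviated to one decimal
    # with a "k" suffix, e.g. 3246 -> "3.2k", 3051 -> "3.1k".
    return f"{count / 1000:.1f}k" if count >= 1000 else str(count)

def propagate_to_readme() -> None:
    # citation.json schema: {title: {"citation": int, "last update": "YYYY-MM-DD"}}
    with open(CITATION_JSON, encoding="utf-8") as f:
        citations = json.load(f)
    with open(README, encoding="utf-8") as f:
        lines = f.readlines()
    for i, line in enumerate(lines):
        for title, entry in citations.items():
            # Simplification: exact substring match of the title
            # against each markdown table row.
            if title in line:
                # Rewrite only the count segment of the Scholar badge URL,
                # e.g. "Citations-207-" -> "Citations-211-".
                lines[i] = re.sub(
                    r"Citations-[^-]+-",
                    f"Citations-{format_count(entry['citation'])}-",
                    lines[i],
                )
    with open(README, "w", encoding="utf-8") as f:
        f.writelines(lines)

if __name__ == "__main__":
    propagate_to_readme()
```

Writing the JSON back with a plain `json.dump` would also reproduce the file's observed style: a single line with `", "` / `": "` separators and no trailing newline, which matches the `\ No newline at end of file` markers in this diff.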