diff --git a/.nojekyll b/.nojekyll new file mode 100644 index 00000000..e69de29b diff --git a/404.html b/404.html new file mode 100644 index 00000000..698a1697 --- /dev/null +++ b/404.html @@ -0,0 +1,5 @@ +404 Page not found

Not Found

This page does not exist

+ \ No newline at end of file diff --git a/CNAME b/CNAME new file mode 100644 index 00000000..73478eab --- /dev/null +++ b/CNAME @@ -0,0 +1 @@ +open-neuromorphic.org \ No newline at end of file diff --git a/about/ONM.png b/about/ONM.png new file mode 100644 index 00000000..bc5eadbf Binary files /dev/null and b/about/ONM.png differ diff --git a/about/ONM_hu581cb6e07016d86819909a21b22e28f8_91275_120x120_fill_box_smart1_3.png b/about/ONM_hu581cb6e07016d86819909a21b22e28f8_91275_120x120_fill_box_smart1_3.png new file mode 100644 index 00000000..cd761e89 Binary files /dev/null and b/about/ONM_hu581cb6e07016d86819909a21b22e28f8_91275_120x120_fill_box_smart1_3.png differ diff --git a/about/ONM_hu581cb6e07016d86819909a21b22e28f8_91275_1600x0_resize_box_3.png b/about/ONM_hu581cb6e07016d86819909a21b22e28f8_91275_1600x0_resize_box_3.png new file mode 100644 index 00000000..f4226f6f Binary files /dev/null and b/about/ONM_hu581cb6e07016d86819909a21b22e28f8_91275_1600x0_resize_box_3.png differ diff --git a/about/ONM_hu581cb6e07016d86819909a21b22e28f8_91275_800x0_resize_box_3.png b/about/ONM_hu581cb6e07016d86819909a21b22e28f8_91275_800x0_resize_box_3.png new file mode 100644 index 00000000..6f390350 Binary files /dev/null and b/about/ONM_hu581cb6e07016d86819909a21b22e28f8_91275_800x0_resize_box_3.png differ diff --git a/about/index.html b/about/index.html new file mode 100644 index 00000000..27b87f61 --- /dev/null +++ b/about/index.html @@ -0,0 +1,9 @@ +About
Featured image of post About

About

This organisation was created by a loose collective of open source collaborators across academia and industry, together with individual contributors. Most of us have never met in person, yet we have started contributing to common projects or to each other’s! What connects us is the love of building tools that can be used in the neuromorphic community, and we want to share ownership of this vision. If that resonates with you, please don’t hesitate to get in touch!

Open Neuromorphic (ONM) provides the following things:

  1. A curated list of software frameworks to make it easier to find the tool you need.
  2. A platform for your code. If you wish to create a new repository or migrate your existing code to ONM, please get in touch with us.
  3. Educational content to get you started in the neuromorphic world.
  4. Events about neuromorphic research and software, with contributions from both academia and industry.

Projects that we list here can fall into this non-exclusive list of categories:

  • Spiking Neural Network (SNN) training and/or inference, for both machine learning and neuroscience applications.
  • Event-based sensor data handling.
  • Digital hardware designs for neuromorphic applications.
  • Mixed-signal hardware designs for neuromorphic applications.

Get in touch with us if you wish to give a talk, write an article, or learn more about the neuromorphic world.

+ \ No newline at end of file diff --git a/categories/example-category/index.html b/categories/example-category/index.html new file mode 100644 index 00000000..d4f65fa8 --- /dev/null +++ b/categories/example-category/index.html @@ -0,0 +1,5 @@ +Category: Example Category - Open Neuromorphic

Categories

0 pages

Example Category

A description of this category

+ \ No newline at end of file diff --git a/categories/example-category/index.xml b/categories/example-category/index.xml new file mode 100644 index 00000000..c92851b0 --- /dev/null +++ b/categories/example-category/index.xml @@ -0,0 +1 @@ +Example Category on Open Neuromorphichttps://open-neuromorphic.org/categories/example-category/Recent content in Example Category on Open NeuromorphicHugo -- gohugo.ioen-us \ No newline at end of file diff --git a/categories/example-category/page/1/index.html b/categories/example-category/page/1/index.html new file mode 100644 index 00000000..8b9d99c1 --- /dev/null +++ b/categories/example-category/page/1/index.html @@ -0,0 +1 @@ +https://open-neuromorphic.org/categories/example-category/ \ No newline at end of file diff --git a/categories/index.html b/categories/index.html new file mode 100644 index 00000000..78f79971 --- /dev/null +++ b/categories/index.html @@ -0,0 +1,5 @@ +Categories

Section

1 page

Categories

+ \ No newline at end of file diff --git a/categories/index.xml b/categories/index.xml new file mode 100644 index 00000000..0719aa17 --- /dev/null +++ b/categories/index.xml @@ -0,0 +1 @@ +Categories on Open Neuromorphichttps://open-neuromorphic.org/categories/Recent content in Categories on Open NeuromorphicHugo -- gohugo.ioen-usExample Categoryhttps://open-neuromorphic.org/categories/example-category/Mon, 01 Jan 0001 00:00:00 +0000https://open-neuromorphic.org/categories/example-category/ \ No newline at end of file diff --git a/categories/page/1/index.html b/categories/page/1/index.html new file mode 100644 index 00000000..60d41ae8 --- /dev/null +++ b/categories/page/1/index.html @@ -0,0 +1 @@ +https://open-neuromorphic.org/categories/ \ No newline at end of file diff --git a/events-recordings/catherine-schuman.webp b/events-recordings/catherine-schuman.webp new file mode 100644 index 00000000..34000988 Binary files /dev/null and b/events-recordings/catherine-schuman.webp differ diff --git a/events-recordings/catherine-schuman_huc056e41512c17dd339d4074ee71f8169_11002_1024x0_resize_q75_h2_box_2.webp b/events-recordings/catherine-schuman_huc056e41512c17dd339d4074ee71f8169_11002_1024x0_resize_q75_h2_box_2.webp new file mode 100644 index 00000000..38e6a8c3 Binary files /dev/null and b/events-recordings/catherine-schuman_huc056e41512c17dd339d4074ee71f8169_11002_1024x0_resize_q75_h2_box_2.webp differ diff --git a/events-recordings/catherine-schuman_huc056e41512c17dd339d4074ee71f8169_11002_480x0_resize_q75_h2_box_2.webp b/events-recordings/catherine-schuman_huc056e41512c17dd339d4074ee71f8169_11002_480x0_resize_q75_h2_box_2.webp new file mode 100644 index 00000000..03901139 Binary files /dev/null and b/events-recordings/catherine-schuman_huc056e41512c17dd339d4074ee71f8169_11002_480x0_resize_q75_h2_box_2.webp differ diff --git a/events-recordings/corradi.jpg b/events-recordings/corradi.jpg new file mode 100644 index 00000000..58d66c53 Binary files /dev/null and b/events-recordings/corradi.jpg differ diff --git a/events-recordings/giorgia-dellaferrera.jpeg b/events-recordings/giorgia-dellaferrera.jpeg new file mode 100644 index 00000000..95f4b3f0 Binary files /dev/null and b/events-recordings/giorgia-dellaferrera.jpeg differ diff --git a/events-recordings/giorgia-dellaferrera.jpg b/events-recordings/giorgia-dellaferrera.jpg new file mode 100644 index 00000000..06525e30 Binary files /dev/null and b/events-recordings/giorgia-dellaferrera.jpg differ diff --git a/events-recordings/giorgia-dellaferrera_hua839c980f4c66a1a65fd81ca426e703b_92518_1024x0_resize_q75_box.jpeg b/events-recordings/giorgia-dellaferrera_hua839c980f4c66a1a65fd81ca426e703b_92518_1024x0_resize_q75_box.jpeg new file mode 100644 index 00000000..1092800d Binary files /dev/null and b/events-recordings/giorgia-dellaferrera_hua839c980f4c66a1a65fd81ca426e703b_92518_1024x0_resize_q75_box.jpeg differ diff --git a/events-recordings/giorgia-dellaferrera_hua839c980f4c66a1a65fd81ca426e703b_92518_480x0_resize_q75_box.jpeg b/events-recordings/giorgia-dellaferrera_hua839c980f4c66a1a65fd81ca426e703b_92518_480x0_resize_q75_box.jpeg new file mode 100644 index 00000000..cef0f18d Binary files /dev/null and b/events-recordings/giorgia-dellaferrera_hua839c980f4c66a1a65fd81ca426e703b_92518_480x0_resize_q75_box.jpeg differ diff --git a/events-recordings/gregor-lenz.jpeg b/events-recordings/gregor-lenz.jpeg new file mode 100644 index 00000000..fe60b3c1 Binary files /dev/null and b/events-recordings/gregor-lenz.jpeg differ diff --git 
a/events-recordings/gregor-lenz_hu401dcc7f29eb10f93ef8a57749c49e1b_50576_1024x0_resize_q75_box.jpeg b/events-recordings/gregor-lenz_hu401dcc7f29eb10f93ef8a57749c49e1b_50576_1024x0_resize_q75_box.jpeg new file mode 100644 index 00000000..18630779 Binary files /dev/null and b/events-recordings/gregor-lenz_hu401dcc7f29eb10f93ef8a57749c49e1b_50576_1024x0_resize_q75_box.jpeg differ diff --git a/events-recordings/gregor-lenz_hu401dcc7f29eb10f93ef8a57749c49e1b_50576_480x0_resize_q75_box.jpeg b/events-recordings/gregor-lenz_hu401dcc7f29eb10f93ef8a57749c49e1b_50576_480x0_resize_q75_box.jpeg new file mode 100644 index 00000000..2bbd8587 Binary files /dev/null and b/events-recordings/gregor-lenz_hu401dcc7f29eb10f93ef8a57749c49e1b_50576_480x0_resize_q75_box.jpeg differ diff --git a/events-recordings/index.html b/events-recordings/index.html new file mode 100644 index 00000000..c44e868f --- /dev/null +++ b/events-recordings/index.html @@ -0,0 +1,15 @@ +Events recordings

Events recordings

2023-01-26: Trevor Bekolay, Nengo - Applied Brain Research

Trevor Bekolay

Recording

https://youtu.be/sgu9l_bqAHM

Slides

click here

Speaker’s bio

Trevor Bekolay’s primary research interest is in learning and memory. In his Master’s degree, he explored how to do supervised, unsupervised, and reinforcement learning in networks of biologically plausible spiking neurons. In his PhD, he applied this knowledge to the domain of speech to explore how sounds coming into the ear become high-level linguistic representations, and how those representations become sequences of vocal tract movements that produce speech.

Trevor is also passionate about reproducible science, particularly when complex software pipelines are involved. In 2013, he started a development effort to reimplement the Nengo neural simulator from scratch in Python, which has now grown to a project with over 20 contributors around the world.

2023-02-14: Giorgia Dellaferrera, PEPITA - A forward-forward alternative to backpropagation

Giorgia Dellaferrera

Recording

https://youtu.be/RKgdUrCun5w

Slides

click here

Speaker’s bio

Giorgia Dellaferrera completed her PhD in computational neuroscience at the Institute of Neuroinformatics (ETH Zurich and the University of Zurich) and IBM Research Zurich with Prof. Indiveri, Prof. Eleftheriou and Dr. Pantazi. Her doctoral thesis focused on the interplay between neuroscience and artificial intelligence, with an emphasis on learning mechanisms in brains and machines. During her PhD, she visited the lab of Prof. Kreiman at Harvard Medical School (US), where she developed a biologically inspired training strategy for artificial neural networks. Before her PhD, Giorgia obtained a master’s degree in Applied Physics at the Swiss Federal Institute of Technology Lausanne (EPFL) and worked as an intern at the Okinawa Institute of Science and Technology, Logitech, Imperial College London, and EPFL.

2023-03-02: Jason Eshraghian, Hands-on session with snnTorch

Jason Eshraghian

Recording

https://youtu.be/aUjWRpisRRg

Notebooks

https://github.com/open-neuromorphic/hands-on-session-snntorch-230302

Speaker’s bio

Jason K. Eshraghian is an Assistant Professor in the Department of Electrical and Computer Engineering at UC Santa Cruz, CA, USA. Prior to that, he was a Post-Doctoral Researcher in the Department of Electrical Engineering and Computer Science, University of Michigan, Ann Arbor. He received the Bachelor of Engineering (Electrical and Electronic) and Bachelor of Laws degrees from The University of Western Australia in 2016, where he also completed his Ph.D. degree.

Professor Eshraghian was awarded the 2019 IEEE VLSI Best Paper Award, the Best Paper Award at the 2019 IEEE Artificial Intelligence CAS Conference, and the Best Live Demonstration Award at the 2020 IEEE ICECS for his work on neuromorphic vision and in-memory computing using RRAM. He currently serves as the secretary-elect of the IEEE Neural Systems and Applications Committee, and was a recipient of the Fulbright Future Fellowship (Australian-American Fulbright Commission), the Forrest Research Fellowship (Forrest Research Foundation), and the Endeavour Fellowship (Australian Government).

2023-03-21: Catherine Schuman, Evolutionary Optimization for Neuromorphic Systems

Catherine Schuman

Recording

https://youtu.be/-g5XZDJPoO8

Speaker’s bio

Catherine (Katie) Schuman is an Assistant Professor in the Department of Electrical Engineering and Computer Science at the University of Tennessee (UT). She received her Ph.D. in Computer Science from UT in 2015, where she completed her dissertation on the use of evolutionary algorithms to train spiking neural networks for neuromorphic systems. Katie previously served as a research scientist at Oak Ridge National Laboratory, where her research focused on algorithms and applications of neuromorphic systems. Katie co-leads the TENNLab Neuromorphic Computing Research Group at UT. She has over 100 publications as well as seven patents in the field of neuromorphic computing. She received the Department of Energy Early Career Award in 2019.

2023-04-04: Gregor Lenz, Hands-on session with Sinabs and Speck

Gregor Lenz

Recording

https://youtu.be/kOiyRtvPO2Q

Speaker’s bio

Gregor Lenz graduated with a Ph.D. in neuromorphic engineering from Sorbonne University. He thinks that technology can learn a thing or two from how biological systems process information.

His main interests are event cameras, which are inspired by the human retina, and spiking neural networks, which mimic the human brain, in an effort to teach machines to compute a bit more like humans do. At the very least there are some power-efficiency gains to be made, but hopefully more! He also loves building open source software for spike-based machine learning. You can find more information on his personal website.

He is the maintainer of two open source projects in the field of neuromorphic computing, Tonic and expelliarmus.

2023-04-26: Dylan Muir, Hands-on session with Xylo and Rockpool

Dylan Muir

Recording

https://youtu.be/WsAqVuQ3B-I

Code

https://github.com/synsense/OpenNeuromorphic_26042023

Slides

https://github.com/synsense/OpenNeuromorphic_26042023/raw/main/slides.pdf

Speaker’s bio

Dylan Muir is the Vice President for Global Research Operations; Director for Algorithms and Applications; and Director for Global Business Development at SynSense. Dr. Muir is a specialist in architectures for neural computation. He has published extensively in computational and experimental neuroscience. At SynSense he is responsible for the company research vision, and directing development of neural architectures for signal processing. Dr. Muir holds a Doctor of Science (PhD) from ETH Zurich, and undergraduate degrees (Masters) in Electronic Engineering and in Computer Science from QUT, Australia.

2023-05-31: Andreas Wild & Mathis Richter, Lava - an open-source software framework for developing neuro-inspired applications.

The Lava framework

Recording

https://www.youtube.com/watch?v=vXZukQ6A79k

Speakers’ bios

Andreas Wild received the Dr. rer. nat. degree in physics, with a focus on the development of silicon-based electron spin qubits, from the Technical University of Munich, Germany, in 2013. He joined Intel in 2013 and has been a Senior Researcher with the Intel Neuromorphic Computing Lab since 2015, where he leads algorithm research.

Mathis Richter is a Research Scientist in the Neuromorphic Computing Lab at Intel Labs, where he leads the Application Software team, developing commercial software solutions based on neuromorphic technology. Before joining Intel in 2021, he worked as a postdoc and PhD student on neural process models of higher cognition at the Institute for Neural Computation, Ruhr-University Bochum.

2023-06-08: Federico Corradi

Federico Corradi

Recording

https://youtu.be/xiYUVzdwDIA

Speaker’s bio

Dr. Federico Corradi is an Assistant Professor in the Electrical Engineering Department. His research activities are in neuromorphic computing and engineering, and span from the development of efficient models of computation to novel microelectronic architectures, with CMOS and emerging technologies, for both efficient deep learning and brain-inspired algorithms. His long-term research goal is to understand the principles of computation in natural neural systems and to apply them to the development of a new generation of energy-efficient sensing and computing technologies. His research outputs find use in several application domains, such as robotics, machine vision, temporal signal processing, and biomedical signal analysis.

Dr. Corradi received a Ph.D. degree in Neuroinformatics from the University of Zurich and an international Ph.D. from the ETH Neuroscience Centre Zurich in 2015. He was a postgraduate at the Institute of Neuroinformatics in 2018. From 2015 to 2018, he worked at the Institute of Neuroinformatics’ spin-off company Inilabs, developing event-based cameras and neuromorphic processors. From 2018 to 2022, he was at IMEC, the Netherlands, where he started a group focusing on neuromorphic IC design activities. His passion for research recently brought him back to academia, while keeping strong ties with startups and companies.

He is an active review editor of Frontiers in Neuromorphic Engineering, IEEE, and other international journals. In addition, he currently serves as a technical program committee member of several machine learning and neuromorphic symposia and conferences (ICTOPEN, ICONS, DSD, EUROMICRO).

2023-07-11: Konrad Kording, Does the brain do gradient descent?

Konrad Kording

Recording

https://youtu.be/E5hATeCZQnU

Speaker’s bio

Konrad Kording runs his lab at the University of Pennsylvania. Konrad is interested in the question of how the brain solves the credit assignment problem and, similarly, how we should assign credit in the real world (through causality). As an extension of this main thrust, he is interested in applications of causality in biomedical research. Konrad trained as a student at ETH Zurich with Peter König, as a postdoc at UCL with Daniel Wolpert, and at MIT with Josh Tenenbaum. After a decade at Northwestern University, he is now a PIK professor at UPenn.

2023-07-19: Lana Josipović, From C/C++ to Dynamically Scheduled Circuits

Lana Josipović

Recording

https://youtu.be/mQU8iU0HyHw

Speaker’s bio

Lana Josipović is an Assistant Professor in the Department of Information Technology and Electrical Engineering at ETH Zurich. Prior to joining ETH Zurich in January 2022, she received a Ph.D. degree in Computer Science from EPFL, Switzerland. Her research interests include reconfigurable computing and electronic design automation, with an emphasis on high-level synthesis techniques to generate hardware designs from high-level programming languages. She developed Dynamatic, an open-source high-level synthesis tool that produces dynamically scheduled circuits from C/C++ code. She is a recipient of the EDAA Outstanding Dissertation Award, Google Ph.D. Fellowship in Systems and Networking, Google Women Techmakers Scholarship, and Best Paper Award at FPGA'20.

+ \ No newline at end of file diff --git a/events-recordings/jason-eshraghian.webp b/events-recordings/jason-eshraghian.webp new file mode 100644 index 00000000..ffe17a39 Binary files /dev/null and b/events-recordings/jason-eshraghian.webp differ diff --git a/events-recordings/jason-eshraghian_hu5d2d5366ae47912bc22f41487225bf0b_38920_1024x0_resize_q75_h2_box_2.webp b/events-recordings/jason-eshraghian_hu5d2d5366ae47912bc22f41487225bf0b_38920_1024x0_resize_q75_h2_box_2.webp new file mode 100644 index 00000000..f5a6676b Binary files /dev/null and b/events-recordings/jason-eshraghian_hu5d2d5366ae47912bc22f41487225bf0b_38920_1024x0_resize_q75_h2_box_2.webp differ diff --git a/events-recordings/jason-eshraghian_hu5d2d5366ae47912bc22f41487225bf0b_38920_480x0_resize_q75_h2_box_2.webp b/events-recordings/jason-eshraghian_hu5d2d5366ae47912bc22f41487225bf0b_38920_480x0_resize_q75_h2_box_2.webp new file mode 100644 index 00000000..b609d50c Binary files /dev/null and b/events-recordings/jason-eshraghian_hu5d2d5366ae47912bc22f41487225bf0b_38920_480x0_resize_q75_h2_box_2.webp differ diff --git a/events-recordings/konrad-kording.jpg b/events-recordings/konrad-kording.jpg new file mode 100644 index 00000000..3f13b6d6 Binary files /dev/null and b/events-recordings/konrad-kording.jpg differ diff --git a/events-recordings/lana-josipovic.jpg b/events-recordings/lana-josipovic.jpg new file mode 100644 index 00000000..7c1a777f Binary files /dev/null and b/events-recordings/lana-josipovic.jpg differ diff --git a/events-recordings/lava-intel.png b/events-recordings/lava-intel.png new file mode 100644 index 00000000..627fc961 Binary files /dev/null and b/events-recordings/lava-intel.png differ diff --git a/events-recordings/trevor-bekolay.jpeg b/events-recordings/trevor-bekolay.jpeg new file mode 100644 index 00000000..bb679d69 Binary files /dev/null and b/events-recordings/trevor-bekolay.jpeg differ diff --git a/events-recordings/trevor-bekolay_hufbec93062349485948350099f8024531_51540_1024x0_resize_q75_box.jpeg b/events-recordings/trevor-bekolay_hufbec93062349485948350099f8024531_51540_1024x0_resize_q75_box.jpeg new file mode 100644 index 00000000..316bcbbf Binary files /dev/null and b/events-recordings/trevor-bekolay_hufbec93062349485948350099f8024531_51540_1024x0_resize_q75_box.jpeg differ diff --git a/events-recordings/trevor-bekolay_hufbec93062349485948350099f8024531_51540_480x0_resize_q75_box.jpeg b/events-recordings/trevor-bekolay_hufbec93062349485948350099f8024531_51540_480x0_resize_q75_box.jpeg new file mode 100644 index 00000000..2e300c4c Binary files /dev/null and b/events-recordings/trevor-bekolay_hufbec93062349485948350099f8024531_51540_480x0_resize_q75_box.jpeg differ diff --git a/events/2023-01-26-Nengo.pdf b/events/2023-01-26-Nengo.pdf new file mode 100644 index 00000000..4d6a531d Binary files /dev/null and b/events/2023-01-26-Nengo.pdf differ diff --git a/events/2023-02-14-Giorgia-Dellaferrera.pdf b/events/2023-02-14-Giorgia-Dellaferrera.pdf new file mode 100644 index 00000000..18c16f98 Binary files /dev/null and b/events/2023-02-14-Giorgia-Dellaferrera.pdf differ diff --git a/events/catherine-schuman.webp b/events/catherine-schuman.webp new file mode 100644 index 00000000..34000988 Binary files /dev/null and b/events/catherine-schuman.webp differ diff --git a/events/corradi.jpg b/events/corradi.jpg new file mode 100644 index 00000000..58d66c53 Binary files /dev/null and b/events/corradi.jpg differ diff --git a/events/giorgia-dellaferrera.jpeg b/events/giorgia-dellaferrera.jpeg new file mode 
100644 index 00000000..95f4b3f0 Binary files /dev/null and b/events/giorgia-dellaferrera.jpeg differ diff --git a/events/giorgia-dellaferrera.jpg b/events/giorgia-dellaferrera.jpg new file mode 100644 index 00000000..06525e30 Binary files /dev/null and b/events/giorgia-dellaferrera.jpg differ diff --git a/events/giulia-dangelo.jpg b/events/giulia-dangelo.jpg new file mode 100644 index 00000000..2a1a6a4d Binary files /dev/null and b/events/giulia-dangelo.jpg differ diff --git a/events/gregor-lenz.jpeg b/events/gregor-lenz.jpeg new file mode 100644 index 00000000..fe60b3c1 Binary files /dev/null and b/events/gregor-lenz.jpeg differ diff --git a/events/index.html b/events/index.html new file mode 100644 index 00000000..c3f50b32 --- /dev/null +++ b/events/index.html @@ -0,0 +1,9 @@ +Events
Featured image of post Events

Events

Events organised by ONM: talks, hands-on sessions and more.

Upcoming events

Join our newsletter to be updated on new events and get a reminder!

2023-09-25: Giulia D’Angelo, What’s catching your eye? The visual attention mechanism

Giulia D’Angelo

Time

6PM-7:30PM, CEST.

Abstract

Every agent, whether animal or robotic, needs to process its visual sensory input in an efficient way, to allow understanding of, and interaction with, the environment. The process of filtering relevant information out of the continuous bombardment of complex sensory data is called selective attention. Visual attention is the result of the complex interplay between bottom-up and top-down mechanisms to perceptually organise and understand the scene. Giulia will describe how to approach visual attention using bio-inspired models emulating the human visual system to allow robots to interact with their surroundings.

Speaker’s bio

Giulia D’Angelo is a postdoctoral researcher in neuroengineering in the EDPR laboratory at the Italian Institute of Technology. She obtained a B.Sc. in biomedical engineering and an M.Sc. in neuroengineering, developing a neuromorphic visual system at King’s College London. She successfully defended her Ph.D. in 2022 at the University of Manchester, proposing a biologically plausible model for event-driven, saliency-based visual attention. She is currently working on bio-inspired visual algorithms exploiting neuromorphic platforms.

2023-10-22: Innatera

Innatera

TBD.

2023-11-16: Timoleon Moraitis, Making neuromorphic computing mainstream

Timoleon Moraitis

Time

6PM-7PM, CET.

Abstract

TBD.

Speaker’s bio

TBD.

Tobias Fischer

Abstract

TBD.

Speaker’s bio

Tobias conducts interdisciplinary research at the intersection of intelligent robotics, computer vision, and computational cognition. His main goal is to develop high-performing, bio-inspired computer vision algorithms that simultaneously examine the perceptual capabilities of animals, humans, and robots.

He is a Lecturer (Assistant Professor) in Queensland University of Technology’s Centre for Robotics. He joined the Centre as an Associate Investigator and Research Fellow in January 2020. Previously, he was a postdoctoral researcher in the Personal Robotics Lab at Imperial College London.

He received a PhD from Imperial College London in January 2019. His thesis was awarded the UK Best Thesis in Robotics Award 2018 and the Eryl Cadwaladr Davies Award for the best thesis in Imperial’s EEE Department in 2017-2018. He previously received an M.Sc. degree (distinction) in Artificial Intelligence from The University of Edinburgh in 2014 and a B.Sc. degree in Computer Engineering from Ilmenau University of Technology, Germany, in 2013. His work has attracted two best poster awards and one best paper award, and he was the senior author of the winning submission to the Facebook Mapillary Place Recognition Challenge 2020.

+ \ No newline at end of file diff --git a/events/innatera.png b/events/innatera.png new file mode 100644 index 00000000..428b319d Binary files /dev/null and b/events/innatera.png differ diff --git a/events/jason-eshraghian.webp b/events/jason-eshraghian.webp new file mode 100644 index 00000000..ffe17a39 Binary files /dev/null and b/events/jason-eshraghian.webp differ diff --git a/events/konrad-kording.jpg b/events/konrad-kording.jpg new file mode 100644 index 00000000..3f13b6d6 Binary files /dev/null and b/events/konrad-kording.jpg differ diff --git a/events/lana-josipovic.jpg b/events/lana-josipovic.jpg new file mode 100644 index 00000000..7c1a777f Binary files /dev/null and b/events/lana-josipovic.jpg differ diff --git a/events/lava-intel.png b/events/lava-intel.png new file mode 100644 index 00000000..627fc961 Binary files /dev/null and b/events/lava-intel.png differ diff --git a/events/timoleon-moraitis.png b/events/timoleon-moraitis.png new file mode 100644 index 00000000..3bab2822 Binary files /dev/null and b/events/timoleon-moraitis.png differ diff --git a/events/tobias-fischer.webp b/events/tobias-fischer.webp new file mode 100644 index 00000000..e7ddf86e Binary files /dev/null and b/events/tobias-fischer.webp differ diff --git a/events/trevor-bekolay.jpeg b/events/trevor-bekolay.jpeg new file mode 100644 index 00000000..bb679d69 Binary files /dev/null and b/events/trevor-bekolay.jpeg differ diff --git a/getting-involved/index.html b/getting-involved/index.html new file mode 100644 index 00000000..b2374858 --- /dev/null +++ b/getting-involved/index.html @@ -0,0 +1,13 @@ +Getting involved

Getting involved

The easiest way to get in touch is probably through Discord, where we discuss research topics, job opportunities, open hardware, spiking neural network training and much more. We’d be delighted to have you join! If you feel like contributing to ONM but you’re not exactly sure how, here are some ideas to get you started:

  • Link an interesting open source repository to our collection so that others can find it too! This can be a framework or package that deals with neuromorphic things, or a neat implementation, for example. ONM is meant as a platform to showcase your code and others’!
  • Write a blog post together with the community. If you think you learned something useful that you’d like to share with the community, you can simply post your draft on our Discord to ask for some feedback. In case you then want to publish it on our website, take a look at the structure of existing posts and open a PR for a new one. Think about it as a mini paper!
  • Why not host your code in the ONM organisation directly? It’ll boost visibility and you can get instant help/feedback from community members. You can also migrate an existing repository if you wish to do so.
+ \ No newline at end of file diff --git a/img/ONM-logo.png b/img/ONM-logo.png new file mode 100644 index 00000000..486c216e Binary files /dev/null and b/img/ONM-logo.png differ diff --git a/img/ONM.png b/img/ONM.png new file mode 100644 index 00000000..bc5eadbf Binary files /dev/null and b/img/ONM.png differ diff --git a/index.html b/index.html new file mode 100644 index 00000000..2f59926a --- /dev/null +++ b/index.html @@ -0,0 +1,6 @@ +Open Neuromorphic
Featured image of post SNN library benchmarks

SNN library benchmarks

Comparing the most popular SNN frameworks for gradient-based optimization on top of PyTorch.

+ \ No newline at end of file diff --git a/index.xml b/index.xml new file mode 100644 index 00000000..9a5ad425 --- /dev/null +++ b/index.xml @@ -0,0 +1,14 @@ +Open Neuromorphichttps://open-neuromorphic.org/Recent content on Open NeuromorphicHugo -- gohugo.ioen-usWed, 02 Aug 2023 00:00:00 +0000SNN library benchmarkshttps://open-neuromorphic.org/p/snn-library-benchmarks/Wed, 02 Aug 2023 00:00:00 +0000https://open-neuromorphic.org/p/snn-library-benchmarks/<img src="https://open-neuromorphic.org/p/snn-library-benchmarks/framework-benchmarking-16k-header.png" alt="Featured image of post SNN library benchmarks" />SNN library benchmarks Open Neuromorphic&rsquo;s list of SNN frameworks currently counts 10 libraries, and those are only the most popular ones! As the sizes of spiking neural network models grow thanks to deep learning, optimization becomes more important for researchers and practitioners alike. Training SNNs is often slow, as the stateful networks are typically fed sequential inputs. Today&rsquo;s most popular training method then is some form of backpropagation through time, whose time complexity scales with the number of time steps.Bits of Chips | TrueNorthhttps://open-neuromorphic.org/p/bits-of-chips-truenorth/Mon, 27 Mar 2023 00:00:00 +0000https://open-neuromorphic.org/p/bits-of-chips-truenorth/<img src="https://open-neuromorphic.org/p/bits-of-chips-truenorth/brain-to-chip.png" alt="Featured image of post Bits of Chips | TrueNorth" />Why do we want to emulate the brain? If you have ever read an article on neuromorphic computing, you might have noticed that in the introduction of each of these there is the same statement: &ldquo;The brain is much powerful than any AI machine when it comes to cognitive tasks but it runs on a 10W power budget!&rdquo;. This is absolutely true: neurons in the brain communicate among each other by means of spikes, which are short voltage pulses that propagate from one neuron to the other.Efficient compression for event-based datahttps://open-neuromorphic.org/p/efficient-compression-for-event-based-data/Tue, 28 Feb 2023 00:00:00 +0000https://open-neuromorphic.org/p/efficient-compression-for-event-based-data/<img src="https://open-neuromorphic.org/p/efficient-compression-for-event-based-data/file_read_benchmark.png" alt="Featured image of post Efficient compression for event-based data" />Efficient compression for event-based data Datasets grow larger in size As neuromorphic algorithms tackle more complex tasks that are linked to bigger datasets, and event cameras mature to have higher spatial resolution, it is worth looking at how to encode that data efficiently when storing it on disk. To give you an example, Prophesee&rsquo;s latest automotive object detection dataset is some 3.5 TB in size for under 40h of recordings with a single camera.Digital neuromophic hardware read listhttps://open-neuromorphic.org/p/digital-neuromophic-hardware-read-list/Wed, 11 Jan 2023 00:00:00 +0000https://open-neuromorphic.org/p/digital-neuromophic-hardware-read-list/<img src="https://open-neuromorphic.org/p/digital-neuromophic-hardware-read-list/frenkel-thesis.png" alt="Featured image of post Digital neuromophic hardware read list" />Here&rsquo;s a list of articles and theses related to digital hardware designs for neuomorphic applications. I plan to update it regularly. To be redirected directly to the sources, click on the titles! +If you are new to neuromorphic computing, I strongly suggest to get a grasp of how an SNN works from this paper. 
Otherwise, it will be pretty difficult to understand the content of the papers listed here. +2015 TrueNorth: Design and Tool Flow of a 65 mW 1 Million Neuron Programmable Neurosynaptic Chip, Filipp Akopyan et al.Spiking neurons: a digital hardware implementationhttps://open-neuromorphic.org/p/spiking-neurons-a-digital-hardware-implementation/Mon, 02 Jan 2023 00:00:00 +0000https://open-neuromorphic.org/p/spiking-neurons-a-digital-hardware-implementation/<img src="https://open-neuromorphic.org/p/spiking-neurons-a-digital-hardware-implementation/loihi.png" alt="Featured image of post Spiking neurons: a digital hardware implementation" />Spiking neurons In this article, we will try to model a layer of Leaky Integrate and Fire (LIF) spiking neurons using digital hardware: registers, memories, adders and so on. To do so, we will consider a single output neuron connected to multiple input neurons from a previous layer. +In a Spiking Neural Network (SNN), neurons communicate by means of spikes: these activation voltages are then converted to currents through the synapses, charging the membrane potential of the destination neuron.Open Neuromorphichttps://open-neuromorphic.org/p/open-neuromorphic/Wed, 21 Dec 2022 00:00:00 +0000https://open-neuromorphic.org/p/open-neuromorphic/<img src="https://open-neuromorphic.org/p/open-neuromorphic/ONM.png" alt="Featured image of post Open Neuromorphic" />This organisation is created by a loose collective of open source collaborators across academia, industry and individual contributors. What connects us is the love for building tools that can be used in the neuromorphic community and we want to share ownership of this vision. +Open Neuromorphic (ONM) provides the following things: +A curated list of software frameworks to make it easier to find the tool you need. A platform for your code.Abouthttps://open-neuromorphic.org/about/Mon, 01 Jan 0001 00:00:00 +0000https://open-neuromorphic.org/about/<img src="https://open-neuromorphic.org/about/ONM.png" alt="Featured image of post About" />This organisation is created by a loose collective of open source collaborators across academia, industry and individual contributors. Most of us have never met in person before but have started contributing to a common or each other&rsquo;s projects! What connects us is the love for building tools that can be used in the neuromorphic community and we want to share ownership of this vision. If you feel like that resonates with you, please don&rsquo;t hesitate to get in touch!Eventshttps://open-neuromorphic.org/events/Mon, 01 Jan 0001 00:00:00 +0000https://open-neuromorphic.org/events/<img src="https://open-neuromorphic.org/img/ONM.png" alt="Featured image of post Events" />Upcoming events Join our newsletter to be updated on new events and get a reminder! +2023-09-25: Giulia D&rsquo;Angelo, What&rsquo;s catching your eye? The visual attention mechanism Giulia D&rsquo;Angelo +Time 6PM-7:30PM, CEST. +Abstract Every agent, whether animal or robotic, needs to process its visual sensory input in an efficient way, to allow understanding of, and interaction with, the environment. 
The process of filtering revelant information out of the continuous bombardment of complex sensory data is called selective attention.Events recordingshttps://open-neuromorphic.org/events-recordings/Mon, 01 Jan 0001 00:00:00 +0000https://open-neuromorphic.org/events-recordings/2023-01-26: Trevor Bekolay, Nengo - Applied Brain Research Recording https://youtu.be/sgu9l_bqAHM +Slides click here +Speaker&rsquo;s bio Trevor Bekolay’s primary research interest is in learning and memory. In his Master’s degree, he explored how to do supervised, unsupervised, and reinforcement learning in networks of biologically plausible spiking neurons. In his PhD, he applied this knowledge to the domain of speech to explore how sounds coming into the ear become high-level linguistic representations, and how those representations become sequences of vocal tract movements that produce speech.Getting involvedhttps://open-neuromorphic.org/getting-involved/Mon, 01 Jan 0001 00:00:00 +0000https://open-neuromorphic.org/getting-involved/The easiest way to get in touch is probably through Discord, where we discuss research topics, job opportunities, open hardware, spiking neural network training and much more. We&rsquo;d be delighted to have you join! If you feel like contributing to ONM but you&rsquo;re not exactly sure how, here are some ideas to get you started: +Link an interesting open source repository to our collection so that others can find it too!Resourceshttps://open-neuromorphic.org/resources/Mon, 01 Jan 0001 00:00:00 +0000https://open-neuromorphic.org/resources/Please check our Github repository for a list of neuromorphic open source software and hardware!Teamhttps://open-neuromorphic.org/team/Mon, 01 Jan 0001 00:00:00 +0000https://open-neuromorphic.org/team/Fabrizio Ottati Fabrizio Ottati +Fabrizio Ottati is a Ph.D. student in the Department of Electronics and Communications of Politecnico di Torino, under the supervision of professor Luciano Lavagno and professor Mario Roberto Casu. +His main interests are event-based cameras, digital hardware design and automation, spiking neural networks and piedmontese red wine. He is the maintainer of two open source projects in the field of neuromorphic computing, Tonic and Expelliarmus. You can find more information on his website. 
\ No newline at end of file diff --git a/onm-events/event-22-12-13/slides/charlotte-frenkel.pdf b/onm-events/event-22-12-13/slides/charlotte-frenkel.pdf new file mode 100644 index 00000000..79e9c5f6 Binary files /dev/null and b/onm-events/event-22-12-13/slides/charlotte-frenkel.pdf differ diff --git a/onm-events/event-22-12-13/slides/fabrizio-ottati.pdf b/onm-events/event-22-12-13/slides/fabrizio-ottati.pdf new file mode 100644 index 00000000..dd0b18ba Binary files /dev/null and b/onm-events/event-22-12-13/slides/fabrizio-ottati.pdf differ diff --git a/p/bits-of-chips-truenorth/brain-to-chip.png b/p/bits-of-chips-truenorth/brain-to-chip.png new file mode 100644 index 00000000..3d572ff3 Binary files /dev/null and b/p/bits-of-chips-truenorth/brain-to-chip.png differ diff --git a/p/bits-of-chips-truenorth/brain-to-chip_hu9771bc0e82bc872d2b8f49c5ba507c44_1564085_120x120_fill_box_smart1_3.png b/p/bits-of-chips-truenorth/brain-to-chip_hu9771bc0e82bc872d2b8f49c5ba507c44_1564085_120x120_fill_box_smart1_3.png new file mode 100644 index 00000000..cb4f20bb Binary files /dev/null and b/p/bits-of-chips-truenorth/brain-to-chip_hu9771bc0e82bc872d2b8f49c5ba507c44_1564085_120x120_fill_box_smart1_3.png differ diff --git a/p/bits-of-chips-truenorth/brain-to-chip_hu9771bc0e82bc872d2b8f49c5ba507c44_1564085_1600x0_resize_box_3.png b/p/bits-of-chips-truenorth/brain-to-chip_hu9771bc0e82bc872d2b8f49c5ba507c44_1564085_1600x0_resize_box_3.png new file mode 100644 index 00000000..57f2b2ba Binary files /dev/null and b/p/bits-of-chips-truenorth/brain-to-chip_hu9771bc0e82bc872d2b8f49c5ba507c44_1564085_1600x0_resize_box_3.png differ diff --git a/p/bits-of-chips-truenorth/brain-to-chip_hu9771bc0e82bc872d2b8f49c5ba507c44_1564085_800x0_resize_box_3.png b/p/bits-of-chips-truenorth/brain-to-chip_hu9771bc0e82bc872d2b8f49c5ba507c44_1564085_800x0_resize_box_3.png new file mode 100644 index 00000000..740d126a Binary files /dev/null and b/p/bits-of-chips-truenorth/brain-to-chip_hu9771bc0e82bc872d2b8f49c5ba507c44_1564085_800x0_resize_box_3.png differ diff --git a/p/bits-of-chips-truenorth/crossbar.png b/p/bits-of-chips-truenorth/crossbar.png new file mode 100644 index 00000000..c4da7897 Binary files /dev/null and b/p/bits-of-chips-truenorth/crossbar.png differ diff --git a/p/bits-of-chips-truenorth/imc.jpg b/p/bits-of-chips-truenorth/imc.jpg new file mode 100644 index 00000000..c7a8c0f4 Binary files /dev/null and b/p/bits-of-chips-truenorth/imc.jpg differ diff --git a/p/bits-of-chips-truenorth/index.html b/p/bits-of-chips-truenorth/index.html new file mode 100644 index 00000000..6fcd5931 --- /dev/null +++ b/p/bits-of-chips-truenorth/index.html @@ -0,0 +1,32 @@ +Bits of Chips | TrueNorth
Featured image of post Bits of Chips | TrueNorth

Bits of Chips | TrueNorth

Analysis of the TrueNorth chip and article.

Why do we want to emulate the brain?

If you have ever read an article on neuromorphic computing, you might have noticed that the introduction of each of these contains the same statement: “The brain is much more powerful than any AI machine when it comes to cognitive tasks, but it runs on a 10 W power budget!”. This is absolutely true: neurons in the brain communicate with each other by means of spikes, which are short voltage pulses that propagate from one neuron to the other. The average spiking activity is estimated to be around 10 Hz (i.e. a spike every 100 ms). This yields very low processing power consumption, since the activity in the brain turns out to be really sparse (at least, this is the hypothesis).

How can the brain do all this? There are several reasons (or hypotheses, I should say):

  • the 3D connectivity among neurons. While in today’s chips we can place connections among logic gates and circuits only in 2D space, the brain has the whole 3D space at its disposal; this allows the mammalian brain to reach a fanout on the order of 10 thousand connections per neuron.
  • extremely low-power operation. Through evolution, the most power-efficient “brain implementations” have won out, since the ones that consume less energy to live are the ones that survive when there is no food (not entirely correct, but I hope that true scientists won’t kill me). The power density in the brain is estimated to be 10 mW per square centimeter, while in a modern digital processor we easily reach 100 W per square centimeter.

Hence, IBM decided to try to emulate the brain with TrueNorth, a 4096-core chip packing 1 million neurons and 256 million synapses. Let’s dive into its design!

Introduction

The TrueNorth design has been driven by seven principles.

Purely event-driven architecture

The architecture is a purely event-driven one, being Globally Asynchronous Locally Synchronous (GALS), with a completely asynchronous interconnection fabric among the synchronous cores. What does this actually mean?

A Globally Asynchronous Locally Synchronous architecture. Source

In general, in a GALS architecture, there is an array of processing elements (PEs), each of which is locally synchronous, driven by its own clock. The local clocks can be different for each PE, since each PE may be running at a different speed. When two different clock domains have to be interfaced, the communication between them is effectively asynchronous: handshake protocols have to be implemented between them in order to guarantee proper global operation.
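To make the handshake idea concrete, below is a toy, sequentialised sketch of a four-phase (return-to-zero) handshake in Python. It is purely illustrative: the signal names and the lock-step ordering are simplifying assumptions of mine, not a description of how TrueNorth’s asynchronous fabric is actually implemented.

```python
# Toy four-phase (return-to-zero) handshake between a sender and a
# receiver in different clock domains. Real asynchronous fabrics use
# dedicated request/acknowledge wires and asynchronous logic; here the
# four signal transitions are simply replayed one after the other.
def four_phase(data_words):
    req, ack = 0, 0
    for data in data_words:
        bus, req = data, 1     # sender: drive the data wires, raise req
        latched, ack = bus, 1  # receiver: sees req high, latches data, raises ack
        req = 0                # sender: sees ack high, lowers req
        ack = 0                # receiver: sees req low, lowers ack; both idle again
        yield latched

for word in four_phase([0b0001, 0b1010]):
    print(f"delivered {word:04b}")
```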

In TrueNorth, as in SpiNNaker, there is no global clock: the PEs, which are neurosynaptic cores, are interconnected through a completely asynchronous network. In this way, the chip’s operation is event-driven, since the network is activated only when there are spikes (and other kinds of events) to be transmitted.

Low power operation

The CMOS process employed is a low-power one, with the goal of minimising static power. The technology node is 28nm CMOS.

Massive parallelism

Since the brain is a massively parallel architecture, employing 100 billion neurons, each of which has a fanout of approximately 10 thousand synapses, parallelism is a key feature of TrueNorth: the chip packs 1 million neurons and 256 million synapses by interconnecting 4096 cores, each of which models 256 neurons and 64 thousand synapses.

Real time operation

The authors claim real-time operation, which translates to a global time synchronisation of 1 ms, i.e. the neurons are updated, and can spike, every millisecond.

Scalable design

The architecture is scalable: multiple cores can be put together and, since the clock signal is distributed only locally, within each core, the global clock-skew problem of modern VLSI digital circuits does not affect TrueNorth.

Error tolerance

Redundancy is employed in the design, especially in the memory circuits, to make the chip tolerant to defects.

One-to-one correspondence between software and hardware

The chip operation corresponds perfectly to the software operation, when using the IBM TrueNorth application design software.

Designing an asynchronous circuit is a very difficult task, since no VLSI EDA tools are available for this kind of design (well, actually now there is a research-level one); hence, the TrueNorth designers decided to use conventional EDA tools for the design of the synchronous cores, and custom design tools and flows for the asynchronous interconnection fabric.

Architecture

Who’s Von Neumann?

The TrueNorth chip is not a Von Neumann machine! But what does this mean?

The Von Neumann architecture. Source

In a Von Neumann machine, like the one depicted above, the processing unit is separated from the memory, which stores both data and instructions. The processor reads the instructions from the memory, decodes them, retrieves the data on which it needs to operate from the same memory and, then, executes the instructions.

A neuromorphic chip, in principle, is an in-memory computing architecture: there is no central memory and no central processing unit; instead, storage and computational circuitry are distributed, i.e. we have many small memories and many small computational units, as shown in the figure below.

An in-memory computing architecture. Source

There are two main advantages to this approach:

  • lower energy consumption associated with memory accesses. The main power cost of a memory access is the one corresponding to data movement on the bus. A data bus, simplifying, is a big $RC$ circuit, and every time we make the signals on it change, we consume a lot of power driving this equivalent circuit. One can easily deduce that the values of both the resistance and the capacitance are directly proportional to the bus length! Hence, by putting the processing element (PE) and the memory close to each other, we reduce the data-movement power consumption (see the back-of-the-envelope sketch after this list).
  • lower latency associated with memory accesses. A big $RC$ circuit (i.e. a long bus) is also slower than a short one (the time constant associated with the equivalent circuit is larger); hence, by shortening the bus, we also reduce the time needed to read or write data.
  • high parallelism. The PEs can all work in parallel, since each of them can access its own data independently of the other PEs.
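As a rough, first-order sketch of the two $RC$ arguments above (assuming an unbuffered wire whose resistance and capacitance both grow linearly with its length $L$, and omitting constants):

$$E_{\text{switch}} \approx C \, V_{DD}^{2} \propto L, \qquad t_{\text{delay}} \approx 0.69 \, R C \propto L^{2}$$

Halving the bus length thus roughly halves the energy per transition and quarters the wire delay, which is exactly what co-locating PEs and memories exploits.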

However, there is no such thing as a free lunch: this kind of architecture comes with one big disadvantage, among others: the area occupation of the circuit. In a Von Neumann architecture, the memory density is higher: in VLSI circuits, the larger the memory, the more bits you can store per square micrometer; hence, the total area occupied by the memories in an in-memory computing architecture is larger than the one of a Von Neumann circuit. Moreover, you have multiple PEs performing the same operation on different data (this kind of architecture is also called Single Instruction Multiple Data (SIMD)); with a central memory, you can use a single PE to perform the same operations, saving lots of chip area at the expense of performance, since you cannot perform operations in parallel.

Memory and computation co-location: emulating the brain

In TrueNorth, a mixed approach has been adopted: a neurosynaptic core packs 256 neurons, whose state is stored in memory and which share the same PE; 4096 such cores are arranged in an in-memory computing fashion, for the advantages cited above. However, what is a neuron?

A typical neuron. Source

A neuron is made of different parts, which are shown in the figure above. Dendrites branch out from the cell body, also called the soma, where the nucleus is located. Then, there is a long communication channel called the axon, which ends in the pre-synaptic terminal, which can have multiple branches.

The neuron dendrites. Source

Dendrites branch out from the soma. Their function is to receive information from other neurons. Some dendrites have small protrusions called spines that are important for communicating with other neurons.

The neuron soma. Source

The soma is where the computation happens. This is where the membrane potential is built up, through ion exchange with the environment and other neurons.

The neuron axon. Source

The axon is the communication channel of the neuron. It is attached to the neuron through the axon hillock; at the end of the axon, we find the pre-synaptic terminals, which are the “pins” used to connect to the post-synaptic terminal of other neurons. These connections are called synapses.

Synaptic connection among neurons. Source

The axon terminates at the pre-synaptic terminal or terminal bouton. The terminal of the pre-synaptic cell forms a synapse with another neuron or cell, known as the post-synaptic cell. When the action potential reaches the pre-synaptic terminal, the neuron releases neurotransmitters into the synapse. The neurotransmitters act on the post-synaptic cell. Therefore, neuronal communication requires both an electrical signal (the action potential) and a chemical signal (the neurotransmitter). Most commonly, pre-synaptic terminals contact dendrites, but terminals can also communicate with cell bodies or even axons. Neurons can also synapse on non-neuronal cells such as muscle cells or glands.

The terms pre-synaptic and post-synaptic are in reference to which neuron is releasing neurotransmitters and which is receiving them. Pre-synaptic cells release neurotransmitters into the synapse and those neurotransmitters act on the post-synaptic cell.

The axon transmits an action potential, which is the famous spike! This results in the release of chemical neurotransmitters to communicate with other cells. Here’s a nice video showing it in action (source).

From biology to silicon

Hence, in a neuromorphic chip, memory and computational units are co-located. The neuron constitutes the computational unit, while the synapse weights and the membrane potential are the data on which the neuron operates. The chip is programmed by deciding which neurons are connected to which; hence, we do not write instructions to be executed to a memory, but we program the neurons’ interconnections and parameters!

A fully-connected neural network (left) and its logical representation in the TrueNorth chip (right). Source

In the figure above, the logical representation of a TrueNorth core is reported. Consider the sub-figure on the left: on its right, the post-synaptic neurons are represented with a triangular shape, and these are connected to some neurons on its left, whose outputs are represented by the AND-gate-shaped objects. It is an example of a fully-connected layer in artificial neural networks.

In the sub-figure on the right, the logic implementation of this layer is depicted. Input spikes are collected in buffers: since the spikes are evaluated periodically in the chip (a clock tick is distributed every 1 ms), we need to store them until we can evaluate them; for this reason, we need local storage. Which spike is delivered to which neuron is determined by the connectivity, here illustrated through a crossbar: a dot on a wire represents a connection between the corresponding post-synaptic neuron dendrite (vertical wires) and the pre-synaptic neuron axon terminal (horizontal wires); this connection is the synapse, and its “strength” is the synapse weight.

When the clock tick arrives, the neurons process the incoming spikes and, if they have to, they fire and send their own spikes to the network of neurons. We can have local connections (i.e. the spikes are redistributed within the chip) or global connections (the spikes are delivered outside the chip through the Network-on-Chip (NoC)).
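To make the crossbar delivery step concrete, here is a minimal NumPy sketch of one tick. All names, sizes and values are illustrative assumptions of mine, not taken from IBM’s software stack.

```python
import numpy as np

# Toy model of the crossbar delivery step: spikes buffered since the last
# tick are routed to the post-synaptic neurons through a binary
# connectivity matrix.
N_AXONS, N_NEURONS = 256, 256

# crossbar[i, j] = 1 iff a dot connects axon (horizontal wire) i to
# dendrite (vertical wire) j.
crossbar = np.random.randint(0, 2, size=(N_AXONS, N_NEURONS))

# spike_buffer[i] = 1 if axon i has received a spike since the last tick.
spike_buffer = np.random.randint(0, 2, size=N_AXONS)

# At the 1 ms tick, neuron j receives the spikes of all axons connected
# to it: a binary matrix-vector product.
delivered = spike_buffer @ crossbar  # delivered[j] = input spike count
```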

There are some additional blocks, such as the Pseudo Random Number Generator (PRNG), that are used for more complex features, such as stochastic spike integration, stochastic leakage, stochastic thresholds, and so on.

Neuron model

Let’s get to the equations now! The neuron model employed in TrueNorth is the Leaky Integrate and Fire (LIF) one. The update equation is the following:

$$ V_{j}[t] = V_{j}[t-1] + \sum_{i=0}^{255} A_{i}[t] \cdot w_{i,j} \cdot s_{j}^{G_{i}} - \lambda_{j}$$

Lots of variables! Let’s study this mess:

  • $V_{j}[t]$ represents the membrane potential of the $j$-th post-synaptic neuron at timestamp $t$.
  • in a TrueNorth core, each post-synaptic neuron can be connected to 256 pre-synaptic neurons; this is why the sum spans from $i=0$ to $i=255$.
  • $A_{i}[t]$ corresponds to the $i$-th pre-synaptic neuron spike: it is equal to 1 if that neuron has spiked at timestamp $t$ and 0 otherwise.
  • $w_{i,j}$ is a binary variable that determines if the $i$-th pre-synaptic neuron is connected to the $j$-th post-synaptic neuron: when they are, $w_{i,j}=1$, otherwise $w_{i,j}=0$.
  • $s_{j}^{G_{i}}$ determines the strength of the connection, i.e. the synapse weight value. In TrueNorth, there are four types of axons, and the axon of the $i$-th pre-synaptic neuron is identified by a value of the variable $G_{i} \in \{1,2,3,4\}$; the dendrite of the $j$-th post-synaptic neuron is identified by $s_{j}$.
  • $\lambda_{j}$ is the leakage value. At each timestamp $t$, this fixed quantity is subtracted from the membrane potential.

In the equation, the spiking mechanism is missing! The authors denote the spiking threshold of the $j$-th neuron with $\alpha_{j}$: when $V_{j}[t] \gt \alpha_{j}$, the neuron potential is reset to the rest potential, denoted with $R_{j}$. The following is the pseudo-code, employing the C ternary operator (LaTeX in Markdown is a mess):

$$V_{j}^{*}[t] \triangleq V_{j}[t-1] + \sum_{i=0}^{255} A_{i}[t] \cdot w_{i,j} \cdot s_{j}^{G_{i}} - \lambda_{j}$$

$$V_{j}[t] = V_{j}^{*}[t] \gt \alpha_{j} ~?~ R_{j} ~:~ V_{j}^{*}[t]$$

One may be wondering what the PRNG block in the figure stands for: it is a pseudo-random number generator, and it is used to provide stochastic spike integration, leakage and thresholds for the post-synaptic neurons.
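As a sanity check on the equations above, here is a minimal NumPy sketch of one deterministic update tick. The stochastic (PRNG-based) features are left out, and all sizes and parameter values are illustrative assumptions, not TrueNorth defaults.

```python
import numpy as np

# One deterministic tick of the neuron model: integrate, leak, then
# threshold-and-reset, following the equations above.
N = 256
rng = np.random.default_rng(0)

A     = rng.integers(0, 2, size=N)        # A_i[t]: pre-synaptic spikes
w     = rng.integers(0, 2, size=(N, N))   # w_ij: binary connectivity
G     = rng.integers(0, 4, size=N)        # G_i: axon type (four types)
s     = rng.integers(-2, 3, size=(N, 4))  # s_j^g: per-neuron weight per type
lam   = np.ones(N)                        # lambda_j: leak
alpha = np.full(N, 5)                     # alpha_j: spiking threshold
R     = np.zeros(N)                       # R_j: rest (reset) potential
V     = np.zeros(N)                       # V_j[t-1]: membrane potentials

# V*_j[t] = V_j[t-1] + sum_i A_i[t] * w_ij * s_j^{G_i} - lambda_j
syn    = (A * w.T * s[:, G]).sum(axis=1)  # synaptic contribution per neuron
V_star = V + syn - lam
spiked = V_star > alpha
V      = np.where(spiked, R, V_star)      # the C ternary: spiked ? R_j : V*_j
```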

What we have discussed until now is the model of a TrueNorth core: in it, 256 neurons are placed, each of which can be connected to 256 other neurons, which can be in the same core but also outside of it. This is accomplished by interconnecting the neurons of multiple cores using a 2D mesh network; here comes the fun stuff, because the out-of-chip communication is completely asynchronous. Are you ready for a headache?

To be continued…

Stay tuned for episode II, in which we will dive into the theory behind the asynchronous design of TrueNorth :)

Authors

  • Fabrizio Ottati is a Ph.D. student in the HLS Laboratory of the Department of Electronics and Communications, Politecnico di Torino. His main interests are event-based cameras, digital hardware design and neuromorphic computing. He is one of the maintainers of two open source projects in the field of neuromorphic computing, Tonic and Expelliarmus, and one of the founders of Open Neuromorphic.


Built with Hugo
Theme Stack designed by Jimmy
+ \ No newline at end of file diff --git a/p/digital-neuromophic-hardware-read-list/frenkel-thesis.png b/p/digital-neuromophic-hardware-read-list/frenkel-thesis.png new file mode 100644 index 00000000..4b15f0b4 Binary files /dev/null and b/p/digital-neuromophic-hardware-read-list/frenkel-thesis.png differ diff --git a/p/digital-neuromophic-hardware-read-list/frenkel-thesis_hu87aec3c1b95cb5a45016ddc34478f587_199946_120x120_fill_box_smart1_3.png b/p/digital-neuromophic-hardware-read-list/frenkel-thesis_hu87aec3c1b95cb5a45016ddc34478f587_199946_120x120_fill_box_smart1_3.png new file mode 100644 index 00000000..ffc0620e Binary files /dev/null and b/p/digital-neuromophic-hardware-read-list/frenkel-thesis_hu87aec3c1b95cb5a45016ddc34478f587_199946_120x120_fill_box_smart1_3.png differ diff --git a/p/digital-neuromophic-hardware-read-list/frenkel-thesis_hu87aec3c1b95cb5a45016ddc34478f587_199946_1600x0_resize_box_3.png b/p/digital-neuromophic-hardware-read-list/frenkel-thesis_hu87aec3c1b95cb5a45016ddc34478f587_199946_1600x0_resize_box_3.png new file mode 100644 index 00000000..e9464ba5 Binary files /dev/null and b/p/digital-neuromophic-hardware-read-list/frenkel-thesis_hu87aec3c1b95cb5a45016ddc34478f587_199946_1600x0_resize_box_3.png differ diff --git a/p/digital-neuromophic-hardware-read-list/frenkel-thesis_hu87aec3c1b95cb5a45016ddc34478f587_199946_800x0_resize_box_3.png b/p/digital-neuromophic-hardware-read-list/frenkel-thesis_hu87aec3c1b95cb5a45016ddc34478f587_199946_800x0_resize_box_3.png new file mode 100644 index 00000000..c4109523 Binary files /dev/null and b/p/digital-neuromophic-hardware-read-list/frenkel-thesis_hu87aec3c1b95cb5a45016ddc34478f587_199946_800x0_resize_box_3.png differ diff --git a/p/digital-neuromophic-hardware-read-list/index.html b/p/digital-neuromophic-hardware-read-list/index.html new file mode 100644 index 00000000..5fa74f3c --- /dev/null +++ b/p/digital-neuromophic-hardware-read-list/index.html @@ -0,0 +1,15 @@ +Digital neuromophic hardware read list
Featured image of post Digital neuromorphic hardware read list

Digital neuromorphic hardware read list

List of research articles related to digital hardware for neuromorphic applications.

Here’s a list of articles and theses related to digital hardware designs for neuromorphic applications. I plan to update it regularly. To be redirected to the sources, click on the titles!

If you are new to neuromorphic computing, I strongly suggest getting a grasp of how an SNN works from this paper. Otherwise, it will be pretty difficult to understand the content of the papers listed here.

2015

TrueNorth: Design and Tool Flow of a 65 mW 1 Million Neuron Programmable Neurosynaptic Chip, Filipp Akopyan et al., 2015

This is a fully digital chip, embedding 4096 cores with 1M neurons and 256M synapses!

It adopts a mixed design methodology: the local computational cores are synchronous, while the interconnection infrastructure is asynchronous, i.e. event-driven. In particular, each core adopts time-multiplexing to compute the states of its neurons, thereby minimizing core area; 256 neurons are associated with each core.

TrueNorth claims real-time operation: a 1kHz synchronization signal is used to trigger the cores’ computations (state update, spike processing, etc.). Moreover, the authors provide a software tool with one-to-one mapping to the hardware in order to deploy applications on it.

2018

Loihi: A Neuromorphic Manycore Processor with On-Chip Learning, Mike Davies et al., 2018

Probably the most popular neuromorphic processor right now. What distinguishes it from the others are its online learning capabilities coupled with a completely asynchronous design: cores and routing network are completely clock-less!

Loihi supports local and scalable learning rules through spike traces, i.e. filtered pre-synaptic and post-synaptic spike trains with configurable time constants, multiple state variables per synapse in addition to the weight value, and reward traces. Moreover, several computational primitives are provided: addition of stochastic noise to the neuron’s synaptic current response; configurable and adaptable synaptic, axon and refractory delays; configurable dendritic tree processing; neuron threshold adaptation; scaling and saturation of synaptic weights.

The Loihi chip employs 128 neuromorphic cores, each consisting of 1024 primitive spiking neural units and each including a programmable learning engine. Each core has a 2Mb on-chip SRAM, with ECC overhead included. The chip is fabricated in Intel’s 14nm FinFET process.

2019

A 0.086-mm2 12.7-pJ/SOP 64k-Synapse 256-Neuron Online-Learning Digital Spiking Neuromorphic Processor in 28nm CMOS, Charlotte Frenkel et al., 2019

In this paper, a digital neuromorphic processor is presented. The Verilog is also open source!

The neuron states and synapse weights are stored in two foundry SRAMs on chip. In order to emulate a crossbar, time-multiplexing is adopted: synapse weights and neuron states are updated sequentially instead of in parallel. The core embeds 256 neurons (4kB SRAM) and 256x256 synapses (64kB SRAM). This yields very high synapse and neuron densities: 741k synapses per square millimeter and 3k neurons per square millimeter, in a 28nm CMOS FDSOI process.

The neuron model is programmable through an SPI interface: the user can choose between a LIF model (8 bits of state per neuron) and an Izhikevich one (55 bits of state per neuron). Online learning is enabled by a hardware-efficient implementation of the Spike-Driven Synaptic Plasticity (SDSP) rule.

The design is fully synchronous. The time evolution of the SNN implemented on the core can be tuned by changing the frequency of the time-reference events, so that neuron states are updated only when events actually take place. As a result, each Synaptic OPeration (SOP) requires only 12.7pJ when the chip is supplied at 0.55V.
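To illustrate the time-multiplexed crossbar emulation described above, here is a minimal Python sketch in which two plain arrays stand in for the neuron-state and synapse SRAMs, and neurons are updated one at a time per time-reference event. The sizes match the paper, but the LIF-like dynamics, threshold and random weights are illustrative assumptions, not ODIN’s actual datapath.

import numpy as np

N = 256
rng = np.random.default_rng(seed=0)
neuron_sram  = np.zeros(N)                       # stand-in for the 4kB neuron-state SRAM
synapse_sram = rng.standard_normal((N, N))       # stand-in for the 64kB synapse SRAM

def time_reference_event(input_spikes, threshold=1.0, leak=0.05):
    """Update all neurons sequentially, like the time-multiplexed controller."""
    out = np.zeros(N, dtype=bool)
    for j in range(N):                           # one neuron (and one SRAM row) at a time
        v = neuron_sram[j] + synapse_sram[j] @ input_spikes - leak
        out[j] = v > threshold
        neuron_sram[j] = 0.0 if out[j] else v    # write the updated state back
    return out

spikes_in  = rng.integers(0, 2, N)
spikes_out = time_reference_event(spikes_in)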

MorphIC: A 65-nm 738k-Synapse/mm2 Quad-Core Binary-Weight Digital Neuromorphic Processor With Stochastic Spike-Driven Online Learning, Charlotte Frenkel et al., 2019

In this work, a quad-core neuromorphic processor is presented.

The neuron model employed is the LIF one. Synapses are quantized down to 1-bit resolution, and online learning is enabled by a stochastic version of the SDSP rule. The chip is fabricated in 65nm CMOS, embedding 2k LIF neurons and 2M synapses, and reaching a density of 738k synapses per square millimeter.

The neuron interconnection is arranged as a hierarchical routing solution: mesh-based interconnectivity for out-of-chip communication; star-based connectivity for inter-core communication; crossbar-based interconnectivity for intra-core communication. 27 bits per neuron are allocated, allowing a fan-in of 1k neurons and a fan-out of 2k neurons for each neuron.

2020

Always-On, Sub-300-nW, Event-Driven Spiking Neural Network based on Spike-Driven Clock-Generation and Clock- and Power-Gating for an Ultra-Low-Power Intelligent Device, Dewei Wang et al., 2020

In this work, a synchronous architecture is proposed. The logic operates at Near-Threshold Voltage (NTV), and clock gating and power gating are used heavily to minimize power consumption, which stays below 300nW during idle operation. The chip targets always-on applications, like keyword spotting (KWS), and is prototyped in a 65nm CMOS process. The design is inference-only, with no online-learning capabilities.

The architecture belongs to the feed-forward category: 5 cores are used to implement fully connected spiking layers of Integrate and Fire (IF) neurons. To minimize power consumption, asynchronous wake-up circuits activate the layers only when there are incoming spikes.

On the GCSC and HeySnips datasets, the recognition accuracies are 91.8% and 95.8%, respectively. The total power consumption ranges between 75nW and 220nW.

2021

μBrain: An Event-Driven and Fully Synthesizable Architecture for Spiking Neural Networks, Jan Stuijt et al., 2021

This is an asynchronous digital architecture with no online-learning capabilities: it is an inference-only chip.

The bit precision and network topology are chosen at synthesis time, while the neuron parameters and synapse weights can be programmed on chip. The neuron model employed is the Integrate and Fire (IF) one, with no leakage; leakage can be emulated by adding an inhibitory input neuron that models it. A local clock is generated at neuron level when a spike arrives, so that the circuit consumes only static power when not operating. No time multiplexing is employed: the architecture is organised in a layer-by-layer fashion in which all the neurons operate in parallel (i.e. each core corresponds to a neuron).

2022

A nice survey paper that compares different ICs, both digital and mixed-signal ones.

ReckOn: A 28nm Sub-mm2 Task-Agnostic Spiking Recurrent Neural Network Processor Enabling On-Chip Learning over Second-Long Timescales, Charlotte Frenkel and Giacomo Indiveri, 2022

In this work, a Recurrent Spiking Neural Network (RSNN) processor is presented. The Verilog code is open source.

The key feature of this chip is its online learning capability, which uses a modified version of the feed-forward eligibility-traces algorithm, a bio-inspired approximation of the BackPropagation Through Time (BPTT) algorithm employed for artificial RNNs. The chip’s performance is validated on gesture recognition, keyword spotting and navigation tasks, within sub-150μW power and sub-square-millimeter area budgets.

SNE: an Energy-Proportional Digital Accelerator for Sparse Event-Based Convolutions, Alfio di Mauro et al., 2022

In this work, an inference-only digital chip is presented. The design is tuned towards processing the output of event cameras, employing convolution engines in hardware.

The novelty of this design is that, even though it is synchronous, the number of operations performed is proportional to the number of events recorded by the camera, which allows very efficient inference when dealing with sparse inputs (e.g. low-activity scenarios).

The design is validated on the IBM DVSGesture dataset, requiring 80μJ per inference when classifying samples, with a peak recognition accuracy of 92.80%. This design is also integrated in the PULP platform; the SoC that embeds it is described in Kraken: A Direct Event/Frame-Based Multi-sensor Fusion SoC for Ultra-Efficient Visual Processing in Nano-UAVs, Alfio Di Mauro et al., 2022.

The SystemVerilog code is open source!

Sparse Compressed Spiking Neural Network Accelerator for Object Detection, Hong-Han Lien and Tian-Sheuan Chang, 2022.

The neuron model employed in this work is a LIF one with a delta-shaped synaptic kernel. The architecture topology is feed-forward, with the neuron cores arranged either in a cascade fashion or in a configurable Processing Element (PE) array. The focus of this chip is to deal efficiently with the sparse nature of the activation maps in an SNN: the model is compressed using sparse data structures, coupled with pruning and 8-bit fixed-point arithmetic, to reduce the on-chip memory requirements. The SNN architecture is mixed with an ANN one.

The final implementation, validated on an object detection task, achieves 29 FPS on 1024x576 input frames, with an efficiency of 35.88 TOPS/W and 1.05mJ/frame, running at 500MHz and taped out in a TSMC 28nm CMOS process.

The object detection network is trained offline as an ANN and then converted to an SNN, using the IVS 3 classes dataset and achieving 71.5% mAP with on-chip inference.

Acknowledgements

I would like to thank Charlotte Frenkel for the valuable comments and suggestions.

Credits

The cover image is taken from Charlotte Frenkel’s thesis.

Authors

  • Fabrizio Ottati is a Ph.D. student in the HLS Laboratory of the Department of Electronics and Communications, Politecnico di Torino. His main interests are event-based cameras, digital hardware design and neuromorphic computing. He is one of the maintainers of two open source projects in the field of neuromorphic computing, Tonic and Expelliarmus, and one of the founders of Open Neuromorphic.
Built with Hugo
Theme Stack designed by Jimmy
+ \ No newline at end of file diff --git a/p/efficient-compression-for-event-based-data/file_read_benchmark.png b/p/efficient-compression-for-event-based-data/file_read_benchmark.png new file mode 100644 index 00000000..f448f3d3 Binary files /dev/null and b/p/efficient-compression-for-event-based-data/file_read_benchmark.png differ diff --git a/p/efficient-compression-for-event-based-data/file_read_benchmark_hua993d955fce821c195de229da3476b0b_77837_1024x0_resize_box_3.png b/p/efficient-compression-for-event-based-data/file_read_benchmark_hua993d955fce821c195de229da3476b0b_77837_1024x0_resize_box_3.png new file mode 100644 index 00000000..815a262e Binary files /dev/null and b/p/efficient-compression-for-event-based-data/file_read_benchmark_hua993d955fce821c195de229da3476b0b_77837_1024x0_resize_box_3.png differ diff --git a/p/efficient-compression-for-event-based-data/file_read_benchmark_hua993d955fce821c195de229da3476b0b_77837_120x120_fill_box_smart1_3.png b/p/efficient-compression-for-event-based-data/file_read_benchmark_hua993d955fce821c195de229da3476b0b_77837_120x120_fill_box_smart1_3.png new file mode 100644 index 00000000..f25fdd31 Binary files /dev/null and b/p/efficient-compression-for-event-based-data/file_read_benchmark_hua993d955fce821c195de229da3476b0b_77837_120x120_fill_box_smart1_3.png differ diff --git a/p/efficient-compression-for-event-based-data/file_read_benchmark_hua993d955fce821c195de229da3476b0b_77837_1600x0_resize_box_3.png b/p/efficient-compression-for-event-based-data/file_read_benchmark_hua993d955fce821c195de229da3476b0b_77837_1600x0_resize_box_3.png new file mode 100644 index 00000000..6f0650ca Binary files /dev/null and b/p/efficient-compression-for-event-based-data/file_read_benchmark_hua993d955fce821c195de229da3476b0b_77837_1600x0_resize_box_3.png differ diff --git a/p/efficient-compression-for-event-based-data/file_read_benchmark_hua993d955fce821c195de229da3476b0b_77837_480x0_resize_box_3.png b/p/efficient-compression-for-event-based-data/file_read_benchmark_hua993d955fce821c195de229da3476b0b_77837_480x0_resize_box_3.png new file mode 100644 index 00000000..42f67656 Binary files /dev/null and b/p/efficient-compression-for-event-based-data/file_read_benchmark_hua993d955fce821c195de229da3476b0b_77837_480x0_resize_box_3.png differ diff --git a/p/efficient-compression-for-event-based-data/file_read_benchmark_hua993d955fce821c195de229da3476b0b_77837_800x0_resize_box_3.png b/p/efficient-compression-for-event-based-data/file_read_benchmark_hua993d955fce821c195de229da3476b0b_77837_800x0_resize_box_3.png new file mode 100644 index 00000000..22fffc66 Binary files /dev/null and b/p/efficient-compression-for-event-based-data/file_read_benchmark_hua993d955fce821c195de229da3476b0b_77837_800x0_resize_box_3.png differ diff --git a/p/efficient-compression-for-event-based-data/file_read_benchmark_log.png b/p/efficient-compression-for-event-based-data/file_read_benchmark_log.png new file mode 100644 index 00000000..76003b25 Binary files /dev/null and b/p/efficient-compression-for-event-based-data/file_read_benchmark_log.png differ diff --git a/p/efficient-compression-for-event-based-data/file_read_benchmark_white.png b/p/efficient-compression-for-event-based-data/file_read_benchmark_white.png new file mode 100644 index 00000000..cda81565 Binary files /dev/null and b/p/efficient-compression-for-event-based-data/file_read_benchmark_white.png differ diff --git a/p/efficient-compression-for-event-based-data/index.html b/p/efficient-compression-for-event-based-data/index.html new file 
mode 100644 index 00000000..d70ee7e8 --- /dev/null +++ b/p/efficient-compression-for-event-based-data/index.html @@ -0,0 +1,20 @@ +Efficient compression for event-based data
Featured image of post Efficient compression for event-based data

Efficient compression for event-based data

Choosing a good trade-off between disk footprint and file loading times.

Efficient compression for event-based data

Datasets grow larger in size

As neuromorphic algorithms tackle more complex tasks that are linked to bigger datasets, and event cameras mature to have higher spatial resolution, it is worth looking at how to encode that data efficiently when storing it on disk. To give you an example, Prophesee’s latest automotive object detection dataset is some 3.5 TB in size for under 40h of recordings with a single camera.

Event cameras record with fine-grained temporal resolution

In contrast to conventional cameras, event cameras output changes in illumination, which is already a form of compression. But the output data rate is still a lot higher than that of conventional cameras, because of the microsecond temporal resolution that event cameras are able to record with. When streaming data, we get millions of tuples of microsecond timestamps, x/y coordinates and polarity indicators per second, which look nothing like a frame but are a list of events:

#  time, x, y, polarity
[(18661,  762, 147, 1)
 (18669, 1161,  72, 1)
 (18679, 1073,  23, 0)
 (18688,  799, 304, 0)
 (18694,  234, 275, 1)]
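In code, such a stream maps naturally onto a NumPy structured array with one field per column; the field names and dtypes below are one possible choice (similar to what the benchmark notebook accompanying this post uses), not a fixed standard.

import numpy as np

# one record per event: timestamp, x/y coordinates, polarity
event_dtype = np.dtype([("t", np.int64), ("x", np.int16), ("y", np.int16), ("p", np.uint8)])

events = np.array(
    [(18661,  762, 147, 1),
     (18669, 1161,  72, 1),
     (18679, 1073,  23, 0)],
    dtype=event_dtype,
)
print(events["t"])     # per-field access: array([18661, 18669, 18679])
print(events.nbytes)   # 13 bytes per event with this layout -> 39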

File size vs reading speed trade-off

So how can we store such data efficiently? A straightforward idea is to resort to formats such as hdf5 and numpy and store the arrays of events directly. But without exploiting any structure in the recorded data, those uncompressed formats end up having the largest file footprint. For our example automotive dataset, this would result in some 7-8 TB of data, which is undesirable. Event camera manufacturers have come up with ways to encode event streams more efficiently. And not only are we concerned about the size of event files on disk: we also want to be able to read them back into memory as fast as possible! In the following figure we plot the results of our benchmark of different file-type encodings and of the software frameworks that can decode those files.

Comparison of file size and read speed for different encodings and software tools.

Ideally, we want to be close to the origin, where we read fast and compression is high. The file size depends on the encoding, whereas the reading speed depends on the particular implementation/framework with which files are read. In terms of file size, we can see that numpy doesn’t use any compression whatsoever, resulting in a 1.7GB file for our sample recording. Prophesee’s evt3 and the generic lossless brotli formats achieve the best compression. In terms of reading speed, numpy is the fastest as it doesn’t deal with any compression on disk. Decompressing the events from disk using h5py, on the other hand, is by far the slowest. Using Expelliarmus and the evt2 file format, we get very close to numpy reading speeds while at the same time only using a fourth of the disk space. For more information about Prophesee event encoding formats, check out this blog post.
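As a small sketch of this trade-off (file names are placeholders), storing the same structured array with numpy gives the fastest reads and the largest file, while gzip-compressed HDF5 shrinks the file at the cost of decompression time on every read:

import h5py
import numpy as np

events = np.load("recording.npy")          # placeholder: an existing event array

# uncompressed numpy: largest footprint, fastest to read back
np.save("events_plain.npy", events)

# gzip-compressed HDF5: smaller file, but slower to read back
with h5py.File("events_gzip.h5", "w") as f:
    f.create_dataset("events", data=events, compression="gzip")

with h5py.File("events_gzip.h5", "r") as f:
    events_back = f["events"][:]           # decompression happens here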

Capable frameworks

The authors of this post have released Expelliarmus as a lightweight, well-tested, pip-installable framework that can read and write different formats easily. If you’re working with dat, evt2 or evt3 formats, why not give it a try?
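For instance, reading a Prophesee raw file into a structured NumPy array takes a couple of lines (the file name below is a placeholder):

from expelliarmus import Wizard

wizard = Wizard(encoding="evt3")            # "dat" and "evt2" are supported too
events = wizard.read("recording_evt3.raw")  # structured array of (t, x, y, p) events
print(events.shape, events.dtype)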

Summary

When training spiking neural networks on event-based data, we want to be able to feed new data to the network as fast as possible. But given the high data rate of an event camera, the amount of data quickly becomes an issue in itself, especially for more complex tasks. So we want to choose a good trade-off between a dataset size that’s manageable and reading speed. We hope that this article will help future groups that record large-scale datasets to pick a good encoding format.

Authors

  • Gregor Lenz is a research engineer at SynSense, where he works on machine learning pipelines that can train and deploy robust models on neuromorphic hardware. He holds a PhD in neuromorphic engineering from Sorbonne University in Paris, France.
  • Fabrizio Ottati is a Ph.D. student in the HLS Laboratory of the Department of Electronics and Communications, Politecnico di Torino. His main interests are event-based cameras, digital hardware design and neuromorphic computing. He is one of the maintainers of two open source projects in the field of neuromorphic computing, Tonic and Expelliarmus, and one of the founders of Open Neuromorphic.
  • Alexandre Marcireau.

Comments

The aedat4 file contains IMU events as well as change detection events, which increases the file size artificially in contrast to the other benchmarked formats.

Built with Hugo
Theme Stack designed by Jimmy
+ \ No newline at end of file diff --git a/p/efficient-compression-for-event-based-data/plot-generation.ipynb b/p/efficient-compression-for-event-based-data/plot-generation.ipynb new file mode 100644 index 00000000..6c0f32b3 --- /dev/null +++ b/p/efficient-compression-for-event-based-data/plot-generation.ipynb @@ -0,0 +1,561 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "from expelliarmus import Wizard\n", + "import aedat\n", + "import event_stream\n", + "import gc\n", + "import hashlib\n", + "import h5py\n", + "import aestream\n", + "import numpy as np\n", + "import timeit\n", + "import requests\n", + "import pickle\n", + "import os\n", + "import loris\n", + "import brotli\n", + "import json\n", + "from pathlib import Path" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# fname = \"driving_sample\"\n", + "fname = \"construction\" # use this one if you want to include aedat and eventstream benchmarks\n", + "\n", + "# where to download and generate all the benchmark data\n", + "folder = Path(\"data/file-benchmark\")\n", + "folder.mkdir(parents=True, exist_ok=True)\n", + "\n", + "# key is the name of the encoding, value is the file name ending\n", + "extension_map = {\n", + " \"aedat\": \".aedat4\",\n", + " \"dat\": \".dat\",\n", + " \"evt2\": \"_evt2.raw\",\n", + " \"evt3\": \"_evt3.raw\",\n", + " \"hdf5\": \".hdf5\",\n", + " \"hdf5_lzf\": \"_lzf.hdf5\",\n", + " \"hdf5_gzip\": \"_gzip.hdf5\",\n", + " \"numpy\": \".npy\",\n", + " \"loris\": \".es\",\n", + " \"eventstream\": \".es\",\n", + " \"brotli\": \".bin.br\",\n", + " \"undr_numpy\": \".dvs\",\n", + " \"undr_brotli_11\": \".11.dvs.br\",\n", + " \"undr_brotli_6\": \".6.dvs.br\",\n", + " \"undr_brotli_1\": \".1.dvs.br\",\n", + "}\n", + "get_fpath = lambda encoding: f\"{folder}/{fname}{extension_map[encoding]}\"" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Download the 'base' files\n", + "These are the files with the original data, which will be loaded and then converted to all other formats under test. Currently you can choose between events from a Prophesee raw evt3 or an aedat4 sample file. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "def download_file_from_url(file_path, url):\n", + " print(f\"Downloading file to {file_path}... 
\")\n", + " r = requests.get(\n", + " url,\n", + " allow_redirects=True,\n", + " )\n", + " with open(f\"{file_path}.download\", \"wb\") as file:\n", + " file.write(r.content)\n", + " r.raise_for_status()\n", + " os.rename(f\"{file_path}.download\", file_path)\n", + " print(\"done!\")\n", + "\n", + "\n", + "if fname == \"driving_sample\":\n", + " fpath = get_fpath(\"evt3\")\n", + " if not Path(fpath).is_file():\n", + " download_file_from_url(\n", + " fpath, \"https://dataset.prophesee.ai/index.php/s/nVcLLdWAnNzrmII/download\"\n", + " )\n", + " wizard = Wizard(encoding=\"evt3\")\n", + " events_ti8_xi2_yi2_pu1 = wizard.read(fpath)\n", + "\n", + "\n", + "if fname == \"construction\":\n", + " aedat_fpath = get_fpath(\"aedat\")\n", + " if not Path(aedat_fpath).is_file():\n", + " download_file_from_url(\n", + " aedat_fpath,\n", + " \"https://cloudstor.aarnet.edu.au/plus/s/ORQ2oOz9NfwiHLZ/download?path=%2F&files=construction.aedat4\",\n", + " )\n", + " decoder = aedat.Decoder(aedat_fpath) # type: ignore\n", + " width = 0\n", + " height = 0\n", + " for stream in decoder.id_to_stream().values():\n", + " if stream[\"type\"] == \"events\":\n", + " width = stream[\"width\"]\n", + " height = stream[\"height\"]\n", + " break\n", + " assert width != 0\n", + " assert height != 0\n", + " events_tu8_xu2_yu2_onb = np.concatenate(\n", + " [packet[\"events\"] for packet in decoder if \"events\" in packet]\n", + " )\n", + " assert np.count_nonzero(np.diff(events_tu8_xu2_yu2_onb[\"t\"].astype(\" str:\n", + " if normalize_time:\n", + " events[\"t\"] -= events[\"t\"][0]\n", + " result = hashlib.sha3_224()\n", + " result.update(events.astype(np.dtype([(\"t\", \" np.ndarray:\n", + " wizard = Wizard(encoding)\n", + " wizard.set_file(fpath)\n", + " return wizard.read(fpath)\n", + " assert hash(expelliarmus_read()) == reference_hash\n", + " expelliarmus_times.append(timeit.timeit(expelliarmus_read, number=REPEAT) / REPEAT)\n", + " expelliarmus_sizes.append(get_fsize_MiB(Path(fpath)))\n", + "\n", + "# hdf5 variants\n", + "hdf5_times = []\n", + "hdf5_sizes = []\n", + "for encoding in hdf5_encodings:\n", + " print(f\"Benchmarking HDF5 ({encoding}).\")\n", + " gc.collect()\n", + " fpath = get_fpath(encoding)\n", + " def hdf5_read() -> np.ndarray:\n", + " with h5py.File(fpath) as file:\n", + " return file[\"events\"][:] # type: ignore\n", + " assert hash(hdf5_read()) == reference_hash\n", + " hdf5_times.append(timeit.timeit(hdf5_read, number=REPEAT) / REPEAT)\n", + " hdf5_sizes.append(get_fsize_MiB(Path(fpath)))\n", + "\n", + "# brotli (UNDR)\n", + "brotli_times = []\n", + "brotli_sizes = []\n", + "for quality in brotli_qualities:\n", + " print(f\"Benchmarking Brotli (Q={quality}).\")\n", + " gc.collect()\n", + " fpath = get_fpath(f\"undr_brotli_{quality}\")\n", + " def brotli_read() -> np.ndarray:\n", + " with open(fpath, \"rb\") as file:\n", + " return np.frombuffer(brotli.decompress(file.read()), dtype=np.dtype([(\"t\", \" np.ndarray:\n", + " return np.load(fpath)\n", + "numpy_time = timeit.timeit(numpy_read, number=REPEAT) / REPEAT\n", + "numpy_size = get_fsize_MiB(Path(fpath))\n", + "\n", + "# numpy (UNDR)\n", + "print(\"Benchmarking NumPy (UNDR).\")\n", + "gc.collect()\n", + "fpath = get_fpath(\"undr_numpy\")\n", + "def undr_numpy_read() -> np.ndarray:\n", + " return np.fromfile(fpath, dtype=np.dtype([(\"t\", \" np.ndarray:\n", + " decoder = aedat.Decoder(fpath) # type: ignore\n", + " return np.concatenate([packet[\"events\"] for packet in decoder if \"events\" in packet])\n", + "assert hash(aedat_read(), 
normalize_time=True) == reference_hash\n", + "aedat_time = timeit.timeit(aedat_read, number=REPEAT)/ REPEAT\n", + "aedat_size = get_fsize_MiB(Path(fpath))\n", + "\n", + "# loris\n", + "print(\"Benchmarking loris.\")\n", + "gc.collect()\n", + "fpath = get_fpath(\"loris\")\n", + "def loris_read() -> np.ndarray:\n", + " return loris.read_file(fpath)[\"events\"] # type: ignore\n", + "assert hash(loris_read()) == reference_hash\n", + "loris_time = timeit.timeit(loris_read, number=REPEAT) / REPEAT\n", + "loris_size = get_fsize_MiB(Path(fpath))\n", + "\n", + "# eventstream\n", + "print(\"Benchmarking eventstream.\")\n", + "gc.collect()\n", + "fpath = get_fpath(\"eventstream\")\n", + "def eventstream_read() -> np.ndarray:\n", + " with event_stream.Decoder(fpath) as decoder:\n", + " return np.concatenate([packet for packet in decoder])\n", + "assert hash(eventstream_read()) == reference_hash\n", + "eventstream_time = timeit.timeit(eventstream_read, number=REPEAT) / REPEAT\n", + "eventstream_size = get_fsize_MiB(Path(fpath))\n", + "\n", + "# aestream\n", + "print(\"Benchmarking AEStream.\")\n", + "gc.collect()\n", + "fpath = get_fpath(\"dat\")\n", + "def aestream_read() -> np.ndarray:\n", + " return aestream.FileInput(fpath, (640, 480)).load()\n", + "\n", + "assert hash(aestream_read()) == reference_hash\n", + "aestream_time = timeit.timeit(aestream_read, number=REPEAT) / REPEAT\n", + "aestream_size = get_fsize_MiB(Path(fpath))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import dv_processing as dv\n", + "\n", + "print(\"Benchmarking DV.\")\n", + "gc.collect()\n", + "fpath = str(folder / \"construction_rewritten.aedat4\")\n", + "\n", + "def dv_read() -> np.ndarray:\n", + " reader = dv.io.MonoCameraRecording(fpath)\n", + " event_slices = []\n", + " while reader.isRunning():\n", + " slice = reader.getNextEventBatch()\n", + " if slice is None:\n", + " break\n", + " event_slices.append(slice.numpy())\n", + " return np.concatenate(event_slices)\n", + "\n", + "dv_time = timeit.timeit(dv_read, number=REPEAT) / REPEAT\n", + "dv_size = get_fsize_MiB(Path(fpath))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Save the results\n", + "\n", + "results = (\n", + " list(\n", + " zip(\n", + " raw_encodings,\n", + " [\"expelliarmus\"] * len(raw_encodings),\n", + " expelliarmus_times,\n", + " expelliarmus_sizes,\n", + " )\n", + " )\n", + " + list(zip(hdf5_encodings, [\"h5py\"] * len(hdf5_encodings), hdf5_times, hdf5_sizes))\n", + " + list(\n", + " zip(\n", + " [f\"numpy/brotli (Q={quality})\" for quality in brotli_qualities],\n", + " [\"numpy/brotli\"] * len(brotli_qualities),\n", + " brotli_times,\n", + " brotli_sizes,\n", + " )\n", + " )\n", + " + [\n", + " (\"numpy (pickle)\", \"numpy\", numpy_time, numpy_size),\n", + " (\"numpy (UNDR)\", \"numpy\", undr_numpy_time, undr_numpy_size),\n", + " (\"aedat4\", \"aedat\", aedat_time, aedat_size),\n", + " (\"aedat4\", \"DV\", dv_time, dv_size),\n", + " (\"eventstream\", \"loris\", loris_time, loris_size),\n", + " (\"eventstream\", \"event_stream\", eventstream_time, eventstream_size),\n", + " (\"dat\", \"AEStream\", aestream_time, aestream_size),\n", + " ]\n", + ")\n", + "\n", + "with open(\"results.json\", \"w\") as results_file:\n", + " json.dump(results, results_file, indent=4)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "## Plot results\n", + 
"\n", + "import pandas\n", + "import plotly.express\n", + "\n", + "with open(\"results.json\") as results_file:\n", + " results = json.load(results_file)\n", + "dataframe = pandas.DataFrame(\n", + " {\n", + " \"Encoding\": [result[0] for result in results],\n", + " \"Framework\": [result[1] for result in results],\n", + " \"Read time [s]\": [result[2] for result in results],\n", + " \"File size [MiB]\": [result[3] for result in results],\n", + " }\n", + ")\n", + "\n", + "title = f\"Reading the same {round(number_of_events / 1e6)} million events from different file formats.\"\n", + "\n", + "figure = plotly.express.scatter(\n", + " dataframe,\n", + " x=\"Read time [s]\",\n", + " y=\"File size [MiB]\",\n", + " color=\"Framework\",\n", + " symbol=\"Encoding\",\n", + " template=\"plotly_dark\",\n", + " title=title,\n", + ")\n", + "figure.update_traces(marker_size=13)\n", + "figure.update_layout(height=600, width=900)\n", + "figure.write_image(\"file_read_benchmark.png\")\n", + "\n", + "\n", + "figure = plotly.express.scatter(\n", + " dataframe,\n", + " x=\"Read time [s]\",\n", + " y=\"File size [MiB]\",\n", + " color=\"Framework\",\n", + " symbol=\"Encoding\",\n", + " template=\"plotly_white\",\n", + ")\n", + "figure.update_traces(marker_size=13)\n", + "figure.update_layout(height=400, width=1000, margin=dict(l=10,r=10,b=10,t=10),)\n", + "figure.write_image(\"file_read_benchmark_white.png\")\n", + "\n", + "figure = plotly.express.scatter(\n", + " dataframe,\n", + " x=\"Read time [s]\",\n", + " y=\"File size [MiB]\",\n", + " color=\"Framework\",\n", + " symbol=\"Encoding\",\n", + " template=\"plotly_dark\",\n", + " title=title,\n", + " log_x=True,\n", + " log_y=True,\n", + ")\n", + "figure.update_traces(marker_size=13)\n", + "figure.update_layout(height=600, width=900)\n", + "figure.write_image(\"file_read_benchmark_log.png\")\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.16" + }, + "vscode": { + "interpreter": { + "hash": "aee8b7b246df8f9039afb4144a1f6fd8d2ca17a180786b69acc140d282b71a49" + } + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/p/efficient-compression-for-event-based-data/requirements.txt b/p/efficient-compression-for-event-based-data/requirements.txt new file mode 100644 index 00000000..3b6604d4 --- /dev/null +++ b/p/efficient-compression-for-event-based-data/requirements.txt @@ -0,0 +1,12 @@ +expelliarmus +aedat +event_stream +h5py +numpy +requests +loris +brotli +plotly +pandas +kaleido +git+https://github.com/norse/aestream diff --git a/p/efficient-compression-for-event-based-data/results.json b/p/efficient-compression-for-event-based-data/results.json new file mode 100644 index 00000000..31926498 --- /dev/null +++ b/p/efficient-compression-for-event-based-data/results.json @@ -0,0 +1,98 @@ +[ + [ + "dat", + "expelliarmus", + 0.4974973459728062, + 831 + ], + [ + "evt2", + "expelliarmus", + 1.2683103151619435, + 514 + ], + [ + "evt3", + "expelliarmus", + 1.348788413591683, + 615 + ], + [ + "hdf5", + "h5py", + 0.6337204953655601, + 1662 + ], + [ + "hdf5_lzf", + "h5py", + 3.5057859893888237, + 961 + ], + [ + "hdf5_gzip", + "h5py", + 
5.9625208144076165, + 552 + ], + [ + "numpy/brotli (Q=1)", + "numpy/brotli", + 7.147578082233667, + 495 + ], + [ + "numpy/brotli (Q=6)", + "numpy/brotli", + 6.596753144916147, + 475 + ], + [ + "numpy/brotli (Q=11)", + "numpy/brotli", + 8.44624579353258, + 402 + ], + [ + "numpy (pickle)", + "numpy", + 0.3102571479976177, + 1662 + ], + [ + "numpy (UNDR)", + "numpy", + 0.20928959660232066, + 1350 + ], + [ + "aedat4", + "aedat", + 5.702251439262182, + 1015 + ], + [ + "aedat4", + "DV", + 5.626591887976974, + 901 + ], + [ + "eventstream", + "loris", + 2.162490014266223, + 520 + ], + [ + "eventstream", + "event_stream", + 2.5804718364030124, + 520 + ], + [ + "dat", + "AEStream", + 0.9845915909856557, + 831 + ] +] \ No newline at end of file diff --git a/p/open-neuromorphic/ONM.png b/p/open-neuromorphic/ONM.png new file mode 100644 index 00000000..bc5eadbf Binary files /dev/null and b/p/open-neuromorphic/ONM.png differ diff --git a/p/open-neuromorphic/ONM_hu581cb6e07016d86819909a21b22e28f8_91275_120x120_fill_box_smart1_3.png b/p/open-neuromorphic/ONM_hu581cb6e07016d86819909a21b22e28f8_91275_120x120_fill_box_smart1_3.png new file mode 100644 index 00000000..cd761e89 Binary files /dev/null and b/p/open-neuromorphic/ONM_hu581cb6e07016d86819909a21b22e28f8_91275_120x120_fill_box_smart1_3.png differ diff --git a/p/open-neuromorphic/ONM_hu581cb6e07016d86819909a21b22e28f8_91275_1600x0_resize_box_3.png b/p/open-neuromorphic/ONM_hu581cb6e07016d86819909a21b22e28f8_91275_1600x0_resize_box_3.png new file mode 100644 index 00000000..f4226f6f Binary files /dev/null and b/p/open-neuromorphic/ONM_hu581cb6e07016d86819909a21b22e28f8_91275_1600x0_resize_box_3.png differ diff --git a/p/open-neuromorphic/ONM_hu581cb6e07016d86819909a21b22e28f8_91275_800x0_resize_box_3.png b/p/open-neuromorphic/ONM_hu581cb6e07016d86819909a21b22e28f8_91275_800x0_resize_box_3.png new file mode 100644 index 00000000..6f390350 Binary files /dev/null and b/p/open-neuromorphic/ONM_hu581cb6e07016d86819909a21b22e28f8_91275_800x0_resize_box_3.png differ diff --git a/p/open-neuromorphic/index.html b/p/open-neuromorphic/index.html new file mode 100644 index 00000000..755b1a5b --- /dev/null +++ b/p/open-neuromorphic/index.html @@ -0,0 +1,9 @@ +Open Neuromorphic
Featured image of post Open Neuromorphic

Open Neuromorphic

Organization that aims at providing one place to reference all relevant open-source projects in the neuromorphic research domain.

This organisation is created by a loose collective of open source collaborators across academia, industry and individual contributors. What connects us is the love for building tools that can be used in the neuromorphic community and we want to share ownership of this vision.

Open Neuromorphic (ONM) provides the following things:

  1. A curated list of software frameworks to make it easier to find the tool you need.
  2. A platform for your code. If you wish to create a new repository or migrate your existing code to ONM, please get in touch with us.
  3. Educational content to get you started in the neuromorphic world.
  4. Events about neuromorphic research and software, with contributions from both academia and industry.

Projects that we list here can fall into this non-exclusive list of categories:

  • Spiking Neural Networks (SNNs) training and/or inference, for both ML and neuroscience applications.
  • Event-based sensors data handling.
  • Digital hardware designs for neuromorphic applications.
  • Mixed-signal hardware designs for neuromorphic applications.

Get in touch with us if you wish to give a talk, write an article or to know more about the neuromorphic world.

Built with Hugo
Theme Stack designed by Jimmy
+ \ No newline at end of file diff --git a/p/snn-library-benchmarks/data-generation.ipynb b/p/snn-library-benchmarks/data-generation.ipynb new file mode 100644 index 00000000..c2815b46 --- /dev/null +++ b/p/snn-library-benchmarks/data-generation.ipynb @@ -0,0 +1,484 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Installation\n", + "Start with creating a new Conda environment\n", + "```\n", + "conda create -n frameworks pip\n", + "conda activate frameworks\n", + "```\n", + "Then install PyTorch (adjust for your CUDA version). Instructions available [here](https://pytorch.org/get-started/locally/)\n", + "```\n", + "conda install pytorch torchvision torchaudio pytorch-cuda=11.7 -c pytorch -c nvidia\n", + "```\n", + "Install the benchmarked frameworks from PyPI\n", + "```\n", + "pip install -r requirements.txt\n", + "```\n", + "\n", + "This benchmark code is an adaptation of Rockpool's [benchmark script](https://gitlab.com/synsense/rockpool/-/blob/develop/rockpool/utilities/benchmarking/benchmark_utils.py?ref_type=heads). " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import torch\n", + "import torch.nn as nn\n", + "import numpy as np\n", + "from utils import timeit, benchmark_framework" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def rockpool_torch():\n", + " from rockpool.nn.modules import LIFTorch, LinearTorch\n", + " from rockpool.nn.combinators import Sequential\n", + " import rockpool\n", + "\n", + " benchmark_title = f\"Rockpool
v{rockpool.__version__}\"\n", + "\n", + " def prepare_fn(batch_size, n_steps, n_neurons, n_layers, device):\n", + " model = Sequential(\n", + " LinearTorch(shape=(n_neurons, n_neurons)),\n", + " LIFTorch(n_neurons),\n", + " ).to(device)\n", + " input_static = torch.randn(batch_size, n_steps, n_neurons).to(device)\n", + " with torch.no_grad():\n", + " model(input_static)\n", + " return dict(model=model, input=input_static, n_neurons=n_neurons)\n", + "\n", + " def forward_fn(bench_dict):\n", + " model, input_static = bench_dict[\"model\"], bench_dict[\"input\"]\n", + " output = model(input_static)[0]\n", + " bench_dict[\"output\"] = output\n", + " return bench_dict\n", + "\n", + " def backward_fn(bench_dict):\n", + " output = bench_dict[\"output\"]\n", + " loss = output.sum()\n", + " loss.backward(retain_graph=True)\n", + "\n", + " return prepare_fn, forward_fn, backward_fn, benchmark_title" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def rockpool_exodus():\n", + " from rockpool.nn.modules import LIFExodus, LinearTorch\n", + " from rockpool.nn.combinators import Sequential\n", + " import rockpool\n", + "\n", + " benchmark_title = f\"Rockpool EXODUS
v{rockpool.__version__}\"\n", + "\n", + " def prepare_fn(batch_size, n_steps, n_neurons, n_layers, device):\n", + " model = Sequential(\n", + " LinearTorch(shape=(n_neurons, n_neurons)),\n", + " LIFExodus(n_neurons),\n", + " ).to(device)\n", + " input_static = torch.randn(batch_size, n_steps, n_neurons).to(device)\n", + " with torch.no_grad():\n", + " model(input_static)\n", + " return dict(model=model, input=input_static, n_neurons=n_neurons)\n", + "\n", + " def forward_fn(bench_dict):\n", + " model, input_static = bench_dict[\"model\"], bench_dict[\"input\"]\n", + " output = model(input_static)[0]\n", + " bench_dict[\"output\"] = output\n", + " return bench_dict\n", + "\n", + " def backward_fn(bench_dict):\n", + " output = bench_dict[\"output\"]\n", + " loss = output.sum()\n", + " loss.backward(retain_graph=True)\n", + "\n", + " return prepare_fn, forward_fn, backward_fn, benchmark_title" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def sinabs():\n", + " from sinabs.layers import LIF\n", + " import sinabs\n", + "\n", + " benchmark_title = f\"Sinabs
v{sinabs.__version__}\"\n", + "\n", + " def prepare_fn(batch_size, n_steps, n_neurons, n_layers, device):\n", + " model = nn.Sequential(\n", + " nn.Linear(n_neurons, n_neurons),\n", + " LIF(tau_mem=torch.tensor(10.0)),\n", + " ).to(device)\n", + " input_static = torch.randn(batch_size, n_steps, n_neurons).to(device)\n", + " with torch.no_grad():\n", + " model(input_static)\n", + " return dict(model=model, input=input_static, n_neurons=n_neurons)\n", + "\n", + " def forward_fn(bench_dict):\n", + " model, input_static = bench_dict[\"model\"], bench_dict[\"input\"]\n", + " sinabs.reset_states(model)\n", + " bench_dict[\"output\"] = model(input_static)\n", + " return bench_dict\n", + "\n", + " def backward_fn(bench_dict):\n", + " output = bench_dict[\"output\"]\n", + " loss = output.sum()\n", + " loss.backward(retain_graph=True)\n", + "\n", + " return prepare_fn, forward_fn, backward_fn, benchmark_title" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def sinabs_exodus():\n", + " from sinabs.exodus.layers import LIF\n", + " import sinabs\n", + "\n", + " benchmark_title = f\"Sinabs EXODUS
v{sinabs.exodus.__version__}\"\n", + "\n", + " def prepare_fn(batch_size, n_steps, n_neurons, n_layers, device):\n", + " model = nn.Sequential(\n", + " nn.Linear(n_neurons, n_neurons),\n", + " LIF(tau_mem=torch.tensor(10.0)),\n", + " ).to(device)\n", + " input_static = torch.randn(batch_size, n_steps, n_neurons).to(device)\n", + " with torch.no_grad():\n", + " model(input_static)\n", + " return dict(model=model, input=input_static, n_neurons=n_neurons)\n", + "\n", + " def forward_fn(bench_dict):\n", + " model, input_static = bench_dict[\"model\"], bench_dict[\"input\"]\n", + " sinabs.reset_states(model)\n", + " bench_dict[\"output\"] = model(input_static)\n", + " return bench_dict\n", + "\n", + " def backward_fn(bench_dict):\n", + " output = bench_dict[\"output\"]\n", + " loss = output.sum()\n", + " loss.backward(retain_graph=True)\n", + "\n", + " return prepare_fn, forward_fn, backward_fn, benchmark_title" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def norse():\n", + " from norse.torch.module.lif import LIF\n", + " from norse.torch import SequentialState\n", + " import norse\n", + "\n", + " benchmark_title = f\"Norse
v{norse.__version__}\"\n", + "\n", + " def prepare_fn(batch_size, n_steps, n_neurons, n_layers, device):\n", + " model = SequentialState(\n", + " nn.Linear(n_neurons, n_neurons),\n", + " LIF(),\n", + " )\n", + " # model = torch.compile(model, mode=\"max-autotune\")\n", + " model = model.to(device)\n", + " input_static = torch.randn(n_steps, batch_size, n_neurons).to(device)\n", + " with torch.no_grad():\n", + " model(input_static)\n", + " # output.sum().backward() # JIT compile everything\n", + " return dict(model=model, input=input_static, n_neurons=n_neurons)\n", + "\n", + " def forward_fn(bench_dict):\n", + " model, input_static = bench_dict[\"model\"], bench_dict[\"input\"]\n", + " bench_dict[\"output\"] = model(input_static)[0]\n", + " return bench_dict\n", + "\n", + " def backward_fn(bench_dict):\n", + " output = bench_dict[\"output\"]\n", + " loss = output.sum()\n", + " loss.backward(retain_graph=True)\n", + "\n", + " return prepare_fn, forward_fn, backward_fn, benchmark_title" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def snntorch():\n", + " import snntorch\n", + "\n", + " benchmark_title = f\"snnTorch
v{snntorch.__version__}\"\n", + "\n", + " def prepare_fn(batch_size, n_steps, n_neurons, n_layers, device):\n", + " class Model(nn.Module):\n", + " def __init__(self, beta: float = 0.95):\n", + " super().__init__()\n", + " self.fc = nn.Linear(n_neurons, n_neurons)\n", + " self.lif = snntorch.Leaky(beta=beta)\n", + " self.mem = self.lif.init_leaky()\n", + "\n", + " def forward(self, x):\n", + " output = []\n", + " mem = self.mem\n", + " for inp in x:\n", + " cur = self.fc(inp)\n", + " spk, mem = self.lif(cur, mem)\n", + " output.append(spk)\n", + " return torch.stack(output)\n", + "\n", + " model = Model()\n", + " # model = torch.compile(model, mode=\"max-autotune\")\n", + " model = model.to(device)\n", + " input_static = torch.randn(n_steps, batch_size, n_neurons).to(device)\n", + " with torch.no_grad():\n", + " model(input_static)\n", + " return dict(model=model, input=input_static, n_neurons=n_neurons)\n", + "\n", + " def forward_fn(bench_dict):\n", + " model, input_static = bench_dict[\"model\"], bench_dict[\"input\"]\n", + " bench_dict[\"output\"] = model(input_static)\n", + " return bench_dict\n", + "\n", + " def backward_fn(bench_dict):\n", + " output = bench_dict[\"output\"]\n", + " loss = output.sum()\n", + " loss.backward(retain_graph=True)\n", + "\n", + " return prepare_fn, forward_fn, backward_fn, benchmark_title" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# mix of https://spikingjelly.readthedocs.io/zh_CN/latest/activation_based_en/basic_concept.html#step-mode\n", + "# and https://github.com/fangwei123456/spikingjelly/blob/master/spikingjelly/activation_based/examples/rsnn_sequential_fmnist.py\n", + "def spikingjelly():\n", + " from spikingjelly.activation_based import neuron, surrogate, functional, layer\n", + "\n", + " benchmark_title = f\"SpikingJelly PyTorch
v0.0.0.0.15\"\n", + "\n", + " def prepare_fn(batch_size, n_steps, n_neurons, n_layers, device):\n", + " class Model(nn.Module):\n", + " def __init__(self, tau=5.0):\n", + " super().__init__()\n", + " self.model = nn.Sequential(\n", + " layer.Linear(n_neurons, n_neurons),\n", + " neuron.LIFNode(tau=tau, surrogate_function=surrogate.ATan(), step_mode='m'),\n", + " )\n", + "\n", + " def forward(self, x):\n", + " functional.reset_net(self.model)\n", + " return self.model(x)\n", + "\n", + " model = Model().to(device)\n", + " input_static = torch.randn(n_steps, batch_size, n_neurons).to(device)\n", + " with torch.no_grad():\n", + " model(input_static)\n", + " return dict(model=model, input=input_static, n_neurons=n_neurons)\n", + "\n", + " def forward_fn(bench_dict):\n", + " model, input_static = bench_dict[\"model\"], bench_dict[\"input\"]\n", + " bench_dict[\"output\"] = model(input_static)\n", + " return bench_dict\n", + "\n", + " def backward_fn(bench_dict):\n", + " output = bench_dict[\"output\"]\n", + " loss = output.sum()\n", + " loss.backward(retain_graph=True)\n", + "\n", + " return prepare_fn, forward_fn, backward_fn, benchmark_title\n", + "\n", + "\n", + "def spikingjelly_cupy():\n", + " from spikingjelly.activation_based import neuron, surrogate, functional, layer\n", + "\n", + " benchmark_title = f\"SpikingJelly CuPy
v0.0.0.0.15\"\n", + "\n", + " def prepare_fn(batch_size, n_steps, n_neurons, n_layers, device):\n", + " class Model(nn.Module):\n", + " def __init__(self, tau=5.0):\n", + " super().__init__()\n", + " self.model = nn.Sequential(\n", + " layer.Linear(n_neurons, n_neurons),\n", + " neuron.LIFNode(tau=tau, surrogate_function=surrogate.ATan(), step_mode='m'),\n", + " )\n", + " functional.set_backend(self.model, backend='cupy')\n", + "\n", + " def forward(self, x):\n", + " functional.reset_net(self.model)\n", + " return self.model(x)\n", + "\n", + " model = Model().to(device)\n", + " input_static = torch.randn(n_steps, batch_size, n_neurons).to(device)\n", + " with torch.no_grad():\n", + " model(input_static)\n", + " return dict(model=model, input=input_static, n_neurons=n_neurons)\n", + "\n", + " def forward_fn(bench_dict):\n", + " model, input_static = bench_dict[\"model\"], bench_dict[\"input\"]\n", + " bench_dict[\"output\"] = model(input_static)\n", + " return bench_dict\n", + "\n", + " def backward_fn(bench_dict):\n", + " output = bench_dict[\"output\"]\n", + " loss = output.sum()\n", + " loss.backward(retain_graph=True)\n", + "\n", + " return prepare_fn, forward_fn, backward_fn, benchmark_title" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def lava():\n", + " import lava.lib.dl.slayer as slayer\n", + "\n", + " benchmark_title = f\"Lava DL
v0.4.0.dev0\"\n", + "\n", + " def prepare_fn(batch_size, n_steps, n_neurons, n_layers, device):\n", + " neuron_params = {\n", + " 'threshold' : 0.1,\n", + " 'current_decay' : 1,\n", + " 'voltage_decay' : 0.1,\n", + " 'requires_grad' : True, \n", + " }\n", + "\n", + " model = slayer.block.cuba.Dense(neuron_params, n_neurons, n_neurons).to(device)\n", + " input_static = torch.randn(batch_size, n_neurons, n_steps).to(device)\n", + " with torch.no_grad():\n", + " model(input_static)\n", + " return dict(model=model, input=input_static, n_neurons=n_neurons)\n", + "\n", + " def forward_fn(bench_dict):\n", + " model, input_static = bench_dict[\"model\"], bench_dict[\"input\"]\n", + " bench_dict[\"output\"] = model(input_static)\n", + " return bench_dict\n", + "\n", + " def backward_fn(bench_dict):\n", + " output = bench_dict[\"output\"]\n", + " loss = output.sum()\n", + " loss.backward(retain_graph=True)\n", + "\n", + " return prepare_fn, forward_fn, backward_fn, benchmark_title" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "batch_size = 10\n", + "n_steps = 500\n", + "n_layers = 1 # doesn't do anything at the moment\n", + "device = \"cuda\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "data = []\n", + "for benchmark in [spikingjelly_cupy, rockpool_torch, rockpool_exodus, sinabs, sinabs_exodus, snntorch, norse, lava,]: # spikingjelly\n", + "# for benchmark in [ norse, snntorch,]:\n", + " for n_neurons in [512, 4096, 16384, ]: # 1024, 2048, 4096, 8192, 16384,\n", + " prepare_fn, forward_fn, backward_fn, bench_desc = benchmark()\n", + " print(\"Benchmarking\", bench_desc, \"with n_neurons =\", n_neurons)\n", + " forward_times, backward_times = benchmark_framework(\n", + " prepare_fn=prepare_fn,\n", + " forward_fn=forward_fn,\n", + " backward_fn=backward_fn,\n", + " benchmark_desc=bench_desc,\n", + " n_neurons=n_neurons,\n", + " n_layers=n_layers,\n", + " n_steps=n_steps,\n", + " batch_size=batch_size,\n", + " device=device,\n", + " )\n", + " data.append(\n", + " [\n", + " bench_desc,\n", + " np.array(forward_times).mean(),\n", + " np.array(backward_times).mean(),\n", + " n_neurons,\n", + " ]\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import pandas as pd\n", + "\n", + "df = pd.DataFrame(data, columns=[\"framework\", \"forward\", \"backward\", \"neurons\"])\n", + "df = df.melt(\n", + " id_vars=[\"framework\", \"neurons\"],\n", + " value_vars=[\"forward\", \"backward\"],\n", + " var_name=\"pass\",\n", + " value_name=\"time [s]\",\n", + ")\n", + "df.to_csv(\"data.csv\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "frameworks", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.4" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/p/snn-library-benchmarks/data.csv b/p/snn-library-benchmarks/data.csv new file mode 100644 index 00000000..f327a958 --- /dev/null +++ b/p/snn-library-benchmarks/data.csv @@ -0,0 +1,49 @@ +,framework,neurons,pass,time [s] +0,SpikingJelly CuPy
v0.0.0.0.15,512,forward,0.0007805065210099058 +1,SpikingJelly CuPy
v0.0.0.0.15,4096,forward,0.0124769107795056 +2,SpikingJelly CuPy
v0.0.0.0.15,16384,forward,0.027920798821882767 +3,Rockpool
v2.6,512,forward,0.37688859303792316 +4,Rockpool
v2.6,4096,forward,0.39487791061401367 +5,Rockpool
v2.6,16384,forward,0.8107947508494059 +6,Rockpool EXODUS
v2.6,512,forward,0.002076975900683695 +7,Rockpool EXODUS
v2.6,4096,forward,0.06692159175872803 +8,Rockpool EXODUS
v2.6,16384,forward,0.5919832587242126 +9,Sinabs
v1.2.8,512,forward,0.06987739431446996 +10,Sinabs
v1.2.8,4096,forward,0.0824661922454834 +11,Sinabs
v1.2.8,16384,forward,0.5325368642807007 +12,Sinabs EXODUS
v1.1.2,512,forward,0.0015409472176033101 +13,Sinabs EXODUS
v1.1.2,4096,forward,0.05830531120300293 +14,Sinabs EXODUS
v1.1.2,16384,forward,0.596237301826477 +15,snnTorch
v0.7.0,512,forward,0.12985163927078247 +16,snnTorch
v0.7.0,4096,forward,0.20558152198791504 +17,snnTorch
v0.7.0,16384,forward,2.07729172706604 +18,Norse
v1.0.0,512,forward,0.12789078056812286 +19,Norse
v1.0.0,4096,forward,0.12744338810443878 +20,Norse
v1.0.0,16384,forward,0.5510208606719971 +21,Lava DL
v0.4.0.dev0,512,forward,0.00212576589472869 +22,Lava DL
v0.4.0.dev0,4096,forward,0.06678892771402994 +23,Lava DL
v0.4.0.dev0,16384,forward, +24,SpikingJelly CuPy
v0.0.0.0.15,512,backward,0.000706080198708752 +25,SpikingJelly CuPy
v0.0.0.0.15,4096,backward,0.030669746976910217 +26,SpikingJelly CuPy
v0.0.0.0.15,16384,backward,0.4747912883758545 +27,Rockpool
v2.6,512,backward,0.21001343727111815 +28,Rockpool
v2.6,4096,backward,0.9924057324727377 +29,Rockpool
v2.6,16384,backward,4.060965855916341 +30,Rockpool EXODUS
v2.6,512,backward,0.003395784911462816 +31,Rockpool EXODUS
v2.6,4096,backward,0.04521534177992079 +32,Rockpool EXODUS
v2.6,16384,backward,0.10865803559621175 +33,Sinabs
v1.2.8,512,backward,0.11612474918365479 +34,Sinabs
v1.2.8,4096,backward,0.7617576917012533 +35,Sinabs
v1.2.8,16384,backward,3.246778726577759 +36,Sinabs EXODUS
v1.1.2,512,backward,0.004442035755402762 +37,Sinabs EXODUS
v1.1.2,4096,backward,0.04086161613464356 +38,Sinabs EXODUS
v1.1.2,16384,backward,0.09781840214362511 +39,snnTorch
v0.7.0,512,backward,0.08350504438082378 +40,snnTorch
v0.7.0,4096,backward,0.3494421641031901 +41,snnTorch
v0.7.0,16384,backward,5.430537621180217 +42,Norse
v1.0.0,512,backward,0.1313396394252777 +43,Norse
v1.0.0,4096,backward,0.4413779258728027 +44,Norse
v1.0.0,16384,backward,2.0151825745900473 +45,Lava DL
v0.4.0.dev0,512,backward,0.002021573529098973 +46,Lava DL
v0.4.0.dev0,4096,backward,0.049568972936490686 +47,Lava DL
v0.4.0.dev0,16384,backward, diff --git a/p/snn-library-benchmarks/framework-benchmarking-16k-header.png b/p/snn-library-benchmarks/framework-benchmarking-16k-header.png new file mode 100644 index 00000000..542d482c Binary files /dev/null and b/p/snn-library-benchmarks/framework-benchmarking-16k-header.png differ diff --git a/p/snn-library-benchmarks/framework-benchmarking-16k-header_hu1edf738c777df89dc03276a414ed6729_53681_120x120_fill_box_smart1_3.png b/p/snn-library-benchmarks/framework-benchmarking-16k-header_hu1edf738c777df89dc03276a414ed6729_53681_120x120_fill_box_smart1_3.png new file mode 100644 index 00000000..6f97cd96 Binary files /dev/null and b/p/snn-library-benchmarks/framework-benchmarking-16k-header_hu1edf738c777df89dc03276a414ed6729_53681_120x120_fill_box_smart1_3.png differ diff --git a/p/snn-library-benchmarks/framework-benchmarking-16k-header_hu1edf738c777df89dc03276a414ed6729_53681_1600x0_resize_box_3.png b/p/snn-library-benchmarks/framework-benchmarking-16k-header_hu1edf738c777df89dc03276a414ed6729_53681_1600x0_resize_box_3.png new file mode 100644 index 00000000..834640c0 Binary files /dev/null and b/p/snn-library-benchmarks/framework-benchmarking-16k-header_hu1edf738c777df89dc03276a414ed6729_53681_1600x0_resize_box_3.png differ diff --git a/p/snn-library-benchmarks/framework-benchmarking-16k-header_hu1edf738c777df89dc03276a414ed6729_53681_800x0_resize_box_3.png b/p/snn-library-benchmarks/framework-benchmarking-16k-header_hu1edf738c777df89dc03276a414ed6729_53681_800x0_resize_box_3.png new file mode 100644 index 00000000..636e774b Binary files /dev/null and b/p/snn-library-benchmarks/framework-benchmarking-16k-header_hu1edf738c777df89dc03276a414ed6729_53681_800x0_resize_box_3.png differ diff --git a/p/snn-library-benchmarks/framework-benchmarking-16k.png b/p/snn-library-benchmarks/framework-benchmarking-16k.png new file mode 100644 index 00000000..0e291697 Binary files /dev/null and b/p/snn-library-benchmarks/framework-benchmarking-16k.png differ diff --git a/p/snn-library-benchmarks/framework-benchmarking-16k_hu75d870e7fed3f2c6b25b231575dd839f_52625_1024x0_resize_box_3.png b/p/snn-library-benchmarks/framework-benchmarking-16k_hu75d870e7fed3f2c6b25b231575dd839f_52625_1024x0_resize_box_3.png new file mode 100644 index 00000000..8f3756f9 Binary files /dev/null and b/p/snn-library-benchmarks/framework-benchmarking-16k_hu75d870e7fed3f2c6b25b231575dd839f_52625_1024x0_resize_box_3.png differ diff --git a/p/snn-library-benchmarks/framework-benchmarking-16k_hu75d870e7fed3f2c6b25b231575dd839f_52625_480x0_resize_box_3.png b/p/snn-library-benchmarks/framework-benchmarking-16k_hu75d870e7fed3f2c6b25b231575dd839f_52625_480x0_resize_box_3.png new file mode 100644 index 00000000..6421c736 Binary files /dev/null and b/p/snn-library-benchmarks/framework-benchmarking-16k_hu75d870e7fed3f2c6b25b231575dd839f_52625_480x0_resize_box_3.png differ diff --git a/p/snn-library-benchmarks/framework-benchmarking-4k.png b/p/snn-library-benchmarks/framework-benchmarking-4k.png new file mode 100644 index 00000000..47950ac6 Binary files /dev/null and b/p/snn-library-benchmarks/framework-benchmarking-4k.png differ diff --git a/p/snn-library-benchmarks/framework-benchmarking-4k_hu42c9aa17de8f2437fbfd6d286d22de35_70182_1024x0_resize_box_3.png b/p/snn-library-benchmarks/framework-benchmarking-4k_hu42c9aa17de8f2437fbfd6d286d22de35_70182_1024x0_resize_box_3.png new file mode 100644 index 00000000..8632347b Binary files /dev/null and 
b/p/snn-library-benchmarks/framework-benchmarking-4k_hu42c9aa17de8f2437fbfd6d286d22de35_70182_1024x0_resize_box_3.png differ diff --git a/p/snn-library-benchmarks/framework-benchmarking-4k_hu42c9aa17de8f2437fbfd6d286d22de35_70182_480x0_resize_box_3.png b/p/snn-library-benchmarks/framework-benchmarking-4k_hu42c9aa17de8f2437fbfd6d286d22de35_70182_480x0_resize_box_3.png new file mode 100644 index 00000000..4ae8cdd4 Binary files /dev/null and b/p/snn-library-benchmarks/framework-benchmarking-4k_hu42c9aa17de8f2437fbfd6d286d22de35_70182_480x0_resize_box_3.png differ diff --git a/p/snn-library-benchmarks/framework-benchmarking-512.png b/p/snn-library-benchmarks/framework-benchmarking-512.png new file mode 100644 index 00000000..968f5b89 Binary files /dev/null and b/p/snn-library-benchmarks/framework-benchmarking-512.png differ diff --git a/p/snn-library-benchmarks/framework-benchmarking-512_hubc4b897d813ea3eebeaa272ce0f2f6b6_69698_1024x0_resize_box_3.png b/p/snn-library-benchmarks/framework-benchmarking-512_hubc4b897d813ea3eebeaa272ce0f2f6b6_69698_1024x0_resize_box_3.png new file mode 100644 index 00000000..281362ef Binary files /dev/null and b/p/snn-library-benchmarks/framework-benchmarking-512_hubc4b897d813ea3eebeaa272ce0f2f6b6_69698_1024x0_resize_box_3.png differ diff --git a/p/snn-library-benchmarks/framework-benchmarking-512_hubc4b897d813ea3eebeaa272ce0f2f6b6_69698_480x0_resize_box_3.png b/p/snn-library-benchmarks/framework-benchmarking-512_hubc4b897d813ea3eebeaa272ce0f2f6b6_69698_480x0_resize_box_3.png new file mode 100644 index 00000000..bfdd7041 Binary files /dev/null and b/p/snn-library-benchmarks/framework-benchmarking-512_hubc4b897d813ea3eebeaa272ce0f2f6b6_69698_480x0_resize_box_3.png differ diff --git a/p/snn-library-benchmarks/index.html b/p/snn-library-benchmarks/index.html new file mode 100644 index 00000000..d3428292 --- /dev/null +++ b/p/snn-library-benchmarks/index.html @@ -0,0 +1,12 @@ +SNN library benchmarks

SNN library benchmarks

Comparing the most popular SNN frameworks for gradient-based optimization on top of PyTorch.

Open Neuromorphic’s list of SNN frameworks currently counts 10 libraries, and those are only the most popular ones! As spiking neural network models grow in size thanks to deep learning, optimization becomes more important for researchers and practitioners alike. Training SNNs is often slow, since the stateful networks are typically fed sequential inputs. The most popular training method today is some form of backpropagation through time, whose time complexity scales with the number of time steps. We benchmark libraries that each take a slightly different approach to extending PyTorch for gradient-based optimization of SNNs. We focus on the time it takes to pass data forward and backward through the network; there are obviously other, intangible qualities of a framework (extensibility, quality of documentation, ease of installation, support for neuromorphic hardware …) that we’re not going to try to capture here. In our benchmarks, we use a single fully-connected (linear) layer followed by a leaky integrate-and-fire (LIF) layer. The input data has a batch size of 10, 500 time steps and n neurons.
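
For reference, this is roughly what the benchmarked setup looks like in snnTorch, one of the libraries under test (a minimal sketch with the sizes used here; beta is an example value):

import torch
import torch.nn as nn
import snntorch

n_steps, batch_size, n_neurons = 500, 10, 512

fc = nn.Linear(n_neurons, n_neurons)        # fully-connected layer
lif = snntorch.Leaky(beta=0.95)             # LIF layer

x = torch.randn(n_steps, batch_size, n_neurons)
mem = lif.init_leaky()
spikes = []
for step in range(n_steps):                 # BPTT unrolls this loop
    spk, mem = lif(fc(x[step]), mem)
    spikes.append(spk)
torch.stack(spikes).sum().backward()        # backward pass through all 500 steps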

Comparison of time taken for forward and backward passes in different frameworks, for 512 neurons.

The first figure shows results for a small 512-neuron network. Overall, SpikingJelly is the fastest when using the CuPy backend, at just 1.5 ms for the forward and backward calls. The libraries that use an implementation of EXODUS (Sinabs / Rockpool) or SLAYER (Lava DL) equally benefit from custom CUDA code and vectorization across the time dimension in both forward and backward passes. It is noteworthy that such custom implementations exist for specific neuron models (such as the LIF under test), but not for arbitrary ones. Flexibility comes at a price, and flexibility is exactly what frameworks such as snnTorch, Norse, Sinabs or Rockpool offer. SpikingJelly also supports a conventional PyTorch GPU backend with which neuron models can be defined more flexibly. Such implementations are also much easier to maintain, and relying on the extensive testing of PyTorch means that they will likely work on any given machine configuration. Custom CUDA/CuPy backend implementations need to be compiled, and it is then up to the maintainer to test them on different systems. On top of that, networks implemented in SLAYER, EXODUS or SpikingJelly with a CuPy backend cannot be executed on a CPU (unless converted).
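
For illustration, switching between the two SpikingJelly backends is a one-argument change (a hedged sketch; the keyword arguments assume a recent spikingjelly.activation_based release):

import torch
from spikingjelly.activation_based import neuron

# Flexible pure-PyTorch backend: runs on CPU or GPU, easy to modify.
lif_torch = neuron.LIFNode(tau=2.0, step_mode='m', backend='torch')

# Fused CuPy backend: much faster for the LIF under test, but CUDA-only.
lif_cupy = neuron.LIFNode(tau=2.0, step_mode='m', backend='cupy').cuda()

x = torch.randn(500, 10, 512)   # (time, batch, neurons)
out = lif_torch(x)              # multi-step mode consumes all time steps at once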

Comparison of time taken for forward and backward passes in different frameworks, for 4k neurons.

Comparison of time taken for forward and backward passes in different frameworks, for 16k neurons.

When scaling up the number of neurons, the differences in performance become more evident. We notice that snnTorch struggles to scale in both the forward and backward pass, and Lava DL goes out of memory (OOM) completely, potentially because of its use of conv3d kernels. SpikingJelly keeps its blazing-fast forward pass, and the EXODUS implementations have the quickest backward pass. SpikingJelly is more than 10 times faster than libraries that rely on pure PyTorch acceleration.

Summary

The ideal library will often depend on a multitude of factors, such as accessible documentation, usability of the API or the availability of pre-trained models. Generally speaking, pure PyTorch offers good support when custom neuron models (with additional states, recurrence) are to be explored. For larger networks, it will likely pay off to rely on existing CUDA-accelerated implementations, or to implement CuPy backends for new neuron models. Yet another option is to experiment with torch.compile or CUDA graph replay, although neither has been tested here.
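
As a starting point, the torch-compile-test notebook accompanying this post wraps a Norse model as follows; whether compilation actually helps for stateful SNNs remains to be measured:

import torch
import torch.nn as nn
from norse.torch import SequentialState
from norse.torch.module.lif import LIF

# SequentialState threads the LIF state through the layers for us.
model = SequentialState(nn.Linear(512, 512), LIF())
model = torch.compile(model)

# (time, batch, neurons); the whole sequence is processed in one call.
output, state = model(torch.randn(500, 10, 512))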

Code and comments

The code for this benchmark is available here. The order of dimensions in the input tensor and how it is fed to the respective models differ between libraries. Benchmarks are averaged across 100 runs on an NVIDIA RTX 2070 GPU with 8 GB of memory. Standard deviations have been omitted because they are negligible. Some things that would be interesting to add:

  • check that forward dynamics are roughly equal in each case
  • effect of torch.compile on networks
  • effect of CUDA graph replay
  • memory consumption of different libraries
  • benchmarking JAX implementations

Author

  • Gregor Lenz holds a PhD in neuromorphic engineering from Sorbonne University and has been training SNNs for a little while now!
+ \ No newline at end of file diff --git a/p/snn-library-benchmarks/plotting.ipynb b/p/snn-library-benchmarks/plotting.ipynb new file mode 100644 index 00000000..244a2524 --- /dev/null +++ b/p/snn-library-benchmarks/plotting.ipynb @@ -0,0 +1,203 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import pandas as pd\n", + "\n", + "df = pd.read_csv('data.csv')\n", + "n_steps = 500\n", + "batch_size = 10" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "df16k = df[df['neurons'] == 16384]\n", + "\n", + "totals16k = df16k[\"time [s]\"][df16k[\"pass\"] == \"forward\"].to_numpy() + df16k[\"time [s]\"][df16k[\"pass\"] == \"backward\"].to_numpy()\n", + "totals16k = totals16k.round(2)\n", + "frameworks = df16k[\"framework\"].unique()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import plotly.express as px\n", + "import plotly.graph_objects as go\n", + "\n", + "fig = px.bar(\n", + " df[df['neurons'] == 16384],\n", + " x=\"framework\",\n", + " y=\"time [s]\",\n", + " color=\"pass\",\n", + " log_y=True,\n", + " text_auto='.2f',\n", + ").add_trace(go.Scatter(\n", + " x=frameworks, y=totals16k*1.05, \n", + " mode='text',\n", + " text=totals16k,\n", + " textposition='top center',\n", + " showlegend=False\n", + "))\n", + "fig.data[0]['textposition'] = 'inside'\n", + "fig.data[1]['textposition'] = 'inside'\n", + "\n", + "fig.update_layout(\n", + " title=f\"16k neurons, 1 Linear + 1 LIF layer, {n_steps} time steps, batch size {batch_size}\",\n", + " xaxis={'categoryorder':'total ascending'},\n", + ")\n", + "# increase size of facet titles\n", + "fig.update_annotations(font_size=14)\n", + "\n", + "fig.write_image(\"framework-benchmarking-16k.png\", width=1024)\n", + "fig.write_image(\"framework-benchmarking-16k-header.png\", width=1600)\n", + "fig.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from PIL import Image\n", + "from PIL import ImageFont\n", + "from PIL import ImageDraw \n", + "\n", + "# print OOM on the image\n", + "font = ImageFont.truetype(\"/usr/share/fonts/truetype/freefont/FreeMono.ttf\", size=35, encoding=\"unic\")\n", + "img = Image.open(\"framework-benchmarking-16k.png\")\n", + "draw = ImageDraw.Draw(img).text((105, 340), \"OOM\", (0,0,0),font=font)\n", + "img.save('framework-benchmarking-16k.png')\n", + "\n", + "img = Image.open(\"framework-benchmarking-16k-header.png\")\n", + "draw = ImageDraw.Draw(img).text((130, 370), \"OOM\", (0,0,0),font=font)\n", + "img.save('framework-benchmarking-16k-header.png')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "df4k = df[df['neurons'] == 4096]\n", + "\n", + "totals4k = df4k[\"time [s]\"][df4k[\"pass\"] == \"forward\"].to_numpy() + df4k[\"time [s]\"][df4k[\"pass\"] == \"backward\"].to_numpy()\n", + "totals4k = totals4k.round(2)\n", + "\n", + "fig = px.bar(\n", + " df4k,\n", + " x=\"framework\",\n", + " y=\"time [s]\",\n", + " color=\"pass\",\n", + " log_y=True,\n", + " text_auto='.2f',\n", + ").add_trace(go.Scatter(\n", + " x=frameworks, y=totals4k*1.05, \n", + " mode='text',\n", + " text=totals4k,\n", + " textposition='top center',\n", + " showlegend=False\n", + "))\n", + 
"fig.data[0]['textposition'] = 'inside'\n", + "fig.data[1]['textposition'] = 'inside'\n", + "fig.update_layout(\n", + " title=f\"4k neurons, 1 Linear + 1 LIF layer, {n_steps} time steps, batch size {batch_size}\",\n", + " xaxis={'categoryorder':'total ascending'},\n", + ")\n", + "# increase size of facet titles\n", + "fig.update_annotations(font_size=14)\n", + "fig.write_image(\"framework-benchmarking-4k.png\", width=1024)# scale=2)\n", + "fig.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "df512 = df[df['neurons'] == 512]\n", + "df512[\"time [s]\"] *= 1000\n", + "\n", + "totals512 = df512[\"time [s]\"][df512[\"pass\"] == \"forward\"].to_numpy() + df512[\"time [s]\"][df512[\"pass\"] == \"backward\"].to_numpy()\n", + "totals512 = totals512.round(2)\n", + "\n", + "fig = px.bar(\n", + " df512,\n", + " x=\"framework\",\n", + " y=\"time [s]\",\n", + " color=\"pass\",\n", + " log_y=True,\n", + " text_auto=\".2f\",\n", + ").add_trace(go.Scatter(\n", + " x=frameworks, y=totals512*1.05, \n", + " mode='text',\n", + " text=totals512,\n", + " textposition='top center',\n", + " showlegend=False\n", + "))\n", + "fig.data[0]['textposition'] = 'inside'\n", + "fig.data[1]['textposition'] = 'inside'\n", + "fig.update_layout(\n", + " title=f\"512 neurons, 1 Linear + 1 LIF layer, {n_steps} time steps, batch size {batch_size}\",\n", + " xaxis={'categoryorder':'total ascending'},\n", + " yaxis_title=\"Time [ms]\",\n", + ")\n", + "# increase size of facet titles\n", + "fig.update_annotations(font_size=14)\n", + "\n", + "fig.write_image(\"framework-benchmarking-512.png\", width=1024)# scale=2)\n", + "fig.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "frameworks", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.4" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/p/snn-library-benchmarks/requirements.txt b/p/snn-library-benchmarks/requirements.txt new file mode 100644 index 00000000..05aedae7 --- /dev/null +++ b/p/snn-library-benchmarks/requirements.txt @@ -0,0 +1,8 @@ +sinabs-exodus +norse +snntorch +rockpool +kaleido +ipykernel +plotly +nbformat \ No newline at end of file diff --git a/p/snn-library-benchmarks/torch-compile-test.ipynb b/p/snn-library-benchmarks/torch-compile-test.ipynb new file mode 100644 index 00000000..476e26ac --- /dev/null +++ b/p/snn-library-benchmarks/torch-compile-test.ipynb @@ -0,0 +1,148 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import snntorch\n", + "\n", + "device = \"cpu\"\n", + "class Model(nn.Module):\n", + " def __init__(self, beta: float = 0.95):\n", + " super().__init__()\n", + " self.fc = nn.Linear(n_neurons, n_neurons)\n", + " self.lif = snntorch.Leaky(beta=beta)\n", + " self.mem = self.lif.init_leaky()\n", + "\n", + " def forward(self, x):\n", + " cur = self.fc(x)\n", + " spk, self.mem = self.lif(cur, self.mem)\n", + " return spk\n", + "\n", + "\n", + "torch.autograd.set_detect_anomaly(False)\n", + "\n", + "model = Model()\n", + "model = torch.compile(model)#, mode=\"max-autotune\")\n", + "model = model.to(device)\n", + 
"input_static = torch.randn(n_steps, batch_size, n_neurons).to(device) # n_steps\n", + "\n", + "output = []\n", + "for step in range(n_steps):\n", + " output.append(model(input_static[step]))\n", + " \n", + "torch.stack(output).sum().backward()\n", + "\n", + " # for inp in x:\n", + " # output.append(spk)\n", + " # return torch.stack(output)\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from norse.torch.module.lif import LIF\n", + "from norse.torch import SequentialState\n", + "import norse\n", + "\n", + "model = SequentialState(\n", + " nn.Linear(n_neurons, n_neurons),\n", + " LIF(),\n", + ")\n", + "model = model.to(device)\n", + "model = torch.compile(model)#, mode=\"max-autotune\")\n", + "input_static = torch.randn(n_steps, batch_size, n_neurons).to(device)\n", + "\n", + "model(input_static)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import torch \n", + "import torch.nn as nn\n", + "\n", + "import sinabs.layers as sl\n", + "\n", + "device = \"cuda\"\n", + "model = nn.Sequential(\n", + " nn.Linear(10, 10),\n", + " sl.LIF(tau_mem=torch.tensor(10.)),\n", + ")\n", + "\n", + "model = torch.compile(model)\n", + "model.to(device)\n", + "\n", + "model(torch.randn((1, 20, 10), device=device))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import plotly.graph_objects as go\n", + "import pandas as pd\n", + "import numpy as np\n", + "\n", + "# Assume you have a DataFrame df with columns 'x', 'y1', 'y2'\n", + "df = pd.DataFrame({\n", + " 'x': ['A', 'B', 'C', 'D'],\n", + " 'y1': [10, 15, 7, 10],\n", + " 'y2': [5, 2, 7, 8]\n", + "})\n", + "\n", + "fig = go.Figure()\n", + "\n", + "fig.add_trace(go.Bar(x=df['x'], y=df['y1'], name='y1'))\n", + "fig.add_trace(go.Bar(x=df['x'], y=df['y2'], text=df['y2'], textposition='inside', name='y2'))\n", + "\n", + "# Compute totals for each x\n", + "totals = df['y1'] + df['y2']\n", + "\n", + "# Add totals as text using scatter plot\n", + "fig.add_trace(go.Scatter(\n", + " x=df['x'], y=totals, \n", + " mode='text',\n", + " text=totals,\n", + " textposition='top center',\n", + " showlegend=False\n", + "))\n", + "\n", + "fig.update_layout(barmode='stack')\n", + "\n", + "fig.show()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "frameworks310", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.12" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/p/snn-library-benchmarks/utils.py b/p/snn-library-benchmarks/utils.py new file mode 100644 index 00000000..9cb74354 --- /dev/null +++ b/p/snn-library-benchmarks/utils.py @@ -0,0 +1,84 @@ +from typing import Callable, Optional, List, Tuple +import warnings + +from time import time + + +def timeit( + callable: Callable, + min_runs: int = 3, + max_runs: int = 5000, + min_time: float = 2.0, + warmup_calls: int = 1, +) -> List[float]: + + # - Warmup + for _ in range(warmup_calls): + callable() + + # - Take at least min_time seconds, at least min_runs runs + exec_count = 0 + t_total = 0.0 + collected_times = [] + while ((t_total <= min_time) or (exec_count < min_runs)) and ( + exec_count < max_runs + ): + # - Time a single run + t_start = time() + 
callable() + collected_times.append(time() - t_start) + + exec_count += 1 + t_total = sum(collected_times) + + return collected_times + + +def benchmark_framework( + prepare_fn: Callable, + forward_fn: Callable, + backward_fn: Callable, + benchmark_desc: Optional[str] = None, + n_neurons: int = 512, + n_layers: int = 4, + n_steps: int = 500, + batch_size: int = 10, + device: str = "cpu", +) -> Tuple[List, List, List]: + forward_times = [] + backward_times = [] + + try: + # - Prepare benchmark + bench_dict = prepare_fn( + batch_size=batch_size, + n_steps=n_steps, + n_neurons=n_neurons, + n_layers=n_layers, + device=device + ) + + # - Forward pass + forward_times.append(timeit(lambda: forward_fn(bench_dict))) + bench_dict = forward_fn(bench_dict) + assert bench_dict["output"].shape == bench_dict["input"].shape + + # - Backward pass + backward_times.append(timeit(lambda: backward_fn(bench_dict))) + + except Exception as e: + # - Fail nicely with a warning if a benchmark dies + warnings.warn( + f"Benchmark {benchmark_desc} failed with error {str(e)}." + ) + + # - No results for this run + forward_times.append([]) + backward_times.append([]) + + # - Build a description of the benchmark + benchmark_desc = f"{benchmark_desc}; " if benchmark_desc is not None else "" + benchmark_desc = f"{benchmark_desc}B = {batch_size}, T = {n_steps}" + + # - Return benchmark results + return forward_times, backward_times diff --git a/p/spiking-neurons-a-digital-hardware-implementation/accumulation-loop.png b/p/spiking-neurons-a-digital-hardware-implementation/accumulation-loop.png new file mode 100644 index 00000000..059154e4 Binary files /dev/null and b/p/spiking-neurons-a-digital-hardware-implementation/accumulation-loop.png differ diff --git a/p/spiking-neurons-a-digital-hardware-implementation/accumulation-loop_hu868ee57fbbd85ed207d780f73293926a_83987_1024x0_resize_box_3.png b/p/spiking-neurons-a-digital-hardware-implementation/accumulation-loop_hu868ee57fbbd85ed207d780f73293926a_83987_1024x0_resize_box_3.png new file mode 100644 index 00000000..b936b53d Binary files /dev/null and b/p/spiking-neurons-a-digital-hardware-implementation/accumulation-loop_hu868ee57fbbd85ed207d780f73293926a_83987_1024x0_resize_box_3.png differ diff --git a/p/spiking-neurons-a-digital-hardware-implementation/accumulation-loop_hu868ee57fbbd85ed207d780f73293926a_83987_480x0_resize_box_3.png b/p/spiking-neurons-a-digital-hardware-implementation/accumulation-loop_hu868ee57fbbd85ed207d780f73293926a_83987_480x0_resize_box_3.png new file mode 100644 index 00000000..65489e99 Binary files /dev/null and b/p/spiking-neurons-a-digital-hardware-implementation/accumulation-loop_hu868ee57fbbd85ed207d780f73293926a_83987_480x0_resize_box_3.png differ diff --git a/p/spiking-neurons-a-digital-hardware-implementation/accumulation.png b/p/spiking-neurons-a-digital-hardware-implementation/accumulation.png new file mode 100644 index 00000000..329439a5 Binary files /dev/null and b/p/spiking-neurons-a-digital-hardware-implementation/accumulation.png differ diff --git a/p/spiking-neurons-a-digital-hardware-implementation/accumulation_hu0fc8f88676c742058e81209d5a8437fb_76462_1024x0_resize_box_3.png b/p/spiking-neurons-a-digital-hardware-implementation/accumulation_hu0fc8f88676c742058e81209d5a8437fb_76462_1024x0_resize_box_3.png new file mode 100644 index 00000000..0e53dfa1 Binary files /dev/null and b/p/spiking-neurons-a-digital-hardware-implementation/accumulation_hu0fc8f88676c742058e81209d5a8437fb_76462_1024x0_resize_box_3.png differ diff --git 
a/p/spiking-neurons-a-digital-hardware-implementation/accumulation_hu0fc8f88676c742058e81209d5a8437fb_76462_480x0_resize_box_3.png b/p/spiking-neurons-a-digital-hardware-implementation/accumulation_hu0fc8f88676c742058e81209d5a8437fb_76462_480x0_resize_box_3.png new file mode 100644 index 00000000..86a96ab0 Binary files /dev/null and b/p/spiking-neurons-a-digital-hardware-implementation/accumulation_hu0fc8f88676c742058e81209d5a8437fb_76462_480x0_resize_box_3.png differ diff --git a/p/spiking-neurons-a-digital-hardware-implementation/index.html b/p/spiking-neurons-a-digital-hardware-implementation/index.html new file mode 100644 index 00000000..601f4c4c --- /dev/null +++ b/p/spiking-neurons-a-digital-hardware-implementation/index.html @@ -0,0 +1,35 @@ +Spiking neurons: a digital hardware implementation

Spiking neurons: a digital hardware implementation

In this article, we will try to model a Leaky Integrate and Fire (LIF) neuron using digital hardware: registers, memories, adders and so on.

Spiking neurons

In this article, we will try to model a layer of Leaky Integrate and Fire (LIF) spiking neurons using digital hardware: registers, memories, adders and so on. To do so, we will consider a single output neuron connected to multiple input neurons from a previous layer.

Multiple pre-synaptic neurons connected to a post-synaptic one.

In a Spiking Neural Network (SNN), neurons communicate by means of spikes: these activation voltages are then converted to currents through the synapses, charging the membrane potential of the destination neuron. In the following, the destination neuron is denoted as post-synaptic neuron, with the index $i$, while the input neuron under consideration is denoted as pre-synaptic neuron, with the index $j$.

We denote the input spike train incoming from the pre-synaptic neuron with $\sigma_{j}(t)$:
$$ \sigma_{j}(t) = \sum_{k} \delta(t-t_{k}) $$
where $t_{k}$ are the spike timestamps of the spike train $\sigma_{j}(t)$.

The synapse connecting the pre-synaptic neuron with the post-synaptic neuron is denoted with $w_{ij}$. All the incoming spike trains are then integrated by the post-synaptic neuron membrane; the integration function can be modeled by a first-order low-pass filter, denoted with $\alpha_{i}(t)$:
$$ \alpha_{i}(t) = \frac{1}{\tau_{u_{i}}} e^{-\frac{t}{\tau_{u_{i}}}} $$
The spike train incoming from the pre-synaptic neuron, hence, is convolved with the membrane function; in real neurons, this corresponds to the input currents coming from the pre-synaptic neurons that charge the post-synaptic neuron membrane potential, $v_{i}(t)$. The sum of the currents in input to the post-synaptic neuron is denoted with $u_{i}(t)$ and modeled through the following equation:
$$ u_{i}(t) = \sum_{j \neq i}{w_{ij} \cdot (\alpha_{i} \ast \sigma_{j})(t)} $$
Each pre-synaptic neuron contributes a current (its spike train multiplied by the synapse weight $w_{ij}$), and these currents sum up at the input of the post-synaptic neuron. Given the membrane potential of the destination neuron, denoted with $v_{i}(t)$, the differential equation describing its evolution through time is the following:
$$ \frac{\partial}{\partial t} v_{i}(t) = -\frac{1}{\tau_{v}} v_{i}(t) + u_{i}(t) $$
In addition to the input currents, we have the neuron leakage, $\frac{1}{\tau_{v}} v_{i}(t)$, modeled through a leakage coefficient $\frac{1}{\tau_{v}}$ that multiplies the membrane potential.

Discretising the model

Such a differential equation cannot be solved directly with the discrete arithmetic available on digital hardware; hence, we need to discretise it. The discretisation leads to the following result:
$$ v_{i}[t] = \beta \cdot v_{i}[t-1] + (1 - \beta) \cdot u_{i}[t] - \theta \cdot S_{i}[t] $$
where $\beta$ is the decay coefficient associated with the leakage. We embed $(1-\beta)$ in the input current $u_{i}[t]$ by merging it with the synapse weights as a scaling factor; in this way, the input current $u_{i}[t]$ is normalised regardless of the value of the decay constant $\tau_{v}$.

Notice that the membrane reset mechanism has been added: when a neuron spikes, its membrane potential goes back to the rest potential (usually equal to zero), and this is modeled by subtracting the threshold $\theta$ from $v_{i}(t)$ when an output spike occurs. The output spike is modeled through a function $S_{i}[t]$:
$$ S_{i}[t] = 1 ~\text{if}~ v_{i}[t] \gt \theta ~\text{else}~ 0 $$
This is equal to 1 at spike time (i.e. if at timestamp $t$ the membrane potential $v_{i}[t]$ is larger than the threshold $\theta$) and 0 elsewhere.

The input current is given by:
$$ u_{i}[t] = \sum_{j \neq i}{w_{ij} \cdot S_{j}[t]} $$
Notice that, since $S_{j}[t]$ is either 0 or 1, the input current $u_{i}[t]$ is equal to the sum of the synapse weights of the pre-synaptic neurons that spike at timestamp $t$.
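
Before moving to hardware, a minimal Python sketch of these discrete-time equations for a single post-synaptic neuron may help (beta, theta and the weights are example values):

import numpy as np

beta, theta = 0.875, 1.0                # example decay coefficient and threshold
w = np.array([0.3, -0.1, 0.2, 0.4])     # example synapse weights, fan-in N = 4

def lif_step(v, in_spikes):
    u = np.dot(w, in_spikes)            # u_i[t]: sum of weights of spiking pre-synaptic neurons
    v = beta * v + u                    # leak and integrate ((1 - beta) folded into w)
    s = int(v > theta)                  # S_i[t]
    return v - theta * s, s             # reset by threshold subtraction

v, s = lif_step(0.0, np.array([1, 0, 0, 1]))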

Storing and addressing neuron states

Let us define the layer fan-in, i.e. how many pre-synaptic neurons are connected in input to each post-synaptic neuron in the layer; we denote this number with $N$. Then, we set the total number of neurons in our layer to $M$.

How do we describe a neuron in hardware? First of all, we need to list some basic information associated with each post-synaptic neuron:

  • its membrane potential $v_{i}[t]$.
  • the weights associated with the synapses, $w_{ij}$; since each post-synaptic neuron is connected in input to $N$ neurons, these synapses can be grouped in an $N$-entry vector $W_{i}$.

Since there are $M$ neurons in the layer, we need an $M$-entry vector, denoted with $V[t]$, to store the membrane potential values evaluated at timestamp $t$; this vector is associated with a memory array in the hardware architecture.

The membrane potentials memory.

An address is associated with each neuron, which can be thought of as the index $i$ in the $V[t]$ vector; to obtain $v_{i}[t]$, the post-synaptic neuron address is used to index the membrane potentials memory $V[t]$.

We are now able to store and retrieve a post-synaptic neuron membrane potential using a memory. Next, we would like to charge it with the pre-synaptic neuron currents in order to emulate the behaviour of a neuron membrane. To do that, we need to fetch the corresponding input synapses $W_{i}$, multiply them by the spikes of the associated pre-synaptic neurons, sum them up and, finally, accumulate the result in the post-synaptic neuron membrane.

Let us start from a single input pre-synaptic neuron:
$$ u_{ij}[t] = w_{ij} \cdot S_{j}[t] $$
We know that $S_{j}[t]$ is either 1 or 0; hence, we have either $u_{ij}[t] = w_{ij}$ or $u_{ij}[t] = 0$. This means that the synapse weight either is or is not added to the total current $u_{i}[t]$; hence, the weight $w_{ij}$ needs to be read from memory only if the corresponding pre-synaptic neuron spikes! Given a layer of $M$ neurons, each of which is connected in input to $N$ synapses, we can group the $M \cdot N$ weights in a matrix, which can be associated with another memory array, denoted with $W$.

The synapses weights memory.

This memory is addressed with the pre-synaptic neuron and the post-synaptic neuron indices to retrieve the weight $w_{ij}$, which automatically corresponds to the $u_{ij}[t]$ current being accumulated in the post-synaptic neuron membrane when the pre-synaptic neuron spikes at timestamp $t$.
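
This event-driven access pattern is easy to mimic in software; in the sketch below (example sizes, NumPy arrays standing in for the memories), a row of $W$ is read only when the corresponding pre-synaptic neuron spikes:

import numpy as np

M, N = 8, 4                            # example layer size and fan-in
V = np.zeros(M)                        # membrane potentials memory V[t]
W = np.random.randn(N, M)              # synapses memory W, one row per pre-synaptic neuron

in_spikes = np.array([1, 0, 0, 1])     # S_j[t] for the N pre-synaptic neurons

for j in np.flatnonzero(in_spikes):    # only spiking neurons trigger a memory read
    V += W[j]                          # accumulate u_ij[t] = w_ij for every post-synaptic neuron i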

Spikes accumulation

Let us implement the neural functionalities using the data structures defined for a neuron (i.e. membrane potential and synapses), starting with the membrane potential charging of a post-synaptic neuron. When the pre-synaptic neuron spikes, its synapse weight $w_{ij}$ is read from the synapse memory $W$ and multiplied by the spike; since the spike is a digital bit equal to 1, this is equivalent to using $w_{ij}$ itself as the input current of the post-synaptic neuron. To add this current to $v_{i}[t]$, we need an arithmetic circuit called an adder!

The spikes accumulation circuit.

The membrane potential $v_{i}[t]$ is read from the potentials memory $V[t]$ and added to the corresponding synapse current $w_{ij}$; the result is the membrane potential of the next time step, $v_{i}[t+1]$, which is stored in the register placed on the adder output; this value is written back to the $V[t]$ memory in the next clock cycle. The register storing the adder output is denoted as the membrane register.

To prevent multiple read-write cycles due to multiple spiking pre-synaptic neurons, one can think of adding a loop to the membrane register in order to accumulate all the currents of the pre-synaptic neurons that are spiking at timestep $t$ and writing the final value $v_{i}[t+1]$ back to memory only once. The corresponding circuit is shown in the following.

Adding a loop register to accumulate multiple spikes before the write-back to memory.

A multiplexer is placed on one side of the adder; in this way:

  • the first weight $w_{i0}$ to be accumulated is added to the $v_{i}[t]$ read from memory, and the result is saved to the membrane register:
$$ v_{i}[t+1] = v_{i}[t] + w_{i0} $$
  • the successive weights are added to the membrane register content, so that all the currents are accumulated before writing $v_{i}[t+1]$ back to memory; using a non-rigorous notation, this can be translated to the following equation:
$$ v_{i}[t+1] = v_{i}[t+1] + w_{ij},~ 0 \lt j \leq N $$
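
In software terms, the membrane register acts as a local accumulator with a single write-back per time step; a rough sketch:

import numpy as np

def accumulate_and_write_back(V, W, in_spikes, i):
    membrane_register = V[i]                 # one read of v_i[t] from memory
    for j in np.flatnonzero(in_spikes):
        membrane_register += W[j, i]         # one pass through the adder per spike
    V[i] = membrane_register                 # v_i[t+1] written back only once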

Excitatory and inhibitory neurons

Our post-synaptic neuron is able to accumulate spikes in its membrane; however, input spikes do not always result in membrane potential charging! In fact, a pre-synaptic neuron can be excitatory (i.e. it charges the post-synaptic neuron membrane) or inhibitory (i.e. it discharges the post-synaptic neuron membrane). In the digital circuit, this corresponds to adding or subtracting, respectively, the synapse weight $w_{ij}$ to or from $v_{i}[t]$. This functionality can be added to the architecture by employing an adder capable of performing both additions and subtractions, choosing between them with a control signal generated by an FSM (Finite State Machine), a sequential digital circuit that evolves through a series of states depending on its inputs and, consequently, generates control signals for the rest of the circuit.

Control circuit for choosing between excitatory and inhibitory stimuli.

This FSM, given the operation to be executed on the post-synaptic neuron, chooses if the adder has to add or subtract the synapse current.

However, is this design efficient in terms of the resources employed? It should be remembered that inhibitory and excitatory neurons are chosen at chip programming time; this means that the neuron type does not change during chip operation (although, with the solution we are about to propose, changing the neuron type on-the-fly would not be a problem). Hence, we can embed this information in the neuron description by adding a bit to the synapses weights memory row that, depending on its value, marks the neuron as excitatory or inhibitory.

Synapse weights storage in memory.

Suppose that, given a pre-synaptic neuron, all its $M$ output synapses are stored in a memory row of $n$-bit words, where $n$ is the number of bits to which the synapse weight is quantized. At the end of the memory row $j$, we add a bit denoted with $e_{j}$ that identifies the neuron type and that is read together with the weights from the same memory row: if the pre-synaptic neuron $j$ is excitatory, $e_{j}=1$ and the weight is added; if it is inhibitory, $e_{j}=0$ and the weight is subtracted. In this way, the $e_{j}$ field of the synapse can drive the adder directly.

Using the neuron type bit to drive the adder.
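
To make the row layout concrete, here is a small sketch that packs $M$ quantized weights (unsigned, for simplicity) and the type bit $e_{j}$ into a single integer acting as the memory row; sizes and helper names are illustrative only:

n, M = 4, 8                                  # example: 4-bit weights, M = 8 synapses per row

def pack_row(weights, e_j):
    row = 0
    for w in weights:                        # concatenate M n-bit weights...
        row = (row << n) | (w & (2**n - 1))
    return (row << 1) | (e_j & 1)            # ...and append the neuron type bit

def type_bit(row):
    return row & 1                           # e_j = 1: excitatory (add), e_j = 0: inhibitory (subtract)

row = pack_row([3, 1, 0, 2, 7, 5, 4, 6], e_j=1)   # an excitatory pre-synaptic neuron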

Leakage

Let us introduce the characteristic feature of the LIF neuron: the leakage! We choose a (constant) leakage factor $\beta$ and multiply it by $v_{i}[t]$ to obtain $v_{i}[t+1]$:
$$ v_{i}[t+1] = \beta \cdot v_{i}[t] $$
This is lower than $v_{i}[t]$, since some current has leaked from the membrane, which we model through $\beta$. However, multiplication is an expensive operation in hardware; furthermore, the leakage factor is smaller than one, so we would need to perform a fixed-point multiplication or, even worse, a division! How can we solve this problem?

If we choose $\beta$ as a power of $\frac{1}{2}$, such as $2^{-n}$, the multiplication becomes equivalent to an $n$-position right shift! A really hardware-friendly operation!

Leakage circuit.

In this circuit, an $n$-position right-shift block, denoted with the symbol >>, is placed on one of the adder inputs to obtain $\beta \cdot v_{i}[t]$ from $v_{i}[t]$. A multiplexer is introduced to choose between the synapse weight $w_{ij}$ and the leakage contribution $\beta \cdot v_{i}[t]$ as input to the adder.

Notice that the leakage always has to be subtracted from the membrane potential; hence, we cannot use $e_{j}$ directly to control the adder, but we must modify the circuit so that a subtraction is performed during a leakage operation, regardless of the value of $e_{j}$. A possible solution is to use a signal from the FSM and a logic AND gate to force the adder control signal to 0 during a leakage operation.

Simplified leakage circuit.

Denoting with adder_ctrl the signal which controls the adder and with leak_op_n the one provided by the FSM, and stating that:

  • for adder_ctrl=1, the adder performs an addition, otherwise a subtraction.
  • leak_op_n=0 when a leakage operation has to be performed.

adder_ctrl can be obtained as the logic AND operation of leak_op_n and $e_{j}$ so that, when leak_op_n=0, adder_ctrl=0 regardless of the value of $e_{j}$ and a subtraction is performed by the adder.
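
The resulting control logic is a single AND gate; in Python, with the signal names above:

def adder_ctrl(leak_op_n: int, e_j: int) -> int:
    # 1 -> the adder adds, 0 -> it subtracts. When leak_op_n = 0
    # (leakage operation), the output is forced to 0 and the leakage
    # contribution is subtracted regardless of e_j.
    return leak_op_n & e_j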

Spike mechanism

Our neuron needs to spike! If this is encoded as a single digital bit, given the spiking threshold $\theta$, we compare $v_{i}[t]$ to $\theta$ and generate a logic 1 in output when the membrane potential is larger than the threshold. This can be implemented using a comparator circuit.

Spike circuit.

The output of the comparator is used directly as spike bit.

The membrane has to be reset to the rest potential when the neuron spikes; hence, we need to subtract $\theta$ from $v_{i}[t]$ when the neuron fires. This can be done by driving the input multiplexer of the membrane register to provide $\theta$ in input to the adder, which performs a subtraction.

Membrane reset by threshold subtraction.

This circuit can be simplified:

  • by choosing $\theta = 2^m-1$, where $m$ is the bitwidth of the membrane register and the adder, having $v_{i}[t] \gt \theta$ is equivalent to having an overflow in the addition; hence, the comparison result is equal to the overflow flag of the adder, which can be provided directly in output as the spike bit.
  • instead of subtracting $\theta$ from the membrane register, we can reset $v_{i}[t]$ to 0 when a spike occurs by forcing the membrane register content to 0 with a control signal; this is equivalent to using the overflow flag of the adder as the reset signal of the membrane register. This should not be done in an actual implementation: at least a register should be added on the reset signal of the membrane register to prevent glitches in the adder circuit from resetting it when it should not be.

The resulting circuit is the following.

Membrane reset by membrane potential zeroing.
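
Both simplifications are easy to emulate with modular integer arithmetic; a sketch with an example register width $m$:

m = 8                                  # example bitwidth of membrane register and adder
THETA = 2**m - 1                       # theta = 2^m - 1

def accumulate_and_fire(v, w):
    total = v + w
    spike = int(total > THETA)         # the adder's overflow flag is the spike bit
    v_next = 0 if spike else total     # reset by zeroing the membrane register
    return v_next, spike

v, s = accumulate_and_fire(250, 10)    # overflows the 8-bit register: v = 0, s = 1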

Conclusion

Here we are, with a first prototype of our LIF layer digital circuit. In the next episode:

  • we will make it actually work. Right now, this is a functional model that needs some modifications to behave correctly as a layer of spiking neurons.
  • we will implement it in Verilog.
  • we will simulate it using open source tools, such as Verilator.

Acknowledgements

I would like to thank Jason Eshraghian, Steven Abreu and Gregor Lenz for the valuable corrections and comments that made this article way better than the original draft!

Credits

The cover image is the Loihi die, taken from WikiChip.

Authors

  • Fabrizio Ottati is a Ph.D. student in the HLS Laboratory of the Department of Electronics and Communications, Politecnico di Torino. His main interests are event-based cameras, digital hardware design and neuromorphic computing. He is one of the maintainers of two open source projects in the field of neuromorphic computing, Tonic and Expelliarmus, and one of the founders of Open Neuromorphic.

+ \ No newline at end of file diff --git a/p/spiking-neurons-a-digital-hardware-implementation/inhibitory.png b/p/spiking-neurons-a-digital-hardware-implementation/inhibitory.png new file mode 100644 index 00000000..f7f20b6f Binary files /dev/null and b/p/spiking-neurons-a-digital-hardware-implementation/inhibitory.png differ diff --git a/p/spiking-neurons-a-digital-hardware-implementation/inhibitory_huca9ce09602971950892899452f33315f_93376_1024x0_resize_box_3.png b/p/spiking-neurons-a-digital-hardware-implementation/inhibitory_huca9ce09602971950892899452f33315f_93376_1024x0_resize_box_3.png new file mode 100644 index 00000000..9170ecb3 Binary files /dev/null and b/p/spiking-neurons-a-digital-hardware-implementation/inhibitory_huca9ce09602971950892899452f33315f_93376_1024x0_resize_box_3.png differ diff --git a/p/spiking-neurons-a-digital-hardware-implementation/inhibitory_huca9ce09602971950892899452f33315f_93376_480x0_resize_box_3.png b/p/spiking-neurons-a-digital-hardware-implementation/inhibitory_huca9ce09602971950892899452f33315f_93376_480x0_resize_box_3.png new file mode 100644 index 00000000..11c3fc29 Binary files /dev/null and b/p/spiking-neurons-a-digital-hardware-implementation/inhibitory_huca9ce09602971950892899452f33315f_93376_480x0_resize_box_3.png differ diff --git a/p/spiking-neurons-a-digital-hardware-implementation/leak.png b/p/spiking-neurons-a-digital-hardware-implementation/leak.png new file mode 100644 index 00000000..4adbd8dc Binary files /dev/null and b/p/spiking-neurons-a-digital-hardware-implementation/leak.png differ diff --git a/p/spiking-neurons-a-digital-hardware-implementation/leak_hu6efb26cc282f1e71f25be47c1b1e2a9f_119349_1024x0_resize_box_3.png b/p/spiking-neurons-a-digital-hardware-implementation/leak_hu6efb26cc282f1e71f25be47c1b1e2a9f_119349_1024x0_resize_box_3.png new file mode 100644 index 00000000..e22020ca Binary files /dev/null and b/p/spiking-neurons-a-digital-hardware-implementation/leak_hu6efb26cc282f1e71f25be47c1b1e2a9f_119349_1024x0_resize_box_3.png differ diff --git a/p/spiking-neurons-a-digital-hardware-implementation/leak_hu6efb26cc282f1e71f25be47c1b1e2a9f_119349_480x0_resize_box_3.png b/p/spiking-neurons-a-digital-hardware-implementation/leak_hu6efb26cc282f1e71f25be47c1b1e2a9f_119349_480x0_resize_box_3.png new file mode 100644 index 00000000..a1650684 Binary files /dev/null and b/p/spiking-neurons-a-digital-hardware-implementation/leak_hu6efb26cc282f1e71f25be47c1b1e2a9f_119349_480x0_resize_box_3.png differ diff --git a/p/spiking-neurons-a-digital-hardware-implementation/loihi.png b/p/spiking-neurons-a-digital-hardware-implementation/loihi.png new file mode 100644 index 00000000..a7f54c8a Binary files /dev/null and b/p/spiking-neurons-a-digital-hardware-implementation/loihi.png differ diff --git a/p/spiking-neurons-a-digital-hardware-implementation/loihi_hubbeea76b711d6bc9f1399bce5b467dbf_1011331_120x120_fill_box_smart1_3.png b/p/spiking-neurons-a-digital-hardware-implementation/loihi_hubbeea76b711d6bc9f1399bce5b467dbf_1011331_120x120_fill_box_smart1_3.png new file mode 100644 index 00000000..48d14f9a Binary files /dev/null and b/p/spiking-neurons-a-digital-hardware-implementation/loihi_hubbeea76b711d6bc9f1399bce5b467dbf_1011331_120x120_fill_box_smart1_3.png differ diff --git a/p/spiking-neurons-a-digital-hardware-implementation/loihi_hubbeea76b711d6bc9f1399bce5b467dbf_1011331_1600x0_resize_box_3.png b/p/spiking-neurons-a-digital-hardware-implementation/loihi_hubbeea76b711d6bc9f1399bce5b467dbf_1011331_1600x0_resize_box_3.png new file mode 100644 
index 00000000..3a87a6d7 Binary files /dev/null and b/p/spiking-neurons-a-digital-hardware-implementation/loihi_hubbeea76b711d6bc9f1399bce5b467dbf_1011331_1600x0_resize_box_3.png differ diff --git a/p/spiking-neurons-a-digital-hardware-implementation/loihi_hubbeea76b711d6bc9f1399bce5b467dbf_1011331_800x0_resize_box_3.png b/p/spiking-neurons-a-digital-hardware-implementation/loihi_hubbeea76b711d6bc9f1399bce5b467dbf_1011331_800x0_resize_box_3.png new file mode 100644 index 00000000..55b4f368 Binary files /dev/null and b/p/spiking-neurons-a-digital-hardware-implementation/loihi_hubbeea76b711d6bc9f1399bce5b467dbf_1011331_800x0_resize_box_3.png differ diff --git a/p/spiking-neurons-a-digital-hardware-implementation/membrane-potentials.png b/p/spiking-neurons-a-digital-hardware-implementation/membrane-potentials.png new file mode 100644 index 00000000..90e23861 Binary files /dev/null and b/p/spiking-neurons-a-digital-hardware-implementation/membrane-potentials.png differ diff --git a/p/spiking-neurons-a-digital-hardware-implementation/membrane-potentials_hu5bbfd51006a32eb425545f88180c0c97_62942_1024x0_resize_box_3.png b/p/spiking-neurons-a-digital-hardware-implementation/membrane-potentials_hu5bbfd51006a32eb425545f88180c0c97_62942_1024x0_resize_box_3.png new file mode 100644 index 00000000..5265d6dc Binary files /dev/null and b/p/spiking-neurons-a-digital-hardware-implementation/membrane-potentials_hu5bbfd51006a32eb425545f88180c0c97_62942_1024x0_resize_box_3.png differ diff --git a/p/spiking-neurons-a-digital-hardware-implementation/membrane-potentials_hu5bbfd51006a32eb425545f88180c0c97_62942_480x0_resize_box_3.png b/p/spiking-neurons-a-digital-hardware-implementation/membrane-potentials_hu5bbfd51006a32eb425545f88180c0c97_62942_480x0_resize_box_3.png new file mode 100644 index 00000000..0990f20c Binary files /dev/null and b/p/spiking-neurons-a-digital-hardware-implementation/membrane-potentials_hu5bbfd51006a32eb425545f88180c0c97_62942_480x0_resize_box_3.png differ diff --git a/p/spiking-neurons-a-digital-hardware-implementation/modified-adder.png b/p/spiking-neurons-a-digital-hardware-implementation/modified-adder.png new file mode 100644 index 00000000..931f0519 Binary files /dev/null and b/p/spiking-neurons-a-digital-hardware-implementation/modified-adder.png differ diff --git a/p/spiking-neurons-a-digital-hardware-implementation/modified-adder_huafc096ac915ea43ada89c8d177c22b8a_83317_1024x0_resize_box_3.png b/p/spiking-neurons-a-digital-hardware-implementation/modified-adder_huafc096ac915ea43ada89c8d177c22b8a_83317_1024x0_resize_box_3.png new file mode 100644 index 00000000..f9ebd3e6 Binary files /dev/null and b/p/spiking-neurons-a-digital-hardware-implementation/modified-adder_huafc096ac915ea43ada89c8d177c22b8a_83317_1024x0_resize_box_3.png differ diff --git a/p/spiking-neurons-a-digital-hardware-implementation/modified-adder_huafc096ac915ea43ada89c8d177c22b8a_83317_480x0_resize_box_3.png b/p/spiking-neurons-a-digital-hardware-implementation/modified-adder_huafc096ac915ea43ada89c8d177c22b8a_83317_480x0_resize_box_3.png new file mode 100644 index 00000000..3a532697 Binary files /dev/null and b/p/spiking-neurons-a-digital-hardware-implementation/modified-adder_huafc096ac915ea43ada89c8d177c22b8a_83317_480x0_resize_box_3.png differ diff --git a/p/spiking-neurons-a-digital-hardware-implementation/neurons-connected.png b/p/spiking-neurons-a-digital-hardware-implementation/neurons-connected.png new file mode 100644 index 00000000..1c8d7876 Binary files /dev/null and 
b/p/spiking-neurons-a-digital-hardware-implementation/neurons-connected.png differ diff --git a/p/spiking-neurons-a-digital-hardware-implementation/neurons-connected_hufde41aca3783fa5bf23a53f4b1be2dc3_93109_1024x0_resize_box_3.png b/p/spiking-neurons-a-digital-hardware-implementation/neurons-connected_hufde41aca3783fa5bf23a53f4b1be2dc3_93109_1024x0_resize_box_3.png new file mode 100644 index 00000000..f23d454e Binary files /dev/null and b/p/spiking-neurons-a-digital-hardware-implementation/neurons-connected_hufde41aca3783fa5bf23a53f4b1be2dc3_93109_1024x0_resize_box_3.png differ diff --git a/p/spiking-neurons-a-digital-hardware-implementation/neurons-connected_hufde41aca3783fa5bf23a53f4b1be2dc3_93109_480x0_resize_box_3.png b/p/spiking-neurons-a-digital-hardware-implementation/neurons-connected_hufde41aca3783fa5bf23a53f4b1be2dc3_93109_480x0_resize_box_3.png new file mode 100644 index 00000000..3e8f8797 Binary files /dev/null and b/p/spiking-neurons-a-digital-hardware-implementation/neurons-connected_hufde41aca3783fa5bf23a53f4b1be2dc3_93109_480x0_resize_box_3.png differ diff --git a/p/spiking-neurons-a-digital-hardware-implementation/reset.png b/p/spiking-neurons-a-digital-hardware-implementation/reset.png new file mode 100644 index 00000000..bdd15158 Binary files /dev/null and b/p/spiking-neurons-a-digital-hardware-implementation/reset.png differ diff --git a/p/spiking-neurons-a-digital-hardware-implementation/reset_hu263ed7102fcea0fbf00e41d0963a3f26_147149_1024x0_resize_box_3.png b/p/spiking-neurons-a-digital-hardware-implementation/reset_hu263ed7102fcea0fbf00e41d0963a3f26_147149_1024x0_resize_box_3.png new file mode 100644 index 00000000..f39bbb2b Binary files /dev/null and b/p/spiking-neurons-a-digital-hardware-implementation/reset_hu263ed7102fcea0fbf00e41d0963a3f26_147149_1024x0_resize_box_3.png differ diff --git a/p/spiking-neurons-a-digital-hardware-implementation/reset_hu263ed7102fcea0fbf00e41d0963a3f26_147149_480x0_resize_box_3.png b/p/spiking-neurons-a-digital-hardware-implementation/reset_hu263ed7102fcea0fbf00e41d0963a3f26_147149_480x0_resize_box_3.png new file mode 100644 index 00000000..0c0e82cb Binary files /dev/null and b/p/spiking-neurons-a-digital-hardware-implementation/reset_hu263ed7102fcea0fbf00e41d0963a3f26_147149_480x0_resize_box_3.png differ diff --git a/p/spiking-neurons-a-digital-hardware-implementation/smart-reset.png b/p/spiking-neurons-a-digital-hardware-implementation/smart-reset.png new file mode 100644 index 00000000..3dd48d91 Binary files /dev/null and b/p/spiking-neurons-a-digital-hardware-implementation/smart-reset.png differ diff --git a/p/spiking-neurons-a-digital-hardware-implementation/smart-reset_hud76884d98bd3d3ec19919eaa8e00ac8a_133275_1024x0_resize_box_3.png b/p/spiking-neurons-a-digital-hardware-implementation/smart-reset_hud76884d98bd3d3ec19919eaa8e00ac8a_133275_1024x0_resize_box_3.png new file mode 100644 index 00000000..87631439 Binary files /dev/null and b/p/spiking-neurons-a-digital-hardware-implementation/smart-reset_hud76884d98bd3d3ec19919eaa8e00ac8a_133275_1024x0_resize_box_3.png differ diff --git a/p/spiking-neurons-a-digital-hardware-implementation/smart-reset_hud76884d98bd3d3ec19919eaa8e00ac8a_133275_480x0_resize_box_3.png b/p/spiking-neurons-a-digital-hardware-implementation/smart-reset_hud76884d98bd3d3ec19919eaa8e00ac8a_133275_480x0_resize_box_3.png new file mode 100644 index 00000000..777a9b39 Binary files /dev/null and 
b/p/spiking-neurons-a-digital-hardware-implementation/smart-reset_hud76884d98bd3d3ec19919eaa8e00ac8a_133275_480x0_resize_box_3.png differ diff --git a/p/spiking-neurons-a-digital-hardware-implementation/spike.png b/p/spiking-neurons-a-digital-hardware-implementation/spike.png new file mode 100644 index 00000000..7f9eb41e Binary files /dev/null and b/p/spiking-neurons-a-digital-hardware-implementation/spike.png differ diff --git a/p/spiking-neurons-a-digital-hardware-implementation/spike_hu96016051d2ee2ee0e53c45c191e43005_138579_1024x0_resize_box_3.png b/p/spiking-neurons-a-digital-hardware-implementation/spike_hu96016051d2ee2ee0e53c45c191e43005_138579_1024x0_resize_box_3.png new file mode 100644 index 00000000..075a64e7 Binary files /dev/null and b/p/spiking-neurons-a-digital-hardware-implementation/spike_hu96016051d2ee2ee0e53c45c191e43005_138579_1024x0_resize_box_3.png differ diff --git a/p/spiking-neurons-a-digital-hardware-implementation/spike_hu96016051d2ee2ee0e53c45c191e43005_138579_480x0_resize_box_3.png b/p/spiking-neurons-a-digital-hardware-implementation/spike_hu96016051d2ee2ee0e53c45c191e43005_138579_480x0_resize_box_3.png new file mode 100644 index 00000000..0f46b8c8 Binary files /dev/null and b/p/spiking-neurons-a-digital-hardware-implementation/spike_hu96016051d2ee2ee0e53c45c191e43005_138579_480x0_resize_box_3.png differ diff --git a/p/spiking-neurons-a-digital-hardware-implementation/subtract-leak.png b/p/spiking-neurons-a-digital-hardware-implementation/subtract-leak.png new file mode 100644 index 00000000..2c55382d Binary files /dev/null and b/p/spiking-neurons-a-digital-hardware-implementation/subtract-leak.png differ diff --git a/p/spiking-neurons-a-digital-hardware-implementation/subtract-leak_hud2de163dc45acbdca811dfc3794ca9d9_120661_1024x0_resize_box_3.png b/p/spiking-neurons-a-digital-hardware-implementation/subtract-leak_hud2de163dc45acbdca811dfc3794ca9d9_120661_1024x0_resize_box_3.png new file mode 100644 index 00000000..13abd3ce Binary files /dev/null and b/p/spiking-neurons-a-digital-hardware-implementation/subtract-leak_hud2de163dc45acbdca811dfc3794ca9d9_120661_1024x0_resize_box_3.png differ diff --git a/p/spiking-neurons-a-digital-hardware-implementation/subtract-leak_hud2de163dc45acbdca811dfc3794ca9d9_120661_480x0_resize_box_3.png b/p/spiking-neurons-a-digital-hardware-implementation/subtract-leak_hud2de163dc45acbdca811dfc3794ca9d9_120661_480x0_resize_box_3.png new file mode 100644 index 00000000..ae6d88ca Binary files /dev/null and b/p/spiking-neurons-a-digital-hardware-implementation/subtract-leak_hud2de163dc45acbdca811dfc3794ca9d9_120661_480x0_resize_box_3.png differ diff --git a/p/spiking-neurons-a-digital-hardware-implementation/synapse-encoding.png b/p/spiking-neurons-a-digital-hardware-implementation/synapse-encoding.png new file mode 100644 index 00000000..9289f5da Binary files /dev/null and b/p/spiking-neurons-a-digital-hardware-implementation/synapse-encoding.png differ diff --git a/p/spiking-neurons-a-digital-hardware-implementation/synapse-encoding_hu15da4478632c6b64a6b83607aeaa5423_8608_1024x0_resize_box_3.png b/p/spiking-neurons-a-digital-hardware-implementation/synapse-encoding_hu15da4478632c6b64a6b83607aeaa5423_8608_1024x0_resize_box_3.png new file mode 100644 index 00000000..a589e89b Binary files /dev/null and b/p/spiking-neurons-a-digital-hardware-implementation/synapse-encoding_hu15da4478632c6b64a6b83607aeaa5423_8608_1024x0_resize_box_3.png differ diff --git 
a/p/spiking-neurons-a-digital-hardware-implementation/synapse-encoding_hu15da4478632c6b64a6b83607aeaa5423_8608_480x0_resize_box_3.png b/p/spiking-neurons-a-digital-hardware-implementation/synapse-encoding_hu15da4478632c6b64a6b83607aeaa5423_8608_480x0_resize_box_3.png new file mode 100644 index 00000000..176de58e Binary files /dev/null and b/p/spiking-neurons-a-digital-hardware-implementation/synapse-encoding_hu15da4478632c6b64a6b83607aeaa5423_8608_480x0_resize_box_3.png differ diff --git a/p/spiking-neurons-a-digital-hardware-implementation/synapses-weights.png b/p/spiking-neurons-a-digital-hardware-implementation/synapses-weights.png new file mode 100644 index 00000000..ee6e0fcd Binary files /dev/null and b/p/spiking-neurons-a-digital-hardware-implementation/synapses-weights.png differ diff --git a/p/spiking-neurons-a-digital-hardware-implementation/synapses-weights_hu67c5768f8825891815b89681ae240b1d_83094_1024x0_resize_box_3.png b/p/spiking-neurons-a-digital-hardware-implementation/synapses-weights_hu67c5768f8825891815b89681ae240b1d_83094_1024x0_resize_box_3.png new file mode 100644 index 00000000..d51effae Binary files /dev/null and b/p/spiking-neurons-a-digital-hardware-implementation/synapses-weights_hu67c5768f8825891815b89681ae240b1d_83094_1024x0_resize_box_3.png differ diff --git a/p/spiking-neurons-a-digital-hardware-implementation/synapses-weights_hu67c5768f8825891815b89681ae240b1d_83094_480x0_resize_box_3.png b/p/spiking-neurons-a-digital-hardware-implementation/synapses-weights_hu67c5768f8825891815b89681ae240b1d_83094_480x0_resize_box_3.png new file mode 100644 index 00000000..3cb95e89 Binary files /dev/null and b/p/spiking-neurons-a-digital-hardware-implementation/synapses-weights_hu67c5768f8825891815b89681ae240b1d_83094_480x0_resize_box_3.png differ diff --git a/page/1/index.html b/page/1/index.html new file mode 100644 index 00000000..7bc90633 --- /dev/null +++ b/page/1/index.html @@ -0,0 +1 @@ +https://open-neuromorphic.org/ \ No newline at end of file diff --git a/page/2/index.html b/page/2/index.html new file mode 100644 index 00000000..8c413d85 --- /dev/null +++ b/page/2/index.html @@ -0,0 +1,6 @@ +Open Neuromorphic

Open Neuromorphic

Organization that aims at providing one place to reference all relevant open-source projects in the neuromorphic research domain.

+ \ No newline at end of file diff --git a/page/index.html b/page/index.html new file mode 100644 index 00000000..21941c97 --- /dev/null +++ b/page/index.html @@ -0,0 +1,6 @@ +Pages

Section

6 pages

Pages

+ \ No newline at end of file diff --git a/page/index.xml b/page/index.xml new file mode 100644 index 00000000..7784b423 --- /dev/null +++ b/page/index.xml @@ -0,0 +1,9 @@ +Pages on Open Neuromorphichttps://open-neuromorphic.org/page/Recent content in Pages on Open NeuromorphicHugo -- gohugo.ioen-usAbouthttps://open-neuromorphic.org/about/Mon, 01 Jan 0001 00:00:00 +0000https://open-neuromorphic.org/about/<img src="https://open-neuromorphic.org/about/ONM.png" alt="Featured image of post About" />This organisation is created by a loose collective of open source collaborators across academia, industry and individual contributors. Most of us have never met in person before but have started contributing to a common or each other&rsquo;s projects! What connects us is the love for building tools that can be used in the neuromorphic community and we want to share ownership of this vision. If you feel like that resonates with you, please don&rsquo;t hesitate to get in touch!Eventshttps://open-neuromorphic.org/events/Mon, 01 Jan 0001 00:00:00 +0000https://open-neuromorphic.org/events/<img src="https://open-neuromorphic.org/img/ONM.png" alt="Featured image of post Events" />Upcoming events Join our newsletter to be updated on new events and get a reminder! +2023-09-25: Giulia D&rsquo;Angelo, What&rsquo;s catching your eye? The visual attention mechanism Giulia D&rsquo;Angelo +Time 6PM-7:30PM, CEST. +Abstract Every agent, whether animal or robotic, needs to process its visual sensory input in an efficient way, to allow understanding of, and interaction with, the environment. The process of filtering revelant information out of the continuous bombardment of complex sensory data is called selective attention.Events recordingshttps://open-neuromorphic.org/events-recordings/Mon, 01 Jan 0001 00:00:00 +0000https://open-neuromorphic.org/events-recordings/2023-01-26: Trevor Bekolay, Nengo - Applied Brain Research Recording https://youtu.be/sgu9l_bqAHM +Slides click here +Speaker&rsquo;s bio Trevor Bekolay’s primary research interest is in learning and memory. In his Master’s degree, he explored how to do supervised, unsupervised, and reinforcement learning in networks of biologically plausible spiking neurons. In his PhD, he applied this knowledge to the domain of speech to explore how sounds coming into the ear become high-level linguistic representations, and how those representations become sequences of vocal tract movements that produce speech.Getting involvedhttps://open-neuromorphic.org/getting-involved/Mon, 01 Jan 0001 00:00:00 +0000https://open-neuromorphic.org/getting-involved/The easiest way to get in touch is probably through Discord, where we discuss research topics, job opportunities, open hardware, spiking neural network training and much more. We&rsquo;d be delighted to have you join! If you feel like contributing to ONM but you&rsquo;re not exactly sure how, here are some ideas to get you started: +Link an interesting open source repository to our collection so that others can find it too!Resourceshttps://open-neuromorphic.org/resources/Mon, 01 Jan 0001 00:00:00 +0000https://open-neuromorphic.org/resources/Please check our Github repository for a list of neuromorphic open source software and hardware!Teamhttps://open-neuromorphic.org/team/Mon, 01 Jan 0001 00:00:00 +0000https://open-neuromorphic.org/team/Fabrizio Ottati Fabrizio Ottati +Fabrizio Ottati is a Ph.D. 
student in the Department of Electronics and Communications of Politecnico di Torino, under the supervision of Professor Luciano Lavagno and Professor Mario Roberto Casu. +His main interests are event-based cameras, digital hardware design and automation, spiking neural networks and Piedmontese red wine. He is the maintainer of two open source projects in the field of neuromorphic computing, Tonic and Expelliarmus. You can find more information on his website. \ No newline at end of file diff --git a/page/page/1/index.html b/page/page/1/index.html new file mode 100644 index 00000000..325899b1 --- /dev/null +++ b/page/page/1/index.html @@ -0,0 +1 @@ +https://open-neuromorphic.org/page/ \ No newline at end of file diff --git a/page/page/2/index.html b/page/page/2/index.html new file mode 100644 index 00000000..78bd027a --- /dev/null +++ b/page/page/2/index.html @@ -0,0 +1,6 @@ +Pages

Section

6 pages

Pages

Built with Hugo
Theme Stack designed by Jimmy
+ \ No newline at end of file diff --git a/post/index.html b/post/index.html new file mode 100644 index 00000000..bb6d3575 --- /dev/null +++ b/post/index.html @@ -0,0 +1,6 @@ +Posts

Section

6 pages

Posts

Built with Hugo
Theme Stack designed by Jimmy
+ \ No newline at end of file diff --git a/post/index.xml b/post/index.xml new file mode 100644 index 00000000..2df8c109 --- /dev/null +++ b/post/index.xml @@ -0,0 +1,6 @@ +Posts on Open Neuromorphichttps://open-neuromorphic.org/post/Recent content in Posts on Open NeuromorphicHugo -- gohugo.ioen-usWed, 02 Aug 2023 00:00:00 +0000SNN library benchmarkshttps://open-neuromorphic.org/p/snn-library-benchmarks/Wed, 02 Aug 2023 00:00:00 +0000https://open-neuromorphic.org/p/snn-library-benchmarks/<img src="https://open-neuromorphic.org/p/snn-library-benchmarks/framework-benchmarking-16k-header.png" alt="Featured image of post SNN library benchmarks" />SNN library benchmarks Open Neuromorphic&rsquo;s list of SNN frameworks currently counts 10 libraries, and those are only the most popular ones! As the sizes of spiking neural network models grow thanks to deep learning, optimization becomes more important for researchers and practitioners alike. Training SNNs is often slow, as the stateful networks are typically fed sequential inputs. Today&rsquo;s most popular training method is some form of backpropagation through time, whose time complexity scales with the number of time steps.Bits of Chips | TrueNorthhttps://open-neuromorphic.org/p/bits-of-chips-truenorth/Mon, 27 Mar 2023 00:00:00 +0000https://open-neuromorphic.org/p/bits-of-chips-truenorth/<img src="https://open-neuromorphic.org/p/bits-of-chips-truenorth/brain-to-chip.png" alt="Featured image of post Bits of Chips | TrueNorth" />Why do we want to emulate the brain? If you have ever read an article on neuromorphic computing, you might have noticed that in the introduction of each of these there is the same statement: &ldquo;The brain is much more powerful than any AI machine when it comes to cognitive tasks but it runs on a 10W power budget!&rdquo;. This is absolutely true: neurons in the brain communicate with each other by means of spikes, which are short voltage pulses that propagate from one neuron to the other.Efficient compression for event-based datahttps://open-neuromorphic.org/p/efficient-compression-for-event-based-data/Tue, 28 Feb 2023 00:00:00 +0000https://open-neuromorphic.org/p/efficient-compression-for-event-based-data/<img src="https://open-neuromorphic.org/p/efficient-compression-for-event-based-data/file_read_benchmark.png" alt="Featured image of post Efficient compression for event-based data" />Efficient compression for event-based data Datasets grow larger in size As neuromorphic algorithms tackle more complex tasks that are linked to bigger datasets, and event cameras mature to have higher spatial resolution, it is worth looking at how to encode that data efficiently when storing it on disk. To give you an example, Prophesee&rsquo;s latest automotive object detection dataset is some 3.5 TB in size for under 40h of recordings with a single camera.Digital neuromorphic hardware read listhttps://open-neuromorphic.org/p/digital-neuromophic-hardware-read-list/Wed, 11 Jan 2023 00:00:00 +0000https://open-neuromorphic.org/p/digital-neuromophic-hardware-read-list/<img src="https://open-neuromorphic.org/p/digital-neuromophic-hardware-read-list/frenkel-thesis.png" alt="Featured image of post Digital neuromorphic hardware read list" />Here&rsquo;s a list of articles and theses related to digital hardware designs for neuromorphic applications. I plan to update it regularly. To be redirected directly to the sources, click on the titles!
+If you are new to neuromorphic computing, I strongly suggest getting a grasp of how an SNN works from this paper. Otherwise, it will be pretty difficult to understand the content of the papers listed here. +2015 TrueNorth: Design and Tool Flow of a 65 mW 1 Million Neuron Programmable Neurosynaptic Chip, Filipp Akopyan et al.Spiking neurons: a digital hardware implementationhttps://open-neuromorphic.org/p/spiking-neurons-a-digital-hardware-implementation/Mon, 02 Jan 2023 00:00:00 +0000https://open-neuromorphic.org/p/spiking-neurons-a-digital-hardware-implementation/<img src="https://open-neuromorphic.org/p/spiking-neurons-a-digital-hardware-implementation/loihi.png" alt="Featured image of post Spiking neurons: a digital hardware implementation" />Spiking neurons In this article, we will try to model a layer of Leaky Integrate and Fire (LIF) spiking neurons using digital hardware: registers, memories, adders and so on. To do so, we will consider a single output neuron connected to multiple input neurons from a previous layer. +In a Spiking Neural Network (SNN), neurons communicate by means of spikes: these activation voltages are then converted to currents through the synapses, charging the membrane potential of the destination neuron.Open Neuromorphichttps://open-neuromorphic.org/p/open-neuromorphic/Wed, 21 Dec 2022 00:00:00 +0000https://open-neuromorphic.org/p/open-neuromorphic/<img src="https://open-neuromorphic.org/p/open-neuromorphic/ONM.png" alt="Featured image of post Open Neuromorphic" />This organisation is created by a loose collective of open source collaborators across academia, industry and individual contributors. What connects us is the love for building tools that can be used in the neuromorphic community and we want to share ownership of this vision. +Open Neuromorphic (ONM) provides the following things: +A curated list of software frameworks to make it easier to find the tool you need. A platform for your code. \ No newline at end of file diff --git a/post/page/1/index.html b/post/page/1/index.html new file mode 100644 index 00000000..3040b8fc --- /dev/null +++ b/post/page/1/index.html @@ -0,0 +1 @@ +https://open-neuromorphic.org/post/ \ No newline at end of file diff --git a/post/page/2/index.html b/post/page/2/index.html new file mode 100644 index 00000000..3dcc899c --- /dev/null +++ b/post/page/2/index.html @@ -0,0 +1,6 @@ +Posts
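The two feed entries above describe why SNN training cost scales with the number of time steps (the network is stateful and consumes its input sequentially) and how a Leaky Integrate and Fire (LIF) neuron works: leak, integrate synaptic current, fire on threshold, reset. A minimal Python sketch of a single LIF layer unrolled over time may make both points concrete; all names and constants here (lif_layer, beta, v_th) are illustrative assumptions and do not come from any of the posts or libraries mentioned.

import numpy as np

def lif_layer(inputs, weights, beta=0.9, v_th=1.0):
    """Hypothetical sketch of a leaky integrate-and-fire layer unrolled in time.

    inputs:  (T, n_in) array of input spikes (0/1), one row per time step
    weights: (n_in, n_out) synaptic weight matrix
    beta:    membrane leak factor per step (assumed value)
    v_th:    firing threshold (assumed value)
    """
    T = inputs.shape[0]
    n_out = weights.shape[1]
    v = np.zeros(n_out)                      # membrane potentials: the state
    out_spikes = np.zeros((T, n_out))
    for t in range(T):                       # sequential loop: cost grows with T
        v = beta * v + inputs[t] @ weights   # leak, then integrate synaptic current
        fired = v >= v_th                    # fire where the threshold is crossed
        v[fired] = 0.0                       # reset the neurons that fired
        out_spikes[t] = fired
    return out_spikes

# toy usage: 100 time steps, 32 input neurons, 8 output neurons
rng = np.random.default_rng(0)
spikes = (rng.random((100, 32)) < 0.1).astype(float)
w = rng.normal(0.0, 0.5, (32, 8))
print(lif_layer(spikes, w).sum(axis=0))      # spike count per output neuron

Backpropagation through time has to traverse this loop in reverse, step by step, which is why its time complexity grows with the number of time steps, as the benchmarks excerpt notes.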

Section

6 pages

Posts

Built with Hugo
Theme Stack designed by Jimmy
+ \ No newline at end of file diff --git a/resources/index.html b/resources/index.html new file mode 100644 index 00000000..18e1b3ef --- /dev/null +++ b/resources/index.html @@ -0,0 +1,9 @@ +Resources
Built with Hugo
Theme Stack designed by Jimmy
+ \ No newline at end of file diff --git a/scss/style.min.8191399262444ab68b72a18c97392f5349be20a1615d77445be51e974c144cff.css b/scss/style.min.8191399262444ab68b72a18c97392f5349be20a1615d77445be51e974c144cff.css new file mode 100644 index 00000000..96d03984 --- /dev/null +++ b/scss/style.min.8191399262444ab68b72a18c97392f5349be20a1615d77445be51e974c144cff.css @@ -0,0 +1,10 @@ +/*!* Hugo Theme Stack +* +* @author: Jimmy Cai +* @website: https://jimmycai.com +* @link: https://github.com/CaiJimmy/hugo-theme-stack*/:root{--main-top-padding:35px;--body-background:#f5f5fa;--accent-color:#34495e;--accent-color-darker:#2c3e50;--accent-color-text:#fff;--body-text-color:#bababa;--tag-border-radius:4px;--section-separation:40px;--scrollbar-thumb:hsl(0, 0%, 85%);--scrollbar-track:var(--body-background)}@media(min-width:1280px){:root{--main-top-padding:50px}}:root[data-scheme=dark]{--body-background:#303030;--accent-color:#ecf0f1;--accent-color-darker:#bdc3c7;--accent-color-text:#000;--body-text-color:rgba(255, 255, 255, 0.7);--scrollbar-thumb:hsl(0, 0%, 40%);--scrollbar-track:var(--body-background)}:root{--sys-font-family:-apple-system, BlinkMacSystemFont, "Segoe UI", "Droid Sans", "Helvetica Neue";--zh-font-family:"PingFang SC", "Hiragino Sans GB", "Droid Sans Fallback", "Microsoft YaHei";--base-font-family:"Lato", var(--sys-font-family), var(--zh-font-family), sans-serif;--code-font-family:Menlo, Monaco, Consolas, "Courier New", monospace}:root{--card-background:#fff;--card-background-selected:#eaeaea;--card-text-color-main:#000;--card-text-color-secondary:#747474;--card-text-color-tertiary:#bababa;--card-separator-color:rgba(218, 218, 218, 0.5);--card-border-radius:10px;--card-padding:20px;--small-card-padding:25px 20px}@media(min-width:768px){:root{--card-padding:25px}}@media(min-width:1280px){:root{--card-padding:30px}}@media(min-width:768px){:root{--small-card-padding:25px}}:root[data-scheme=dark]{--card-background:#424242;--card-background-selected:rgba(255, 255, 255, 0.16);--card-text-color-main:rgba(255, 255, 255, 0.9);--card-text-color-secondary:rgba(255, 255, 255, 0.7);--card-text-color-tertiary:rgba(255, 255, 255, 0.5);--card-separator-color:rgba(255, 255, 255, 0.12)}:root{--article-font-family:var(--base-font-family);--article-font-size:1.6rem;--article-line-height:1.85}@media(min-width:768px){:root{--article-font-size:1.7rem}}:root{--blockquote-border-size:4px;--blockquote-background-color:rgb(248 248 248);--heading-border-size:4px;--link-background-color:189, 195, 199;--link-background-opacity:0.5;--link-background-opacity-hover:0.7;--pre-background-color:#272822;--pre-text-color:#f8f8f2;--code-background-color:rgba(0, 0, 0, 0.12);--code-text-color:#808080;--table-border-color:#dadada;--tr-even-background-color:#efefee}:root[data-scheme=dark]{--code-background-color:#272822;--code-text-color:rgba(255, 255, 255, 0.9);--table-border-color:#717171;--tr-even-background-color:#545454;--blockquote-background-color:rgb(75 75 75)}:root{--shadow-l1:0px 4px 8px rgba(0, 0, 0, 0.04), 0px 0px 2px rgba(0, 0, 0, 0.06), 0px 0px 1px rgba(0, 0, 0, 0.04);--shadow-l2:0px 10px 20px rgba(0, 0, 0, 0.04), 0px 2px 6px rgba(0, 0, 0, 0.04), 0px 0px 1px rgba(0, 0, 0, 0.04);--shadow-l3:0px 10px 20px rgba(0, 0, 0, 0.04), 0px 2px 6px rgba(0, 0, 0, 0.04), 0px 0px 1px rgba(0, 0, 0, 0.04);--shadow-l4:0px 24px 32px rgba(0, 0, 0, 0.04), 0px 16px 24px rgba(0, 0, 0, 0.04), 0px 4px 8px rgba(0, 0, 0, 0.04), + 0px 0px 1px rgba(0, 0, 0, 
0.04)}[data-scheme=light]{--pre-text-color:#272822;--pre-background-color:#fafafa}[data-scheme=light] .chroma{color:#272822;background-color:#fafafa}[data-scheme=light] .chroma .err{color:#960050}[data-scheme=light] .chroma .lntd{vertical-align:top;padding:0;margin:0;border:0}[data-scheme=light] .chroma .lntable{border-spacing:0;padding:0;margin:0;border:0;width:100%;display:block}[data-scheme=light] .chroma .lntable>tbody{display:block;width:100%}[data-scheme=light] .chroma .lntable>tbody>tr{display:flex;width:100%}[data-scheme=light] .chroma .lntable>tbody>tr>td:last-child{overflow-x:auto}[data-scheme=light] .chroma .hl{display:block;width:100%;background-color:#ffc}[data-scheme=light] .chroma .lnt{margin-right:.4em;padding:0 .4em;color:#7f7f7f}[data-scheme=light] .chroma .ln{margin-right:.4em;padding:0 .4em;color:#7f7f7f}[data-scheme=light] .chroma .k{color:#00a8c8}[data-scheme=light] .chroma .kc{color:#00a8c8}[data-scheme=light] .chroma .kd{color:#00a8c8}[data-scheme=light] .chroma .kn{color:#f92672}[data-scheme=light] .chroma .kp{color:#00a8c8}[data-scheme=light] .chroma .kr{color:#00a8c8}[data-scheme=light] .chroma .kt{color:#00a8c8}[data-scheme=light] .chroma .n{color:#111}[data-scheme=light] .chroma .na{color:#75af00}[data-scheme=light] .chroma .nb{color:#111}[data-scheme=light] .chroma .bp{color:#111}[data-scheme=light] .chroma .nc{color:#75af00}[data-scheme=light] .chroma .no{color:#00a8c8}[data-scheme=light] .chroma .nd{color:#75af00}[data-scheme=light] .chroma .ni{color:#111}[data-scheme=light] .chroma .ne{color:#75af00}[data-scheme=light] .chroma .nf{color:#75af00}[data-scheme=light] .chroma .fm{color:#111}[data-scheme=light] .chroma .nl{color:#111}[data-scheme=light] .chroma .nn{color:#111}[data-scheme=light] .chroma .nx{color:#75af00}[data-scheme=light] .chroma .py{color:#111}[data-scheme=light] .chroma .nt{color:#f92672}[data-scheme=light] .chroma .nv{color:#111}[data-scheme=light] .chroma .vc{color:#111}[data-scheme=light] .chroma .vg{color:#111}[data-scheme=light] .chroma .vi{color:#111}[data-scheme=light] .chroma .vm{color:#111}[data-scheme=light] .chroma .l{color:#ae81ff}[data-scheme=light] .chroma .ld{color:#d88200}[data-scheme=light] .chroma .s{color:#d88200}[data-scheme=light] .chroma .sa{color:#d88200}[data-scheme=light] .chroma .sb{color:#d88200}[data-scheme=light] .chroma .sc{color:#d88200}[data-scheme=light] .chroma .dl{color:#d88200}[data-scheme=light] .chroma .sd{color:#d88200}[data-scheme=light] .chroma .s2{color:#d88200}[data-scheme=light] .chroma .se{color:#ae81ff}[data-scheme=light] .chroma .sh{color:#d88200}[data-scheme=light] .chroma .si{color:#d88200}[data-scheme=light] .chroma .sx{color:#d88200}[data-scheme=light] .chroma .sr{color:#d88200}[data-scheme=light] .chroma .s1{color:#d88200}[data-scheme=light] .chroma .ss{color:#d88200}[data-scheme=light] .chroma .m{color:#ae81ff}[data-scheme=light] .chroma .mb{color:#ae81ff}[data-scheme=light] .chroma .mf{color:#ae81ff}[data-scheme=light] .chroma .mh{color:#ae81ff}[data-scheme=light] .chroma .mi{color:#ae81ff}[data-scheme=light] .chroma .il{color:#ae81ff}[data-scheme=light] .chroma .mo{color:#ae81ff}[data-scheme=light] .chroma .o{color:#f92672}[data-scheme=light] .chroma .ow{color:#f92672}[data-scheme=light] .chroma .p{color:#111}[data-scheme=light] .chroma .c{color:#75715e}[data-scheme=light] .chroma .ch{color:#75715e}[data-scheme=light] .chroma .cm{color:#75715e}[data-scheme=light] .chroma .c1{color:#75715e}[data-scheme=light] .chroma .cs{color:#75715e}[data-scheme=light] .chroma 
.cp{color:#75715e}[data-scheme=light] .chroma .cpf{color:#75715e}[data-scheme=light] .chroma .gd{color:#f92672}[data-scheme=light] .chroma .ge{font-style:italic}[data-scheme=light] .chroma .gi{color:#75af00}[data-scheme=light] .chroma .gs{font-weight:700}[data-scheme=light] .chroma .gu{color:#75715e}[data-scheme=dark]{--pre-text-color:#f8f8f2;--pre-background-color:#272822}[data-scheme=dark] .chroma{color:#f8f8f2;background-color:#272822}[data-scheme=dark] .chroma .err{color:#bb0064}[data-scheme=dark] .chroma .lntd{vertical-align:top;padding:0;margin:0;border:0}[data-scheme=dark] .chroma .lntable{border-spacing:0;padding:0;margin:0;border:0;width:100%;display:block}[data-scheme=dark] .chroma .lntable>tbody{display:block;width:100%}[data-scheme=dark] .chroma .lntable>tbody>tr{display:flex;width:100%}[data-scheme=dark] .chroma .lntable>tbody>tr>td:last-child{overflow-x:auto}[data-scheme=dark] .chroma .hl{display:block;width:100%;background-color:#ffc}[data-scheme=dark] .chroma .lnt{margin-right:.4em;padding:0 .4em;color:#7f7f7f}[data-scheme=dark] .chroma .ln{margin-right:.4em;padding:0 .4em;color:#7f7f7f}[data-scheme=dark] .chroma .k{color:#66d9ef}[data-scheme=dark] .chroma .kc{color:#66d9ef}[data-scheme=dark] .chroma .kd{color:#66d9ef}[data-scheme=dark] .chroma .kn{color:#f92672}[data-scheme=dark] .chroma .kp{color:#66d9ef}[data-scheme=dark] .chroma .kr{color:#66d9ef}[data-scheme=dark] .chroma .kt{color:#66d9ef}[data-scheme=dark] .chroma .n{color:#f8f8f2}[data-scheme=dark] .chroma .na{color:#a6e22e}[data-scheme=dark] .chroma .nb{color:#f8f8f2}[data-scheme=dark] .chroma .bp{color:#f8f8f2}[data-scheme=dark] .chroma .nc{color:#a6e22e}[data-scheme=dark] .chroma .no{color:#66d9ef}[data-scheme=dark] .chroma .nd{color:#a6e22e}[data-scheme=dark] .chroma .ni{color:#f8f8f2}[data-scheme=dark] .chroma .ne{color:#a6e22e}[data-scheme=dark] .chroma .nf{color:#a6e22e}[data-scheme=dark] .chroma .fm{color:#f8f8f2}[data-scheme=dark] .chroma .nl{color:#f8f8f2}[data-scheme=dark] .chroma .nn{color:#f8f8f2}[data-scheme=dark] .chroma .nx{color:#a6e22e}[data-scheme=dark] .chroma .py{color:#f8f8f2}[data-scheme=dark] .chroma .nt{color:#f92672}[data-scheme=dark] .chroma .nv{color:#f8f8f2}[data-scheme=dark] .chroma .vc{color:#f8f8f2}[data-scheme=dark] .chroma .vg{color:#f8f8f2}[data-scheme=dark] .chroma .vi{color:#f8f8f2}[data-scheme=dark] .chroma .vm{color:#f8f8f2}[data-scheme=dark] .chroma .l{color:#ae81ff}[data-scheme=dark] .chroma .ld{color:#e6db74}[data-scheme=dark] .chroma .s{color:#e6db74}[data-scheme=dark] .chroma .sa{color:#e6db74}[data-scheme=dark] .chroma .sb{color:#e6db74}[data-scheme=dark] .chroma .sc{color:#e6db74}[data-scheme=dark] .chroma .dl{color:#e6db74}[data-scheme=dark] .chroma .sd{color:#e6db74}[data-scheme=dark] .chroma .s2{color:#e6db74}[data-scheme=dark] .chroma .se{color:#ae81ff}[data-scheme=dark] .chroma .sh{color:#e6db74}[data-scheme=dark] .chroma .si{color:#e6db74}[data-scheme=dark] .chroma .sx{color:#e6db74}[data-scheme=dark] .chroma .sr{color:#e6db74}[data-scheme=dark] .chroma .s1{color:#e6db74}[data-scheme=dark] .chroma .ss{color:#e6db74}[data-scheme=dark] .chroma .m{color:#ae81ff}[data-scheme=dark] .chroma .mb{color:#ae81ff}[data-scheme=dark] .chroma .mf{color:#ae81ff}[data-scheme=dark] .chroma .mh{color:#ae81ff}[data-scheme=dark] .chroma .mi{color:#ae81ff}[data-scheme=dark] .chroma .il{color:#ae81ff}[data-scheme=dark] .chroma .mo{color:#ae81ff}[data-scheme=dark] .chroma .o{color:#f92672}[data-scheme=dark] .chroma .ow{color:#f92672}[data-scheme=dark] .chroma 
.p{color:#f8f8f2}[data-scheme=dark] .chroma .c{color:#75715e}[data-scheme=dark] .chroma .ch{color:#75715e}[data-scheme=dark] .chroma .cm{color:#75715e}[data-scheme=dark] .chroma .c1{color:#75715e}[data-scheme=dark] .chroma .cs{color:#75715e}[data-scheme=dark] .chroma .cp{color:#75715e}[data-scheme=dark] .chroma .cpf{color:#75715e}[data-scheme=dark] .chroma .gd{color:#f92672}[data-scheme=dark] .chroma .ge{font-style:italic}[data-scheme=dark] .chroma .gi{color:#a6e22e}[data-scheme=dark] .chroma .gs{font-weight:700}[data-scheme=dark] .chroma .gu{color:#75715e}:root{--menu-icon-separation:40px;--container-padding:15px;--widget-separation:var(--section-separation)}.container{margin-left:auto;margin-right:auto}.container .left-sidebar{order:-3;max-width:var(--left-sidebar-max-width)}.container .right-sidebar{order:-1;max-width:var(--right-sidebar-max-width)}@media(min-width:1024px){.container .right-sidebar{display:flex}}@media(min-width:768px){.container.extended{max-width:1024px;--left-sidebar-max-width:25%;--right-sidebar-max-width:30%}}@media(min-width:1024px){.container.extended{max-width:1280px;--left-sidebar-max-width:20%;--right-sidebar-max-width:30%}}@media(min-width:1280px){.container.extended{max-width:1536px;--left-sidebar-max-width:15%;--right-sidebar-max-width:25%}}@media(min-width:768px){.container.compact{--left-sidebar-max-width:25%;max-width:768px}}@media(min-width:1024px){.container.compact{max-width:1024px;--left-sidebar-max-width:20%}}@media(min-width:1280px){.container.compact{max-width:1280px}}.flex{display:flex;flex-direction:row}.flex.column{flex-direction:column}.flex.on-phone--column{flex-direction:column}@media(min-width:768px){.flex.on-phone--column{flex-direction:unset}}.flex .full-width{width:100%}main.main{order:-2;min-width:0;max-width:100%;flex-grow:1;display:flex;flex-direction:column;gap:var(--section-separation)}@media(min-width:768px){main.main{padding-top:var(--main-top-padding)}}.main-container{min-height:100vh;align-items:flex-start;padding:0 15px;gap:var(--section-separation);padding-top:var(--main-top-padding)}@media(min-width:768px){.main-container{padding:0 20px}}/*!normalize.css v8.0.1 | MIT License | github.com/necolas/normalize.css*/html{line-height:1.15;-webkit-text-size-adjust:100%}body{margin:0}main{display:block}h1{font-size:2em;margin:.67em 0}hr{box-sizing:content-box;height:0;overflow:visible}pre{font-family:monospace,monospace;font-size:1em}a{background-color:transparent}abbr[title]{border-bottom:none;text-decoration:underline;text-decoration:underline dotted}b,strong{font-weight:bolder}code,kbd,samp{font-family:monospace,monospace;font-size:1em}small{font-size:80%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sub{bottom:-.25em}sup{top:-.5em}img{border-style:none}button,input,optgroup,select,textarea{font-family:inherit;font-size:100%;line-height:1.15;margin:0}button,input{overflow:visible}button,select{text-transform:none}button,[type=button],[type=reset],[type=submit]{-webkit-appearance:button}button::-moz-focus-inner,[type=button]::-moz-focus-inner,[type=reset]::-moz-focus-inner,[type=submit]::-moz-focus-inner{border-style:none;padding:0}button:-moz-focusring,[type=button]:-moz-focusring,[type=reset]:-moz-focusring,[type=submit]:-moz-focusring{outline:1px dotted ButtonText}fieldset{padding:.35em .75em 
.625em}legend{box-sizing:border-box;color:inherit;display:table;max-width:100%;padding:0;white-space:normal}progress{vertical-align:baseline}textarea{overflow:auto}[type=checkbox],[type=radio]{box-sizing:border-box;padding:0}[type=number]::-webkit-inner-spin-button,[type=number]::-webkit-outer-spin-button{height:auto}[type=search]{-webkit-appearance:textfield;outline-offset:-2px}[type=search]::-webkit-search-decoration{-webkit-appearance:none}::-webkit-file-upload-button{-webkit-appearance:button;font:inherit}details{display:block}summary{display:list-item}template{display:none}[hidden]{display:none}/*!* Hamburgers +* @description Tasty CSS-animated hamburgers +* @author Jonathan Suh @jonsuh +* @site https://jonsuh.com/hamburgers +* @link https://github.com/jonsuh/hamburgers*/.hamburger{padding-top:10px;display:inline-block;cursor:pointer;transition-property:opacity,filter;transition-duration:.15s;transition-timing-function:linear;font:inherit;color:inherit;text-transform:none;background-color:transparent;border:0;margin:0;overflow:visible}.hamburger:hover{opacity:.7}.hamburger.is-active:hover{opacity:.7}.hamburger.is-active .hamburger-inner,.hamburger.is-active .hamburger-inner::before,.hamburger.is-active .hamburger-inner::after{background-color:#000}.hamburger-box{width:30px;height:24px;display:inline-block;position:relative}.hamburger-inner{display:block;top:50%;margin-top:-2px}.hamburger-inner,.hamburger-inner::before,.hamburger-inner::after{width:30px;height:2px;background-color:var(--card-text-color-main);border-radius:4px;position:absolute;transition-property:transform;transition-duration:.15s;transition-timing-function:ease}.hamburger-inner::before,.hamburger-inner::after{content:"";display:block}.hamburger-inner::before{top:-10px}.hamburger-inner::after{bottom:-10px}.hamburger--spin .hamburger-inner{transition-duration:.22s;transition-timing-function:cubic-bezier(.55,.055,.675,.19)}.hamburger--spin .hamburger-inner::before{transition:top .1s .25s ease-in,opacity .1s ease-in}.hamburger--spin .hamburger-inner::after{transition:bottom .1s .25s ease-in,transform .22s cubic-bezier(.55,.055,.675,.19)}.hamburger--spin.is-active .hamburger-inner{transform:rotate(225deg);transition-delay:.12s;transition-timing-function:cubic-bezier(.215,.61,.355,1)}.hamburger--spin.is-active .hamburger-inner::before{top:0;opacity:0;transition:top .1s ease-out,opacity .1s .12s ease-out}.hamburger--spin.is-active .hamburger-inner::after{bottom:0;transform:rotate(-90deg);transition:bottom .1s ease-out,transform .22s .12s cubic-bezier(.215,.61,.355,1)}#toggle-menu{background:0 0;border:none;position:absolute;right:0;top:0;z-index:2;cursor:pointer;outline:none}[dir=rtl] #toggle-menu{left:0;right:auto}@media(min-width:768px){#toggle-menu{display:none}}#toggle-menu.is-active .hamburger-inner,#toggle-menu.is-active .hamburger-inner::before,#toggle-menu.is-active .hamburger-inner::after{background-color:var(--accent-color)}.menu{padding-left:0;list-style:none;flex-direction:column;overflow-y:auto;flex-grow:1;font-size:1.4rem;background-color:var(--card-background);box-shadow:var(--shadow-l1);display:none;margin:0 calc(var(--container-padding) * -1);padding:30px}@media(min-width:1280px){.menu{padding:15px 0}}.menu,.menu .menu-bottom-section{gap:30px}@media(min-width:1280px){.menu,.menu .menu-bottom-section{gap:25px}}.menu.show{display:flex}@media(min-width:768px){.menu{align-items:flex-end;display:flex;background-color:transparent;padding:0;box-shadow:none;margin:0}}.menu 
li{position:relative;vertical-align:middle;padding:0}@media(min-width:768px){.menu li{width:100%}}.menu li svg{stroke-width:1.33;width:20px;height:20px}.menu li a{height:100%;display:inline-flex;align-items:center;color:var(--body-text-color);gap:var(--menu-icon-separation)}.menu li span{flex:1}.menu li.current a{color:var(--accent-color);font-weight:700}.menu .menu-bottom-section{margin-top:auto;display:flex;flex-direction:column;width:100%}.social-menu{list-style:none;padding:0;margin:0;display:flex;flex-direction:row;gap:10px}.social-menu svg{width:24px;height:24px;stroke:var(--body-text-color);stroke-width:1.33}.article-list{display:flex;flex-direction:column;gap:var(--section-separation)}.article-list article{display:flex;flex-direction:column;background-color:var(--card-background);box-shadow:var(--shadow-l1);border-radius:var(--card-border-radius);overflow:hidden;transition:box-shadow .3s ease}.article-list article:hover{box-shadow:var(--shadow-l2)}.article-list article .article-image img{width:100%;height:150px;object-fit:cover}@media(min-width:768px){.article-list article .article-image img{height:200px}}@media(min-width:1280px){.article-list article .article-image img{height:250px}}.article-list article:nth-child(5n+1) .article-category a{background:#8ea885;color:#fff}.article-list article:nth-child(5n+2) .article-category a{background:#df7988;color:#fff}.article-list article:nth-child(5n+3) .article-category a{background:#0177b8;color:#fff}.article-list article:nth-child(5n+4) .article-category a{background:#ffb900;color:#fff}.article-list article:nth-child(5n+5) .article-category a{background:#6b69d6;color:#fff}.article-details{display:flex;flex-direction:column;justify-content:center;padding:var(--card-padding);gap:15px}.article-title{font-weight:600;margin:0;color:var(--card-text-color-main);font-size:2.2rem}@media(min-width:1280px){.article-title{font-size:2.4rem}}.article-title a{color:var(--card-text-color-main)}.article-title a:hover{color:var(--card-text-color-main)}.article-subtitle{font-weight:400;color:var(--card-text-color-secondary);line-height:1.5;margin:0;font-size:1.75rem}@media(min-width:1280px){.article-subtitle{font-size:2rem}}.article-title-wrapper{display:flex;flex-direction:column;gap:8px}.article-time,.article-translations{display:flex;align-items:center;color:var(--card-text-color-tertiary);gap:15px;flex-wrap:wrap}.article-time svg,.article-translations svg{vertical-align:middle;width:20px;height:20px;stroke-width:1.33}.article-time time,.article-time a,.article-translations time,.article-translations a{font-size:1.4rem;color:var(--card-text-color-tertiary)}.article-time>div,.article-translations>div{display:inline-flex;align-items:center;gap:15px}.article-category,.article-tags{display:flex;gap:10px}.article-category a,.article-tags a{color:var(--accent-color-text);background-color:var(--accent-color);padding:8px 16px;border-radius:var(--tag-border-radius);display:inline-block;font-size:1.4rem;transition:background-color .5s ease}.article-category a:hover,.article-tags a:hover{color:var(--accent-color-text);background-color:var(--accent-color-darker)}.article-list--compact{border-radius:var(--card-border-radius);box-shadow:var(--shadow-l1);background-color:var(--card-background);--image-size:50px}@media(min-width:768px){.article-list--compact{--image-size:60px}}.article-list--compact article>a{display:flex;align-items:center;padding:var(--small-card-padding);gap:15px}.article-list--compact article:not(:last-of-type){border-bottom:1.5px solid 
var(--card-separator-color)}.article-list--compact article .article-details{flex-grow:1;padding:0;min-height:var(--image-size);gap:10px}.article-list--compact article .article-title{margin:0;font-size:1.6rem}@media(min-width:768px){.article-list--compact article .article-title{font-size:1.8rem}}.article-list--compact article .article-image img{width:var(--image-size);height:var(--image-size);object-fit:cover}.article-list--compact article .article-time{font-size:1.4rem}.article-list--compact article .article-preview{font-size:1.4rem;color:var(--card-text-color-tertiary);margin-top:10px;line-height:1.5}.article-list--tile article{border-radius:var(--card-border-radius);overflow:hidden;position:relative;height:350px;width:250px;box-shadow:var(--shadow-l1);transition:box-shadow .3s ease;background-color:var(--card-background)}.article-list--tile article:hover{box-shadow:var(--shadow-l2)}.article-list--tile article.has-image .article-details{background-color:rgba(0,0,0,.25)}.article-list--tile article.has-image .article-title{color:#fff}.article-list--tile article .article-image{position:absolute;top:0;left:0;width:100%;height:100%}.article-list--tile article .article-image img{width:100%;height:100%;object-fit:cover}.article-list--tile article .article-details{border-radius:var(--card-border-radius);position:relative;height:100%;width:100%;display:flex;flex-direction:column;justify-content:flex-end;z-index:2;padding:15px}@media(min-width:640px){.article-list--tile article .article-details{padding:20px}}.article-list--tile article .article-title{font-size:2rem;font-weight:500;color:var(--card-text-color-main)}@media(min-width:640px){.article-list--tile article .article-title{font-size:2.2rem}}.widget{display:flex;flex-direction:column}.widget .widget-icon svg{width:32px;height:32px;stroke-width:1.6;color:var(--body-text-color)}.tagCloud .tagCloud-tags{display:flex;flex-wrap:wrap;gap:10px}.tagCloud .tagCloud-tags a{background:var(--card-background);box-shadow:var(--shadow-l1);border-radius:var(--tag-border-radius);padding:8px 20px;color:var(--card-text-color-main);font-size:1.4rem;transition:box-shadow .3s ease}.tagCloud .tagCloud-tags a:hover{box-shadow:var(--shadow-l2)}.widget.archives .widget-archive--list{border-radius:var(--card-border-radius);box-shadow:var(--shadow-l1);background-color:var(--card-background)}.widget.archives .archives-year:not(:last-of-type){border-bottom:1.5px solid var(--card-separator-color)}.widget.archives .archives-year a{font-size:1.4rem;padding:18px 25px;display:flex}.widget.archives .archives-year a span.year{flex:1;color:var(--card-text-color-main);font-weight:700}.widget.archives .archives-year a span.count{color:var(--card-text-color-tertiary)}footer.site-footer{padding:20px 0 var(--section-separation);font-size:1.4rem;line-height:1.75}footer.site-footer:before{content:"";display:block;height:3px;width:50px;background:var(--body-text-color);margin-bottom:20px}footer.site-footer .copyright{color:var(--accent-color);font-weight:700;margin-bottom:5px}footer.site-footer .powerby{color:var(--body-text-color);font-weight:400;font-size:1.2rem}footer.site-footer .powerby a{color:var(--body-text-color)}.pagination{display:flex;background-color:var(--card-background);box-shadow:var(--shadow-l1);border-radius:var(--card-border-radius);overflow:hidden;flex-wrap:wrap}.pagination .page-link{padding:16px 32px;display:inline-flex;color:var(--card-text-color-secondary)}.pagination 
.page-link.current{font-weight:700;background-color:var(--card-background-selected);color:var(--card-text-color-main)}@media(min-width:768px){.sidebar.sticky{position:sticky}}.left-sidebar{display:flex;flex-direction:column;flex-shrink:0;align-self:stretch;gap:var(--sidebar-element-separation);max-width:none;width:100%;position:relative;--sidebar-avatar-size:100px;--sidebar-element-separation:20px;--emoji-size:40px;--emoji-font-size:20px}@media(min-width:768px){.left-sidebar{width:auto;padding-top:var(--main-top-padding);padding-bottom:var(--main-top-padding);max-height:100vh}}@media(min-width:1536px){.left-sidebar{--sidebar-avatar-size:120px;--sidebar-element-separation:25px;--emoji-size:40px}}.left-sidebar.sticky{top:0}.left-sidebar.compact{--sidebar-avatar-size:80px;--emoji-size:30px;--emoji-font-size:15px}@media(min-width:1024px){.left-sidebar.compact header{flex-direction:row}}.left-sidebar.compact header .site-meta{gap:5px}.left-sidebar.compact header .site-name{font-size:1.4rem}@media(min-width:1536px){.left-sidebar.compact header .site-name{font-size:1.75rem}}.left-sidebar.compact header .site-description{font-size:1.4rem}.right-sidebar{width:100%;display:none;flex-direction:column;gap:var(--widget-separation)}.right-sidebar.sticky{top:0}@media(min-width:1024px){.right-sidebar{padding-top:var(--main-top-padding)}}.sidebar header{z-index:1;transition:box-shadow .5s ease;display:flex;flex-direction:column;gap:var(--sidebar-element-separation)}@media(min-width:768px){.sidebar header{padding:0}}.sidebar header .site-avatar{position:relative;margin:0;width:var(--sidebar-avatar-size);height:var(--sidebar-avatar-size);flex-shrink:0}.sidebar header .site-avatar .site-logo{width:100%;height:100%;border-radius:100%;box-shadow:var(--shadow-l1)}.sidebar header .site-avatar .emoji{position:absolute;width:var(--emoji-size);height:var(--emoji-size);line-height:var(--emoji-size);border-radius:100%;bottom:0;right:0;text-align:center;font-size:var(--emoji-font-size);background-color:var(--card-background);box-shadow:var(--shadow-l2)}.sidebar header .site-meta{display:flex;flex-direction:column;gap:10px;justify-content:center}.sidebar header .site-name{color:var(--accent-color);margin:0;font-size:1.6rem}@media(min-width:1536px){.sidebar header .site-name{font-size:1.8rem}}.sidebar header .site-description{color:var(--body-text-color);font-weight:400;margin:0;font-size:1.4rem}@media(min-width:1536px){.sidebar header .site-description{font-size:1.6rem}}[data-scheme=dark] #dark-mode-toggle{color:var(--accent-color);font-weight:700}[data-scheme=dark] #dark-mode-toggle .icon-tabler-toggle-left{display:none}[data-scheme=dark] #dark-mode-toggle .icon-tabler-toggle-right{display:unset}#dark-mode-toggle{margin-top:auto;color:var(--body-text-color);display:flex;align-items:center;cursor:pointer;gap:var(--menu-icon-separation)}#dark-mode-toggle .icon-tabler-toggle-right{display:none}#i18n-switch{color:var(--body-text-color);display:inline-flex;align-content:center;gap:var(--menu-icon-separation)}#i18n-switch select{border:0;background-color:transparent;color:var(--body-text-color)}#i18n-switch select 
option{color:var(--card-text-color-main);background-color:var(--card-background)}html{font-size:62.5%;overflow-y:scroll}*{box-sizing:border-box}body{background:var(--body-background);margin:0;font-family:var(--base-font-family);font-size:1.6rem;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}*{scrollbar-width:auto;scrollbar-color:var(--scrollbar-thumb)transparent}::-webkit-scrollbar{height:auto}::-webkit-scrollbar-thumb{background-color:var(--scrollbar-thumb)}::-webkit-scrollbar-track{background-color:transparent}.article-page.hide-sidebar-sm .left-sidebar{display:none}@media(min-width:768px){.article-page.hide-sidebar-sm .left-sidebar{display:inherit}}.article-page .main-article{background:var(--card-background);border-radius:var(--card-border-radius);box-shadow:var(--shadow-l1);overflow:hidden}.article-page .main-article .article-header .article-image img{height:auto;width:100%;max-height:50vh;object-fit:cover}.article-page .main-article .article-header .article-details{padding:var(--card-padding);padding-bottom:0}.article-page .main-article .article-content{margin:var(--card-padding)0;color:var(--card-text-color-main)}.article-page .main-article .article-content img{max-width:100%;height:auto}.article-page .main-article .article-footer{margin:var(--card-padding);margin-top:0}.article-page .main-article .article-footer section:not(:first-child){margin-top:var(--card-padding)}.article-page .main-article .article-footer section{color:var(--card-text-color-tertiary);text-transform:uppercase;display:flex;align-items:center;font-size:1.4rem;gap:15px}.article-page .main-article .article-footer section svg{width:20px;height:20px;stroke-width:1.33}.article-page .main-article .article-footer .article-tags{flex-wrap:wrap;text-transform:unset}.article-page .main-article .article-footer .article-copyright a,.article-page .main-article .article-footer .article-lastmod a{color:var(--body-text-color)}.article-page .main-article .article-footer .article-copyright a.link,.article-page .main-article .article-footer .article-lastmod a.link{box-shadow:unset}.widget--toc{background-color:var(--card-background);border-radius:var(--card-border-radius);box-shadow:var(--shadow-l1);display:flex;flex-direction:column;color:var(--card-text-color-main);overflow:hidden}.widget--toc ::-webkit-scrollbar-thumb{background-color:var(--card-separator-color)}.widget--toc #TableOfContents{overflow-x:auto;max-height:75vh}.widget--toc #TableOfContents ol,.widget--toc #TableOfContents ul{margin:0;padding:0}.widget--toc #TableOfContents ol{list-style-type:none;counter-reset:item}.widget--toc #TableOfContents ol li a:first-of-type::before{counter-increment:item;content:counters(item,".")". 
";font-weight:700;margin-right:5px}.widget--toc #TableOfContents>ul{padding:0 1em}.widget--toc #TableOfContents li{margin:15px 0 15px 20px;padding:5px}.widget--toc #TableOfContents li>ol,.widget--toc #TableOfContents li>ul{margin-top:10px;padding-left:10px;margin-bottom:-5px}.widget--toc #TableOfContents li>ol>li:last-child,.widget--toc #TableOfContents li>ul>li:last-child{margin-bottom:0}.widget--toc #TableOfContents li.active-class>a{border-left:var(--heading-border-size)solid var(--accent-color);font-weight:700}.widget--toc #TableOfContents ul li.active-class>a{display:block}.widget--toc #TableOfContents>ul>li.active-class>a{margin-left:calc(-25px - 1em);padding-left:calc(25px + 1em - var(--heading-border-size))}.widget--toc #TableOfContents>ol>li.active-class>a{margin-left:calc(-9px - 1em);padding-left:calc(9px + 1em - var(--heading-border-size));display:block}.widget--toc #TableOfContents>ul>li>ul>li.active-class>a{margin-left:calc(-60px - 1em);padding-left:calc(60px + 1em - var(--heading-border-size))}.widget--toc #TableOfContents>ol>li>ol>li.active-class>a{margin-left:calc(-44px - 1em);padding-left:calc(44px + 1em - var(--heading-border-size));display:block}.widget--toc #TableOfContents>ul>li>ul>li>ul>li.active-class>a{margin-left:calc(-95px - 1em);padding-left:calc(95px + 1em - var(--heading-border-size))}.widget--toc #TableOfContents>ol>li>ol>li>ol>li.active-class>a{margin-left:calc(-79px - 1em);padding-left:calc(79px + 1em - var(--heading-border-size));display:block}.widget--toc #TableOfContents>ul>li>ul>li>ul>li>ul>li.active-class>a{margin-left:calc(-130px - 1em);padding-left:calc(130px + 1em - var(--heading-border-size))}.widget--toc #TableOfContents>ol>li>ol>li>ol>li>ol>li.active-class>a{margin-left:calc(-114px - 1em);padding-left:calc(114px + 1em - var(--heading-border-size));display:block}.widget--toc #TableOfContents>ul>li>ul>li>ul>li>ul>li>ul>li.active-class>a{margin-left:calc(-165px - 1em);padding-left:calc(165px + 1em - var(--heading-border-size))}.widget--toc #TableOfContents>ol>li>ol>li>ol>li>ol>li>ol>li.active-class>a{margin-left:calc(-149px - 1em);padding-left:calc(149px + 1em - var(--heading-border-size));display:block}.related-content{overflow-x:auto;padding-bottom:15px}.related-content>.flex{float:left}.related-content article{margin-right:15px;flex-shrink:0;overflow:hidden;width:250px;height:150px}.related-content article .article-title{font-size:1.8rem;margin:0}.related-content article.has-image .article-details{padding:20px;background:linear-gradient(0deg,rgba(0,0,0,.25) 0%,rgba(0,0,0,.75) 100%)}.article-content{font-family:var(--article-font-family);font-size:var(--article-font-size);padding:0 var(--card-padding);line-height:var(--article-line-height)}.article-content>p{margin:1.5em 0}.article-content h1,.article-content h2,.article-content h3,.article-content h4,.article-content h5,.article-content h6{margin-inline-start:calc((var(--card-padding)) * -1);padding-inline-start:calc(var(--card-padding) - var(--heading-border-size));border-inline-start:var(--heading-border-size)solid var(--accent-color)}.article-content figure{text-align:center}.article-content figure figcaption{font-size:1.4rem;color:var(--card-text-color-secondary)}.article-content blockquote{position:relative;margin:1.5em 0;border-inline-start:var(--blockquote-border-size)solid var(--card-separator-color);padding:15px calc(var(--card-padding) - var(--blockquote-border-size));background-color:var(--blockquote-background-color)}.article-content blockquote 
.cite{display:block;text-align:right;font-size:.75em}.article-content blockquote .cite a{text-decoration:underline}.article-content hr{width:100px;margin:40px auto;background:var(--card-text-color-tertiary);height:2px;border:0;opacity:.55}.article-content code{color:var(--code-text-color);background-color:var(--code-background-color);padding:2px 4px;border-radius:var(--tag-border-radius);font-family:var(--code-font-family)}.article-content .gallery{position:relative;display:flex;flex-direction:row;justify-content:center;margin:1.5em 0;gap:10px}.article-content .gallery figure{margin:0}.article-content pre{overflow-x:auto;display:block;background-color:var(--pre-background-color);color:var(--pre-text-color);font-family:var(--code-font-family);line-height:1.428571429;word-break:break-all;padding:var(--card-padding)}[dir=rtl] .article-content pre{direction:ltr}.article-content pre code{color:unset;border:none;background:0 0;padding:0}.article-content .highlight{background-color:var(--pre-background-color);padding:var(--card-padding);position:relative}.article-content .highlight:hover .copyCodeButton{opacity:1}[dir=rtl] .article-content .highlight{direction:ltr}.article-content .highlight pre{margin:initial;padding:0;margin:0;width:auto}.article-content .copyCodeButton{position:absolute;top:calc(var(--card-padding));right:calc(var(--card-padding));background:var(--card-background);border:none;box-shadow:var(--shadow-l2);border-radius:var(--tag-border-radius);padding:8px 16px;color:var(--card-text-color-main);cursor:pointer;font-size:14px;opacity:0;transition:opacity .3s ease}.article-content .table-wrapper{padding:0 var(--card-padding);overflow-x:auto;display:block}.article-content table{width:100%;border-collapse:collapse;border-spacing:0;margin-bottom:1.5em;font-size:.96em}.article-content th,.article-content td{text-align:left;padding:4px 8px 4px 10px;border:1px solid var(--table-border-color)}.article-content td{vertical-align:top}.article-content tr:nth-child(even){background-color:var(--tr-even-background-color)}.article-content .twitter-tweet{color:var(--card-text-color-main)}.article-content .video-wrapper{position:relative;width:100%;height:0;padding-bottom:56.25%;overflow:hidden}.article-content .video-wrapper>iframe,.article-content .video-wrapper>video{position:absolute;width:100%;height:100%;left:0;top:0;border:0}.article-content .gitlab-embed-snippets{margin:0!important}.article-content .gitlab-embed-snippets .file-holder.snippet-file-content{margin-block-end:0!important;margin-block-start:0!important;margin-left:calc((var(--card-padding)) * -1)!important;margin-right:calc((var(--card-padding)) * -1)!important;padding:0 var(--card-padding)!important}.article-content blockquote,.article-content figure,.article-content .highlight,.article-content pre,.article-content .gallery,.article-content .video-wrapper,.article-content .table-wrapper,.article-content .s_video_simple{margin-left:calc((var(--card-padding)) * -1);margin-right:calc((var(--card-padding)) * -1);width:calc(100% + var(--card-padding) * 2)}.article-content .katex-display>.katex{overflow-x:auto;overflow-y:hidden}.section-card{border-radius:var(--card-border-radius);background-color:var(--card-background);padding:var(--small-card-padding);box-shadow:var(--shadow-l1);display:flex;align-items:center;gap:20px;--separation:15px}.section-card .section-term{font-size:2.2rem;margin:0;color:var(--card-text-color-main)}.section-card 
.section-description{font-weight:400;color:var(--card-text-color-secondary);font-size:1.6rem;margin:0}.section-card .section-details{flex-grow:1;display:flex;flex-direction:column;gap:8px}.section-card .section-image img{width:60px;height:60px}.section-card .section-count{color:var(--card-text-color-tertiary);font-size:1.4rem;margin:0;font-weight:700;text-transform:uppercase}.subsection-list{overflow-x:auto}.subsection-list .article-list--tile{display:flex;padding-bottom:15px}.subsection-list .article-list--tile article{width:250px;height:150px;margin-right:20px;flex-shrink:0}.subsection-list .article-list--tile article .article-title{margin:0;font-size:1.8rem}.subsection-list .article-list--tile article .article-details{padding:20px}.not-found-card{background-color:var(--card-background);box-shadow:var(--shadow-l1);border-radius:var(--card-border-radius);padding:var(--card-padding)}.search-form{position:relative;--button-size:80px}.search-form.widget{--button-size:60px}.search-form.widget label{font-size:1.3rem;top:10px}.search-form.widget input{font-size:1.5rem;padding:30px 20px 15px}.search-form p{position:relative;margin:0}.search-form label{position:absolute;top:15px;inset-inline-start:20px;font-size:1.4rem;color:var(--card-text-color-tertiary)}.search-form input{padding:40px 20px 20px;border-radius:var(--card-border-radius);background-color:var(--card-background);box-shadow:var(--shadow-l1);color:var(--card-text-color-main);width:100%;border:0;-webkit-appearance:none;transition:box-shadow .3s ease;font-size:1.8rem}.search-form input:focus{outline:0;box-shadow:var(--shadow-l2)}.search-form button{position:absolute;inset-inline-end:0;top:0;height:100%;width:var(--button-size);cursor:pointer;background-color:transparent;border:0;padding:0 10px}.search-form button:focus{outline:0}.search-form button:focus svg{stroke-width:2;color:var(--accent-color)}.search-form button svg{color:var(--card-text-color-secondary);stroke-width:1.33;transition:all .3s ease;width:20px;height:20px}a{text-decoration:none;color:var(--accent-color)}a:hover{color:var(--accent-color-darker)}a.link{box-shadow:0 -2px rgba(var(--link-background-color),var(--link-background-opacity))inset;transition:all .3s ease}a.link:hover{box-shadow:0 calc(-1rem * var(--article-line-height))rgba(var(--link-background-color),var(--link-background-opacity-hover))inset}.section-title{text-transform:uppercase;margin-top:0;margin-bottom:10px;display:block;font-size:1.6rem;font-weight:700;color:var(--body-text-color)}.section-title a{color:var(--body-text-color)} \ No newline at end of file diff --git a/sitemap.xml b/sitemap.xml new file mode 100644 index 00000000..8e0e2d74 --- /dev/null +++ b/sitemap.xml @@ -0,0 +1 @@ 
+https://open-neuromorphic.org/2023-08-02T00:00:00+00:00https://open-neuromorphic.org/tags/framework/2023-08-02T00:00:00+00:00https://open-neuromorphic.org/tags/library/2023-08-02T00:00:00+00:00https://open-neuromorphic.org/post/2023-08-02T00:00:00+00:00https://open-neuromorphic.org/tags/pytorch/2023-08-02T00:00:00+00:00https://open-neuromorphic.org/tags/snn/2023-08-02T00:00:00+00:00https://open-neuromorphic.org/p/snn-library-benchmarks/2023-08-02T00:00:00+00:00https://open-neuromorphic.org/tags/2023-08-02T00:00:00+00:00https://open-neuromorphic.org/p/bits-of-chips-truenorth/2023-03-27T00:00:00+00:00https://open-neuromorphic.org/tags/digital/2023-03-27T00:00:00+00:00https://open-neuromorphic.org/tags/hardware/2023-03-27T00:00:00+00:00https://open-neuromorphic.org/tags/neuromorphic/2023-03-27T00:00:00+00:00https://open-neuromorphic.org/tags/research/2023-03-27T00:00:00+00:00https://open-neuromorphic.org/tags/compression/2023-02-28T00:00:00+00:00https://open-neuromorphic.org/p/efficient-compression-for-event-based-data/2023-02-28T00:00:00+00:00https://open-neuromorphic.org/tags/event-camera/2023-02-28T00:00:00+00:00https://open-neuromorphic.org/tags/events/2023-02-28T00:00:00+00:00https://open-neuromorphic.org/tags/file-encoding/2023-02-28T00:00:00+00:00https://open-neuromorphic.org/tags/ai/2023-01-11T00:00:00+00:00https://open-neuromorphic.org/p/digital-neuromophic-hardware-read-list/2023-01-11T00:00:00+00:00https://open-neuromorphic.org/tags/machine-learning/2023-01-02T00:00:00+00:00https://open-neuromorphic.org/tags/rtl/2023-01-02T00:00:00+00:00https://open-neuromorphic.org/tags/spiking/2023-01-02T00:00:00+00:00https://open-neuromorphic.org/p/spiking-neurons-a-digital-hardware-implementation/2023-01-02T00:00:00+00:00https://open-neuromorphic.org/tags/verilog/2023-01-02T00:00:00+00:00https://open-neuromorphic.org/p/open-neuromorphic/2022-12-21T00:00:00+00:00https://open-neuromorphic.org/about/https://open-neuromorphic.org/categories/https://open-neuromorphic.org/events/https://open-neuromorphic.org/events-recordings/https://open-neuromorphic.org/categories/example-category/https://open-neuromorphic.org/getting-involved/https://open-neuromorphic.org/page/https://open-neuromorphic.org/resources/https://open-neuromorphic.org/team/ \ No newline at end of file diff --git a/tags/ai/index.html b/tags/ai/index.html new file mode 100644 index 00000000..1ce5fd7d --- /dev/null +++ b/tags/ai/index.html @@ -0,0 +1,5 @@ +Tag: AI - Open Neuromorphic

Tags

2 pages

AI

Built with Hugo
Theme Stack designed by Jimmy
+ \ No newline at end of file diff --git a/tags/ai/index.xml b/tags/ai/index.xml new file mode 100644 index 00000000..3c2e0c06 --- /dev/null +++ b/tags/ai/index.xml @@ -0,0 +1,4 @@ +AI on Open Neuromorphichttps://open-neuromorphic.org/tags/ai/Recent content in AI on Open NeuromorphicHugo -- gohugo.ioen-usWed, 11 Jan 2023 00:00:00 +0000Digital neuromorphic hardware read listhttps://open-neuromorphic.org/p/digital-neuromophic-hardware-read-list/Wed, 11 Jan 2023 00:00:00 +0000https://open-neuromorphic.org/p/digital-neuromophic-hardware-read-list/<img src="https://open-neuromorphic.org/p/digital-neuromophic-hardware-read-list/frenkel-thesis.png" alt="Featured image of post Digital neuromorphic hardware read list" />Here&rsquo;s a list of articles and theses related to digital hardware designs for neuromorphic applications. I plan to update it regularly. To be redirected directly to the sources, click on the titles! +If you are new to neuromorphic computing, I strongly suggest getting a grasp of how an SNN works from this paper. Otherwise, it will be pretty difficult to understand the content of the papers listed here. +2015 TrueNorth: Design and Tool Flow of a 65 mW 1 Million Neuron Programmable Neurosynaptic Chip, Filipp Akopyan et al.Spiking neurons: a digital hardware implementationhttps://open-neuromorphic.org/p/spiking-neurons-a-digital-hardware-implementation/Mon, 02 Jan 2023 00:00:00 +0000https://open-neuromorphic.org/p/spiking-neurons-a-digital-hardware-implementation/<img src="https://open-neuromorphic.org/p/spiking-neurons-a-digital-hardware-implementation/loihi.png" alt="Featured image of post Spiking neurons: a digital hardware implementation" />Spiking neurons In this article, we will try to model a layer of Leaky Integrate and Fire (LIF) spiking neurons using digital hardware: registers, memories, adders and so on. To do so, we will consider a single output neuron connected to multiple input neurons from a previous layer. +In a Spiking Neural Network (SNN), neurons communicate by means of spikes: these activation voltages are then converted to currents through the synapses, charging the membrane potential of the destination neuron. \ No newline at end of file diff --git a/tags/ai/page/1/index.html b/tags/ai/page/1/index.html new file mode 100644 index 00000000..6f24b075 --- /dev/null +++ b/tags/ai/page/1/index.html @@ -0,0 +1 @@ +https://open-neuromorphic.org/tags/ai/ \ No newline at end of file diff --git a/tags/compression/index.html b/tags/compression/index.html new file mode 100644 index 00000000..406b003b --- /dev/null +++ b/tags/compression/index.html @@ -0,0 +1,5 @@ +Tag: compression - Open Neuromorphic

Tags

1 page

compression

Built with Hugo
Theme Stack designed by Jimmy
+ \ No newline at end of file diff --git a/tags/compression/index.xml b/tags/compression/index.xml new file mode 100644 index 00000000..72db7c92 --- /dev/null +++ b/tags/compression/index.xml @@ -0,0 +1 @@ +compression on Open Neuromorphichttps://open-neuromorphic.org/tags/compression/Recent content in compression on Open NeuromorphicHugo -- gohugo.ioen-usTue, 28 Feb 2023 00:00:00 +0000Efficient compression for event-based datahttps://open-neuromorphic.org/p/efficient-compression-for-event-based-data/Tue, 28 Feb 2023 00:00:00 +0000https://open-neuromorphic.org/p/efficient-compression-for-event-based-data/<img src="https://open-neuromorphic.org/p/efficient-compression-for-event-based-data/file_read_benchmark.png" alt="Featured image of post Efficient compression for event-based data" />Efficient compression for event-based data Datasets grow larger in size As neuromorphic algorithms tackle more complex tasks that are linked to bigger datasets, and event cameras mature to have higher spatial resolution, it is worth looking at how to encode that data efficiently when storing it on disk. To give you an example, Prophesee&rsquo;s latest automotive object detection dataset is some 3.5 TB in size for under 40h of recordings with a single camera. \ No newline at end of file diff --git a/tags/compression/page/1/index.html b/tags/compression/page/1/index.html new file mode 100644 index 00000000..1a4debaa --- /dev/null +++ b/tags/compression/page/1/index.html @@ -0,0 +1 @@ +https://open-neuromorphic.org/tags/compression/ \ No newline at end of file diff --git a/tags/digital/index.html b/tags/digital/index.html new file mode 100644 index 00000000..cd756384 --- /dev/null +++ b/tags/digital/index.html @@ -0,0 +1,5 @@ +Tag: digital - Open Neuromorphic
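The "Efficient compression for event-based data" entry above motivates looking at on-disk encodings for event streams. A rough sketch of the underlying idea follows: events are just (t, x, y, p) tuples, and packing them into a compact binary layout plus a general-purpose compressor already cuts the footprint. The 13-byte layout, the synthetic data, and the use of numpy with lzma are illustrative assumptions, not the encoding used by the linked post or by any real dataset.

import lzma
import numpy as np

# Hypothetical packed event layout: 8-byte timestamp (us), 2-byte x, 2-byte y,
# 1-byte polarity -- 13 bytes per event. Real formats differ; this is a sketch.
event_dtype = np.dtype([("t", "<u8"), ("x", "<u2"), ("y", "<u2"), ("p", "u1")])

def synthetic_events(n, width=1280, height=720):
    """Generate n fake events with monotonically increasing timestamps."""
    rng = np.random.default_rng(42)
    ev = np.zeros(n, dtype=event_dtype)
    ev["t"] = np.cumsum(rng.integers(1, 100, n))   # microsecond deltas
    ev["x"] = rng.integers(0, width, n)
    ev["y"] = rng.integers(0, height, n)
    ev["p"] = rng.integers(0, 2, n)
    return ev

events = synthetic_events(100_000)
raw = events.tobytes()
packed = lzma.compress(raw)                        # general-purpose LZMA pass
print(f"raw: {len(raw) / 1e6:.2f} MB, lzma: {len(packed) / 1e6:.2f} MB")

On real recordings the gain is larger than on this synthetic stream, because timestamps are smooth and pixel activity is spatially clustered, which is exactly what the benchmarked encodings exploit.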

Tags

3 pages

digital

Built with Hugo
Theme Stack designed by Jimmy
+ \ No newline at end of file diff --git a/tags/digital/index.xml b/tags/digital/index.xml new file mode 100644 index 00000000..ce141f55 --- /dev/null +++ b/tags/digital/index.xml @@ -0,0 +1,4 @@ +digital on Open Neuromorphichttps://open-neuromorphic.org/tags/digital/Recent content in digital on Open NeuromorphicHugo -- gohugo.ioen-usMon, 27 Mar 2023 00:00:00 +0000Bits of Chips | TrueNorthhttps://open-neuromorphic.org/p/bits-of-chips-truenorth/Mon, 27 Mar 2023 00:00:00 +0000https://open-neuromorphic.org/p/bits-of-chips-truenorth/<img src="https://open-neuromorphic.org/p/bits-of-chips-truenorth/brain-to-chip.png" alt="Featured image of post Bits of Chips | TrueNorth" />Why do we want to emulate the brain? If you have ever read an article on neuromorphic computing, you might have noticed that in the introduction of each of these there is the same statement: &ldquo;The brain is much more powerful than any AI machine when it comes to cognitive tasks but it runs on a 10W power budget!&rdquo;. This is absolutely true: neurons in the brain communicate with each other by means of spikes, which are short voltage pulses that propagate from one neuron to the other.Digital neuromorphic hardware read listhttps://open-neuromorphic.org/p/digital-neuromophic-hardware-read-list/Wed, 11 Jan 2023 00:00:00 +0000https://open-neuromorphic.org/p/digital-neuromophic-hardware-read-list/<img src="https://open-neuromorphic.org/p/digital-neuromophic-hardware-read-list/frenkel-thesis.png" alt="Featured image of post Digital neuromorphic hardware read list" />Here&rsquo;s a list of articles and theses related to digital hardware designs for neuromorphic applications. I plan to update it regularly. To be redirected directly to the sources, click on the titles! +If you are new to neuromorphic computing, I strongly suggest getting a grasp of how an SNN works from this paper. Otherwise, it will be pretty difficult to understand the content of the papers listed here. +2015 TrueNorth: Design and Tool Flow of a 65 mW 1 Million Neuron Programmable Neurosynaptic Chip, Filipp Akopyan et al.Spiking neurons: a digital hardware implementationhttps://open-neuromorphic.org/p/spiking-neurons-a-digital-hardware-implementation/Mon, 02 Jan 2023 00:00:00 +0000https://open-neuromorphic.org/p/spiking-neurons-a-digital-hardware-implementation/<img src="https://open-neuromorphic.org/p/spiking-neurons-a-digital-hardware-implementation/loihi.png" alt="Featured image of post Spiking neurons: a digital hardware implementation" />Spiking neurons In this article, we will try to model a layer of Leaky Integrate and Fire (LIF) spiking neurons using digital hardware: registers, memories, adders and so on. To do so, we will consider a single output neuron connected to multiple input neurons from a previous layer. +In a Spiking Neural Network (SNN), neurons communicate by means of spikes: these activation voltages are then converted to currents through the synapses, charging the membrane potential of the destination neuron. \ No newline at end of file diff --git a/tags/digital/page/1/index.html b/tags/digital/page/1/index.html new file mode 100644 index 00000000..305cf42e --- /dev/null +++ b/tags/digital/page/1/index.html @@ -0,0 +1 @@ +https://open-neuromorphic.org/tags/digital/ \ No newline at end of file diff --git a/tags/event-camera/index.html b/tags/event-camera/index.html new file mode 100644 index 00000000..109b89d6 --- /dev/null +++ b/tags/event-camera/index.html @@ -0,0 +1,5 @@ +Tag: event camera - Open Neuromorphic

Tags

1 page

event camera

+ \ No newline at end of file diff --git a/tags/event-camera/index.xml b/tags/event-camera/index.xml new file mode 100644 index 00000000..580c6b1f --- /dev/null +++ b/tags/event-camera/index.xml @@ -0,0 +1 @@ +event camera on Open Neuromorphichttps://open-neuromorphic.org/tags/event-camera/Recent content in event camera on Open NeuromorphicHugo -- gohugo.ioen-usTue, 28 Feb 2023 00:00:00 +0000Efficient compression for event-based datahttps://open-neuromorphic.org/p/efficient-compression-for-event-based-data/Tue, 28 Feb 2023 00:00:00 +0000https://open-neuromorphic.org/p/efficient-compression-for-event-based-data/<img src="https://open-neuromorphic.org/p/efficient-compression-for-event-based-data/file_read_benchmark.png" alt="Featured image of post Efficient compression for event-based data" />Efficient compression for event-based data Datasets grow larger in size As neuromorphic algorithms tackle more complex tasks that are linked to bigger datasets, and event cameras mature to have higher spatial resolution, it is worth looking at how to encode that data efficiently when storing it on disk. To give you an example, Prophesee&rsquo;s latest automotive object detection dataset is some 3.5 TB in size for under 40h of recordings with a single camera. \ No newline at end of file diff --git a/tags/event-camera/page/1/index.html b/tags/event-camera/page/1/index.html new file mode 100644 index 00000000..0b8da544 --- /dev/null +++ b/tags/event-camera/page/1/index.html @@ -0,0 +1 @@ +https://open-neuromorphic.org/tags/event-camera/ \ No newline at end of file diff --git a/tags/events/index.html b/tags/events/index.html new file mode 100644 index 00000000..fc132119 --- /dev/null +++ b/tags/events/index.html @@ -0,0 +1,5 @@ +Tag: events - Open Neuromorphic

Tags

1 page

events

+ \ No newline at end of file diff --git a/tags/events/index.xml b/tags/events/index.xml new file mode 100644 index 00000000..d7d94a2c --- /dev/null +++ b/tags/events/index.xml @@ -0,0 +1 @@ +events on Open Neuromorphichttps://open-neuromorphic.org/tags/events/Recent content in events on Open NeuromorphicHugo -- gohugo.ioen-usTue, 28 Feb 2023 00:00:00 +0000Efficient compression for event-based datahttps://open-neuromorphic.org/p/efficient-compression-for-event-based-data/Tue, 28 Feb 2023 00:00:00 +0000https://open-neuromorphic.org/p/efficient-compression-for-event-based-data/<img src="https://open-neuromorphic.org/p/efficient-compression-for-event-based-data/file_read_benchmark.png" alt="Featured image of post Efficient compression for event-based data" />Efficient compression for event-based data Datasets grow larger in size As neuromorphic algorithms tackle more complex tasks that are linked to bigger datasets, and event cameras mature to have higher spatial resolution, it is worth looking at how to encode that data efficiently when storing it on disk. To give you an example, Prophesee&rsquo;s latest automotive object detection dataset is some 3.5 TB in size for under 40h of recordings with a single camera. \ No newline at end of file diff --git a/tags/events/page/1/index.html b/tags/events/page/1/index.html new file mode 100644 index 00000000..e154c0ef --- /dev/null +++ b/tags/events/page/1/index.html @@ -0,0 +1 @@ +https://open-neuromorphic.org/tags/events/ \ No newline at end of file diff --git a/tags/file-encoding/index.html b/tags/file-encoding/index.html new file mode 100644 index 00000000..b44fadce --- /dev/null +++ b/tags/file-encoding/index.html @@ -0,0 +1,5 @@ +Tag: file encoding - Open Neuromorphic

Tags

1 page

file encoding

+ \ No newline at end of file diff --git a/tags/file-encoding/index.xml b/tags/file-encoding/index.xml new file mode 100644 index 00000000..a4772bc8 --- /dev/null +++ b/tags/file-encoding/index.xml @@ -0,0 +1 @@ +file encoding on Open Neuromorphichttps://open-neuromorphic.org/tags/file-encoding/Recent content in file encoding on Open NeuromorphicHugo -- gohugo.ioen-usTue, 28 Feb 2023 00:00:00 +0000Efficient compression for event-based datahttps://open-neuromorphic.org/p/efficient-compression-for-event-based-data/Tue, 28 Feb 2023 00:00:00 +0000https://open-neuromorphic.org/p/efficient-compression-for-event-based-data/<img src="https://open-neuromorphic.org/p/efficient-compression-for-event-based-data/file_read_benchmark.png" alt="Featured image of post Efficient compression for event-based data" />Efficient compression for event-based data Datasets grow larger in size As neuromorphic algorithms tackle more complex tasks that are linked to bigger datasets, and event cameras mature to have higher spatial resolution, it is worth looking at how to encode that data efficiently when storing it on disk. To give you an example, Prophesee&rsquo;s latest automotive object detection dataset is some 3.5 TB in size for under 40h of recordings with a single camera. \ No newline at end of file diff --git a/tags/file-encoding/page/1/index.html b/tags/file-encoding/page/1/index.html new file mode 100644 index 00000000..0962a307 --- /dev/null +++ b/tags/file-encoding/page/1/index.html @@ -0,0 +1 @@ +https://open-neuromorphic.org/tags/file-encoding/ \ No newline at end of file diff --git a/tags/framework/index.html b/tags/framework/index.html new file mode 100644 index 00000000..76ea9421 --- /dev/null +++ b/tags/framework/index.html @@ -0,0 +1,5 @@ +Tag: framework - Open Neuromorphic

Tags

1 page

framework

+ \ No newline at end of file diff --git a/tags/framework/index.xml b/tags/framework/index.xml new file mode 100644 index 00000000..fe7cacb5 --- /dev/null +++ b/tags/framework/index.xml @@ -0,0 +1 @@ +framework on Open Neuromorphichttps://open-neuromorphic.org/tags/framework/Recent content in framework on Open NeuromorphicHugo -- gohugo.ioen-usWed, 02 Aug 2023 00:00:00 +0000SNN library benchmarkshttps://open-neuromorphic.org/p/snn-library-benchmarks/Wed, 02 Aug 2023 00:00:00 +0000https://open-neuromorphic.org/p/snn-library-benchmarks/<img src="https://open-neuromorphic.org/p/snn-library-benchmarks/framework-benchmarking-16k-header.png" alt="Featured image of post SNN library benchmarks" />SNN library benchmarks Open Neuromorphic&rsquo;s list of SNN frameworks currently counts 10 libraries, and those are only the most popular ones! As the sizes of spiking neural network models grow thanks to deep learning, optimization becomes more important for researchers and practitioners alike. Training SNNs is often slow, as the stateful networks are typically fed sequential inputs. Today&rsquo;s most popular training method then is some form of backpropagation through time, whose time complexity scales with the number of time steps. \ No newline at end of file diff --git a/tags/framework/page/1/index.html b/tags/framework/page/1/index.html new file mode 100644 index 00000000..99b2eb21 --- /dev/null +++ b/tags/framework/page/1/index.html @@ -0,0 +1 @@ +https://open-neuromorphic.org/tags/framework/ \ No newline at end of file diff --git a/tags/hardware/index.html b/tags/hardware/index.html new file mode 100644 index 00000000..8e736dc6 --- /dev/null +++ b/tags/hardware/index.html @@ -0,0 +1,5 @@ +Tag: hardware - Open Neuromorphic

Tags

3 pages

hardware

+ \ No newline at end of file diff --git a/tags/hardware/index.xml b/tags/hardware/index.xml new file mode 100644 index 00000000..2d3b5664 --- /dev/null +++ b/tags/hardware/index.xml @@ -0,0 +1,4 @@ +hardware on Open Neuromorphichttps://open-neuromorphic.org/tags/hardware/Recent content in hardware on Open NeuromorphicHugo -- gohugo.ioen-usMon, 27 Mar 2023 00:00:00 +0000Bits of Chips | TrueNorthhttps://open-neuromorphic.org/p/bits-of-chips-truenorth/Mon, 27 Mar 2023 00:00:00 +0000https://open-neuromorphic.org/p/bits-of-chips-truenorth/<img src="https://open-neuromorphic.org/p/bits-of-chips-truenorth/brain-to-chip.png" alt="Featured image of post Bits of Chips | TrueNorth" />Why do we want to emulate the brain? If you have ever read an article on neuromorphic computing, you might have noticed that in the introduction of each of these there is the same statement: &ldquo;The brain is much powerful than any AI machine when it comes to cognitive tasks but it runs on a 10W power budget!&rdquo;. This is absolutely true: neurons in the brain communicate among each other by means of spikes, which are short voltage pulses that propagate from one neuron to the other.Digital neuromophic hardware read listhttps://open-neuromorphic.org/p/digital-neuromophic-hardware-read-list/Wed, 11 Jan 2023 00:00:00 +0000https://open-neuromorphic.org/p/digital-neuromophic-hardware-read-list/<img src="https://open-neuromorphic.org/p/digital-neuromophic-hardware-read-list/frenkel-thesis.png" alt="Featured image of post Digital neuromophic hardware read list" />Here&rsquo;s a list of articles and theses related to digital hardware designs for neuomorphic applications. I plan to update it regularly. To be redirected directly to the sources, click on the titles! +If you are new to neuromorphic computing, I strongly suggest to get a grasp of how an SNN works from this paper. Otherwise, it will be pretty difficult to understand the content of the papers listed here. +2015 TrueNorth: Design and Tool Flow of a 65 mW 1 Million Neuron Programmable Neurosynaptic Chip, Filipp Akopyan et al.Spiking neurons: a digital hardware implementationhttps://open-neuromorphic.org/p/spiking-neurons-a-digital-hardware-implementation/Mon, 02 Jan 2023 00:00:00 +0000https://open-neuromorphic.org/p/spiking-neurons-a-digital-hardware-implementation/<img src="https://open-neuromorphic.org/p/spiking-neurons-a-digital-hardware-implementation/loihi.png" alt="Featured image of post Spiking neurons: a digital hardware implementation" />Spiking neurons In this article, we will try to model a layer of Leaky Integrate and Fire (LIF) spiking neurons using digital hardware: registers, memories, adders and so on. To do so, we will consider a single output neuron connected to multiple input neurons from a previous layer. +In a Spiking Neural Network (SNN), neurons communicate by means of spikes: these activation voltages are then converted to currents through the synapses, charging the membrane potential of the destination neuron. \ No newline at end of file diff --git a/tags/hardware/page/1/index.html b/tags/hardware/page/1/index.html new file mode 100644 index 00000000..0d733623 --- /dev/null +++ b/tags/hardware/page/1/index.html @@ -0,0 +1 @@ +https://open-neuromorphic.org/tags/hardware/ \ No newline at end of file diff --git a/tags/index.html b/tags/index.html new file mode 100644 index 00000000..b3dd3ecc --- /dev/null +++ b/tags/index.html @@ -0,0 +1,8 @@ +Tags

Section

17 pages

Tags

+ \ No newline at end of file diff --git a/tags/index.xml b/tags/index.xml new file mode 100644 index 00000000..edad2d9f --- /dev/null +++ b/tags/index.xml @@ -0,0 +1 @@ +Tags on Open Neuromorphichttps://open-neuromorphic.org/tags/Recent content in Tags on Open NeuromorphicHugo -- gohugo.ioen-usWed, 02 Aug 2023 00:00:00 +0000frameworkhttps://open-neuromorphic.org/tags/framework/Wed, 02 Aug 2023 00:00:00 +0000https://open-neuromorphic.org/tags/framework/libraryhttps://open-neuromorphic.org/tags/library/Wed, 02 Aug 2023 00:00:00 +0000https://open-neuromorphic.org/tags/library/pytorchhttps://open-neuromorphic.org/tags/pytorch/Wed, 02 Aug 2023 00:00:00 +0000https://open-neuromorphic.org/tags/pytorch/snnhttps://open-neuromorphic.org/tags/snn/Wed, 02 Aug 2023 00:00:00 +0000https://open-neuromorphic.org/tags/snn/digitalhttps://open-neuromorphic.org/tags/digital/Mon, 27 Mar 2023 00:00:00 +0000https://open-neuromorphic.org/tags/digital/hardwarehttps://open-neuromorphic.org/tags/hardware/Mon, 27 Mar 2023 00:00:00 +0000https://open-neuromorphic.org/tags/hardware/neuromorphichttps://open-neuromorphic.org/tags/neuromorphic/Mon, 27 Mar 2023 00:00:00 +0000https://open-neuromorphic.org/tags/neuromorphic/researchhttps://open-neuromorphic.org/tags/research/Mon, 27 Mar 2023 00:00:00 +0000https://open-neuromorphic.org/tags/research/compressionhttps://open-neuromorphic.org/tags/compression/Tue, 28 Feb 2023 00:00:00 +0000https://open-neuromorphic.org/tags/compression/event camerahttps://open-neuromorphic.org/tags/event-camera/Tue, 28 Feb 2023 00:00:00 +0000https://open-neuromorphic.org/tags/event-camera/eventshttps://open-neuromorphic.org/tags/events/Tue, 28 Feb 2023 00:00:00 +0000https://open-neuromorphic.org/tags/events/file encodinghttps://open-neuromorphic.org/tags/file-encoding/Tue, 28 Feb 2023 00:00:00 +0000https://open-neuromorphic.org/tags/file-encoding/AIhttps://open-neuromorphic.org/tags/ai/Wed, 11 Jan 2023 00:00:00 +0000https://open-neuromorphic.org/tags/ai/machine learninghttps://open-neuromorphic.org/tags/machine-learning/Mon, 02 Jan 2023 00:00:00 +0000https://open-neuromorphic.org/tags/machine-learning/rtlhttps://open-neuromorphic.org/tags/rtl/Mon, 02 Jan 2023 00:00:00 +0000https://open-neuromorphic.org/tags/rtl/spikinghttps://open-neuromorphic.org/tags/spiking/Mon, 02 Jan 2023 00:00:00 +0000https://open-neuromorphic.org/tags/spiking/veriloghttps://open-neuromorphic.org/tags/verilog/Mon, 02 Jan 2023 00:00:00 +0000https://open-neuromorphic.org/tags/verilog/ \ No newline at end of file diff --git a/tags/library/index.html b/tags/library/index.html new file mode 100644 index 00000000..079bd773 --- /dev/null +++ b/tags/library/index.html @@ -0,0 +1,5 @@ +Tag: library - Open Neuromorphic

Tags

1 page

library

+ \ No newline at end of file diff --git a/tags/library/index.xml b/tags/library/index.xml new file mode 100644 index 00000000..e517419d --- /dev/null +++ b/tags/library/index.xml @@ -0,0 +1 @@ +library on Open Neuromorphichttps://open-neuromorphic.org/tags/library/Recent content in library on Open NeuromorphicHugo -- gohugo.ioen-usWed, 02 Aug 2023 00:00:00 +0000SNN library benchmarkshttps://open-neuromorphic.org/p/snn-library-benchmarks/Wed, 02 Aug 2023 00:00:00 +0000https://open-neuromorphic.org/p/snn-library-benchmarks/<img src="https://open-neuromorphic.org/p/snn-library-benchmarks/framework-benchmarking-16k-header.png" alt="Featured image of post SNN library benchmarks" />SNN library benchmarks Open Neuromorphic&rsquo;s list of SNN frameworks currently counts 10 libraries, and those are only the most popular ones! As the sizes of spiking neural network models grow thanks to deep learning, optimization becomes more important for researchers and practitioners alike. Training SNNs is often slow, as the stateful networks are typically fed sequential inputs. Today&rsquo;s most popular training method then is some form of backpropagation through time, whose time complexity scales with the number of time steps. \ No newline at end of file diff --git a/tags/library/page/1/index.html b/tags/library/page/1/index.html new file mode 100644 index 00000000..3e230c8f --- /dev/null +++ b/tags/library/page/1/index.html @@ -0,0 +1 @@ +https://open-neuromorphic.org/tags/library/ \ No newline at end of file diff --git a/tags/machine-learning/index.html b/tags/machine-learning/index.html new file mode 100644 index 00000000..206b95f5 --- /dev/null +++ b/tags/machine-learning/index.html @@ -0,0 +1,5 @@ +Tag: machine learning - Open Neuromorphic

Tags

1 page

machine learning

+ \ No newline at end of file diff --git a/tags/machine-learning/index.xml b/tags/machine-learning/index.xml new file mode 100644 index 00000000..f099053b --- /dev/null +++ b/tags/machine-learning/index.xml @@ -0,0 +1,2 @@ +machine learning on Open Neuromorphichttps://open-neuromorphic.org/tags/machine-learning/Recent content in machine learning on Open NeuromorphicHugo -- gohugo.ioen-usMon, 02 Jan 2023 00:00:00 +0000Spiking neurons: a digital hardware implementationhttps://open-neuromorphic.org/p/spiking-neurons-a-digital-hardware-implementation/Mon, 02 Jan 2023 00:00:00 +0000https://open-neuromorphic.org/p/spiking-neurons-a-digital-hardware-implementation/<img src="https://open-neuromorphic.org/p/spiking-neurons-a-digital-hardware-implementation/loihi.png" alt="Featured image of post Spiking neurons: a digital hardware implementation" />Spiking neurons In this article, we will try to model a layer of Leaky Integrate and Fire (LIF) spiking neurons using digital hardware: registers, memories, adders and so on. To do so, we will consider a single output neuron connected to multiple input neurons from a previous layer. +In a Spiking Neural Network (SNN), neurons communicate by means of spikes: these activation voltages are then converted to currents through the synapses, charging the membrane potential of the destination neuron. \ No newline at end of file diff --git a/tags/machine-learning/page/1/index.html b/tags/machine-learning/page/1/index.html new file mode 100644 index 00000000..2d789551 --- /dev/null +++ b/tags/machine-learning/page/1/index.html @@ -0,0 +1 @@ +https://open-neuromorphic.org/tags/machine-learning/ \ No newline at end of file diff --git a/tags/neuromorphic/index.html b/tags/neuromorphic/index.html new file mode 100644 index 00000000..8027d2be --- /dev/null +++ b/tags/neuromorphic/index.html @@ -0,0 +1,5 @@ +Tag: neuromorphic - Open Neuromorphic

Tags

2 pages

neuromorphic

+ \ No newline at end of file diff --git a/tags/neuromorphic/index.xml b/tags/neuromorphic/index.xml new file mode 100644 index 00000000..ad3cd43b --- /dev/null +++ b/tags/neuromorphic/index.xml @@ -0,0 +1,3 @@ +neuromorphic on Open Neuromorphichttps://open-neuromorphic.org/tags/neuromorphic/Recent content in neuromorphic on Open NeuromorphicHugo -- gohugo.ioen-usMon, 27 Mar 2023 00:00:00 +0000Bits of Chips | TrueNorthhttps://open-neuromorphic.org/p/bits-of-chips-truenorth/Mon, 27 Mar 2023 00:00:00 +0000https://open-neuromorphic.org/p/bits-of-chips-truenorth/<img src="https://open-neuromorphic.org/p/bits-of-chips-truenorth/brain-to-chip.png" alt="Featured image of post Bits of Chips | TrueNorth" />Why do we want to emulate the brain? If you have ever read an article on neuromorphic computing, you might have noticed that in the introduction of each of these there is the same statement: &ldquo;The brain is much powerful than any AI machine when it comes to cognitive tasks but it runs on a 10W power budget!&rdquo;. This is absolutely true: neurons in the brain communicate among each other by means of spikes, which are short voltage pulses that propagate from one neuron to the other.Digital neuromophic hardware read listhttps://open-neuromorphic.org/p/digital-neuromophic-hardware-read-list/Wed, 11 Jan 2023 00:00:00 +0000https://open-neuromorphic.org/p/digital-neuromophic-hardware-read-list/<img src="https://open-neuromorphic.org/p/digital-neuromophic-hardware-read-list/frenkel-thesis.png" alt="Featured image of post Digital neuromophic hardware read list" />Here&rsquo;s a list of articles and theses related to digital hardware designs for neuomorphic applications. I plan to update it regularly. To be redirected directly to the sources, click on the titles! +If you are new to neuromorphic computing, I strongly suggest to get a grasp of how an SNN works from this paper. Otherwise, it will be pretty difficult to understand the content of the papers listed here. +2015 TrueNorth: Design and Tool Flow of a 65 mW 1 Million Neuron Programmable Neurosynaptic Chip, Filipp Akopyan et al. \ No newline at end of file diff --git a/tags/neuromorphic/page/1/index.html b/tags/neuromorphic/page/1/index.html new file mode 100644 index 00000000..5d7c9d83 --- /dev/null +++ b/tags/neuromorphic/page/1/index.html @@ -0,0 +1 @@ +https://open-neuromorphic.org/tags/neuromorphic/ \ No newline at end of file diff --git a/tags/page/1/index.html b/tags/page/1/index.html new file mode 100644 index 00000000..bfac06e9 --- /dev/null +++ b/tags/page/1/index.html @@ -0,0 +1 @@ +https://open-neuromorphic.org/tags/ \ No newline at end of file diff --git a/tags/page/2/index.html b/tags/page/2/index.html new file mode 100644 index 00000000..458e3965 --- /dev/null +++ b/tags/page/2/index.html @@ -0,0 +1,8 @@ +Tags

Section

17 pages

Tags

+ \ No newline at end of file diff --git a/tags/page/3/index.html b/tags/page/3/index.html new file mode 100644 index 00000000..708c94a7 --- /dev/null +++ b/tags/page/3/index.html @@ -0,0 +1,8 @@ +Tags

Section

17 pages

Tags

+ \ No newline at end of file diff --git a/tags/page/4/index.html b/tags/page/4/index.html new file mode 100644 index 00000000..d6bb7719 --- /dev/null +++ b/tags/page/4/index.html @@ -0,0 +1,8 @@ +Tags

Section

17 pages

Tags

+ \ No newline at end of file diff --git a/tags/pytorch/index.html b/tags/pytorch/index.html new file mode 100644 index 00000000..43175f88 --- /dev/null +++ b/tags/pytorch/index.html @@ -0,0 +1,5 @@ +Tag: pytorch - Open Neuromorphic

Tags

1 page

pytorch

+ \ No newline at end of file diff --git a/tags/pytorch/index.xml b/tags/pytorch/index.xml new file mode 100644 index 00000000..5934564f --- /dev/null +++ b/tags/pytorch/index.xml @@ -0,0 +1 @@ +pytorch on Open Neuromorphichttps://open-neuromorphic.org/tags/pytorch/Recent content in pytorch on Open NeuromorphicHugo -- gohugo.ioen-usWed, 02 Aug 2023 00:00:00 +0000SNN library benchmarkshttps://open-neuromorphic.org/p/snn-library-benchmarks/Wed, 02 Aug 2023 00:00:00 +0000https://open-neuromorphic.org/p/snn-library-benchmarks/<img src="https://open-neuromorphic.org/p/snn-library-benchmarks/framework-benchmarking-16k-header.png" alt="Featured image of post SNN library benchmarks" />SNN library benchmarks Open Neuromorphic&rsquo;s list of SNN frameworks currently counts 10 libraries, and those are only the most popular ones! As the sizes of spiking neural network models grow thanks to deep learning, optimization becomes more important for researchers and practitioners alike. Training SNNs is often slow, as the stateful networks are typically fed sequential inputs. Today&rsquo;s most popular training method then is some form of backpropagation through time, whose time complexity scales with the number of time steps. \ No newline at end of file diff --git a/tags/pytorch/page/1/index.html b/tags/pytorch/page/1/index.html new file mode 100644 index 00000000..22724b20 --- /dev/null +++ b/tags/pytorch/page/1/index.html @@ -0,0 +1 @@ +https://open-neuromorphic.org/tags/pytorch/ \ No newline at end of file diff --git a/tags/research/index.html b/tags/research/index.html new file mode 100644 index 00000000..d8ea751d --- /dev/null +++ b/tags/research/index.html @@ -0,0 +1,5 @@ +Tag: research - Open Neuromorphic

Tags

2 pages

research

+ \ No newline at end of file diff --git a/tags/research/index.xml b/tags/research/index.xml new file mode 100644 index 00000000..4a70c8e7 --- /dev/null +++ b/tags/research/index.xml @@ -0,0 +1,3 @@ +research on Open Neuromorphichttps://open-neuromorphic.org/tags/research/Recent content in research on Open NeuromorphicHugo -- gohugo.ioen-usMon, 27 Mar 2023 00:00:00 +0000Bits of Chips | TrueNorthhttps://open-neuromorphic.org/p/bits-of-chips-truenorth/Mon, 27 Mar 2023 00:00:00 +0000https://open-neuromorphic.org/p/bits-of-chips-truenorth/<img src="https://open-neuromorphic.org/p/bits-of-chips-truenorth/brain-to-chip.png" alt="Featured image of post Bits of Chips | TrueNorth" />Why do we want to emulate the brain? If you have ever read an article on neuromorphic computing, you might have noticed that in the introduction of each of these there is the same statement: &ldquo;The brain is much powerful than any AI machine when it comes to cognitive tasks but it runs on a 10W power budget!&rdquo;. This is absolutely true: neurons in the brain communicate among each other by means of spikes, which are short voltage pulses that propagate from one neuron to the other.Digital neuromophic hardware read listhttps://open-neuromorphic.org/p/digital-neuromophic-hardware-read-list/Wed, 11 Jan 2023 00:00:00 +0000https://open-neuromorphic.org/p/digital-neuromophic-hardware-read-list/<img src="https://open-neuromorphic.org/p/digital-neuromophic-hardware-read-list/frenkel-thesis.png" alt="Featured image of post Digital neuromophic hardware read list" />Here&rsquo;s a list of articles and theses related to digital hardware designs for neuomorphic applications. I plan to update it regularly. To be redirected directly to the sources, click on the titles! +If you are new to neuromorphic computing, I strongly suggest to get a grasp of how an SNN works from this paper. Otherwise, it will be pretty difficult to understand the content of the papers listed here. +2015 TrueNorth: Design and Tool Flow of a 65 mW 1 Million Neuron Programmable Neurosynaptic Chip, Filipp Akopyan et al. \ No newline at end of file diff --git a/tags/research/page/1/index.html b/tags/research/page/1/index.html new file mode 100644 index 00000000..d7116322 --- /dev/null +++ b/tags/research/page/1/index.html @@ -0,0 +1 @@ +https://open-neuromorphic.org/tags/research/ \ No newline at end of file diff --git a/tags/rtl/index.html b/tags/rtl/index.html new file mode 100644 index 00000000..7ba6efc9 --- /dev/null +++ b/tags/rtl/index.html @@ -0,0 +1,5 @@ +Tag: rtl - Open Neuromorphic

Tags

1 page

rtl

+ \ No newline at end of file diff --git a/tags/rtl/index.xml b/tags/rtl/index.xml new file mode 100644 index 00000000..d2fe18ce --- /dev/null +++ b/tags/rtl/index.xml @@ -0,0 +1,2 @@ +rtl on Open Neuromorphichttps://open-neuromorphic.org/tags/rtl/Recent content in rtl on Open NeuromorphicHugo -- gohugo.ioen-usMon, 02 Jan 2023 00:00:00 +0000Spiking neurons: a digital hardware implementationhttps://open-neuromorphic.org/p/spiking-neurons-a-digital-hardware-implementation/Mon, 02 Jan 2023 00:00:00 +0000https://open-neuromorphic.org/p/spiking-neurons-a-digital-hardware-implementation/<img src="https://open-neuromorphic.org/p/spiking-neurons-a-digital-hardware-implementation/loihi.png" alt="Featured image of post Spiking neurons: a digital hardware implementation" />Spiking neurons In this article, we will try to model a layer of Leaky Integrate and Fire (LIF) spiking neurons using digital hardware: registers, memories, adders and so on. To do so, we will consider a single output neuron connected to multiple input neurons from a previous layer. +In a Spiking Neural Network (SNN), neurons communicate by means of spikes: these activation voltages are then converted to currents through the synapses, charging the membrane potential of the destination neuron. \ No newline at end of file diff --git a/tags/rtl/page/1/index.html b/tags/rtl/page/1/index.html new file mode 100644 index 00000000..d7d89685 --- /dev/null +++ b/tags/rtl/page/1/index.html @@ -0,0 +1 @@ +https://open-neuromorphic.org/tags/rtl/ \ No newline at end of file diff --git a/tags/snn/index.html b/tags/snn/index.html new file mode 100644 index 00000000..901aeb62 --- /dev/null +++ b/tags/snn/index.html @@ -0,0 +1,5 @@ +Tag: snn - Open Neuromorphic

Tags

3 pages

snn

+ \ No newline at end of file diff --git a/tags/snn/index.xml b/tags/snn/index.xml new file mode 100644 index 00000000..c42e2078 --- /dev/null +++ b/tags/snn/index.xml @@ -0,0 +1,4 @@ +snn on Open Neuromorphichttps://open-neuromorphic.org/tags/snn/Recent content in snn on Open NeuromorphicHugo -- gohugo.ioen-usWed, 02 Aug 2023 00:00:00 +0000SNN library benchmarkshttps://open-neuromorphic.org/p/snn-library-benchmarks/Wed, 02 Aug 2023 00:00:00 +0000https://open-neuromorphic.org/p/snn-library-benchmarks/<img src="https://open-neuromorphic.org/p/snn-library-benchmarks/framework-benchmarking-16k-header.png" alt="Featured image of post SNN library benchmarks" />SNN library benchmarks Open Neuromorphic&rsquo;s list of SNN frameworks currently counts 10 libraries, and those are only the most popular ones! As the sizes of spiking neural network models grow thanks to deep learning, optimization becomes more important for researchers and practitioners alike. Training SNNs is often slow, as the stateful networks are typically fed sequential inputs. Today&rsquo;s most popular training method then is some form of backpropagation through time, whose time complexity scales with the number of time steps.Digital neuromophic hardware read listhttps://open-neuromorphic.org/p/digital-neuromophic-hardware-read-list/Wed, 11 Jan 2023 00:00:00 +0000https://open-neuromorphic.org/p/digital-neuromophic-hardware-read-list/<img src="https://open-neuromorphic.org/p/digital-neuromophic-hardware-read-list/frenkel-thesis.png" alt="Featured image of post Digital neuromophic hardware read list" />Here&rsquo;s a list of articles and theses related to digital hardware designs for neuomorphic applications. I plan to update it regularly. To be redirected directly to the sources, click on the titles! +If you are new to neuromorphic computing, I strongly suggest to get a grasp of how an SNN works from this paper. Otherwise, it will be pretty difficult to understand the content of the papers listed here. +2015 TrueNorth: Design and Tool Flow of a 65 mW 1 Million Neuron Programmable Neurosynaptic Chip, Filipp Akopyan et al.Spiking neurons: a digital hardware implementationhttps://open-neuromorphic.org/p/spiking-neurons-a-digital-hardware-implementation/Mon, 02 Jan 2023 00:00:00 +0000https://open-neuromorphic.org/p/spiking-neurons-a-digital-hardware-implementation/<img src="https://open-neuromorphic.org/p/spiking-neurons-a-digital-hardware-implementation/loihi.png" alt="Featured image of post Spiking neurons: a digital hardware implementation" />Spiking neurons In this article, we will try to model a layer of Leaky Integrate and Fire (LIF) spiking neurons using digital hardware: registers, memories, adders and so on. To do so, we will consider a single output neuron connected to multiple input neurons from a previous layer. +In a Spiking Neural Network (SNN), neurons communicate by means of spikes: these activation voltages are then converted to currents through the synapses, charging the membrane potential of the destination neuron. \ No newline at end of file diff --git a/tags/snn/page/1/index.html b/tags/snn/page/1/index.html new file mode 100644 index 00000000..dd129b78 --- /dev/null +++ b/tags/snn/page/1/index.html @@ -0,0 +1 @@ +https://open-neuromorphic.org/tags/snn/ \ No newline at end of file diff --git a/tags/spiking/index.html b/tags/spiking/index.html new file mode 100644 index 00000000..7181d3db --- /dev/null +++ b/tags/spiking/index.html @@ -0,0 +1,5 @@ +Tag: spiking - Open Neuromorphic

Tags

1 page

spiking

+ \ No newline at end of file diff --git a/tags/spiking/index.xml b/tags/spiking/index.xml new file mode 100644 index 00000000..50cadc5b --- /dev/null +++ b/tags/spiking/index.xml @@ -0,0 +1,2 @@ +spiking on Open Neuromorphichttps://open-neuromorphic.org/tags/spiking/Recent content in spiking on Open NeuromorphicHugo -- gohugo.ioen-usMon, 02 Jan 2023 00:00:00 +0000Spiking neurons: a digital hardware implementationhttps://open-neuromorphic.org/p/spiking-neurons-a-digital-hardware-implementation/Mon, 02 Jan 2023 00:00:00 +0000https://open-neuromorphic.org/p/spiking-neurons-a-digital-hardware-implementation/<img src="https://open-neuromorphic.org/p/spiking-neurons-a-digital-hardware-implementation/loihi.png" alt="Featured image of post Spiking neurons: a digital hardware implementation" />Spiking neurons In this article, we will try to model a layer of Leaky Integrate and Fire (LIF) spiking neurons using digital hardware: registers, memories, adders and so on. To do so, we will consider a single output neuron connected to multiple input neurons from a previous layer. +In a Spiking Neural Network (SNN), neurons communicate by means of spikes: these activation voltages are then converted to currents through the synapses, charging the membrane potential of the destination neuron. \ No newline at end of file diff --git a/tags/spiking/page/1/index.html b/tags/spiking/page/1/index.html new file mode 100644 index 00000000..c355ad55 --- /dev/null +++ b/tags/spiking/page/1/index.html @@ -0,0 +1 @@ +https://open-neuromorphic.org/tags/spiking/ \ No newline at end of file diff --git a/tags/verilog/index.html b/tags/verilog/index.html new file mode 100644 index 00000000..b41d1051 --- /dev/null +++ b/tags/verilog/index.html @@ -0,0 +1,5 @@ +Tag: verilog - Open Neuromorphic

Tags

1 page

verilog

+ \ No newline at end of file diff --git a/tags/verilog/index.xml b/tags/verilog/index.xml new file mode 100644 index 00000000..91b6783d --- /dev/null +++ b/tags/verilog/index.xml @@ -0,0 +1,2 @@ +verilog on Open Neuromorphichttps://open-neuromorphic.org/tags/verilog/Recent content in verilog on Open NeuromorphicHugo -- gohugo.ioen-usMon, 02 Jan 2023 00:00:00 +0000Spiking neurons: a digital hardware implementationhttps://open-neuromorphic.org/p/spiking-neurons-a-digital-hardware-implementation/Mon, 02 Jan 2023 00:00:00 +0000https://open-neuromorphic.org/p/spiking-neurons-a-digital-hardware-implementation/<img src="https://open-neuromorphic.org/p/spiking-neurons-a-digital-hardware-implementation/loihi.png" alt="Featured image of post Spiking neurons: a digital hardware implementation" />Spiking neurons In this article, we will try to model a layer of Leaky Integrate and Fire (LIF) spiking neurons using digital hardware: registers, memories, adders and so on. To do so, we will consider a single output neuron connected to multiple input neurons from a previous layer. +In a Spiking Neural Network (SNN), neurons communicate by means of spikes: these activation voltages are then converted to currents through the synapses, charging the membrane potential of the destination neuron. \ No newline at end of file diff --git a/tags/verilog/page/1/index.html b/tags/verilog/page/1/index.html new file mode 100644 index 00000000..97572165 --- /dev/null +++ b/tags/verilog/page/1/index.html @@ -0,0 +1 @@ +https://open-neuromorphic.org/tags/verilog/ \ No newline at end of file diff --git a/team/alexander-hadjiivanov.jpeg b/team/alexander-hadjiivanov.jpeg new file mode 100644 index 00000000..cdffa33b Binary files /dev/null and b/team/alexander-hadjiivanov.jpeg differ diff --git a/team/alexander-hadjiivanov_hub8fcf2814c8e5b8f14748f2967d50efc_34856_1024x0_resize_q75_box.jpeg b/team/alexander-hadjiivanov_hub8fcf2814c8e5b8f14748f2967d50efc_34856_1024x0_resize_q75_box.jpeg new file mode 100644 index 00000000..6dc396ed Binary files /dev/null and b/team/alexander-hadjiivanov_hub8fcf2814c8e5b8f14748f2967d50efc_34856_1024x0_resize_q75_box.jpeg differ diff --git a/team/alexander-hadjiivanov_hub8fcf2814c8e5b8f14748f2967d50efc_34856_480x0_resize_q75_box.jpeg b/team/alexander-hadjiivanov_hub8fcf2814c8e5b8f14748f2967d50efc_34856_480x0_resize_q75_box.jpeg new file mode 100644 index 00000000..ae48cb58 Binary files /dev/null and b/team/alexander-hadjiivanov_hub8fcf2814c8e5b8f14748f2967d50efc_34856_480x0_resize_q75_box.jpeg differ diff --git a/team/alexander-henkes.jpg b/team/alexander-henkes.jpg new file mode 100644 index 00000000..764528d2 Binary files /dev/null and b/team/alexander-henkes.jpg differ diff --git a/team/alexander-henkes_hu69c5a2c53edaa3fb83fe5cf6ef3a6a54_7739_1024x0_resize_q75_box.jpg b/team/alexander-henkes_hu69c5a2c53edaa3fb83fe5cf6ef3a6a54_7739_1024x0_resize_q75_box.jpg new file mode 100644 index 00000000..6ab2d13b Binary files /dev/null and b/team/alexander-henkes_hu69c5a2c53edaa3fb83fe5cf6ef3a6a54_7739_1024x0_resize_q75_box.jpg differ diff --git a/team/alexander-henkes_hu69c5a2c53edaa3fb83fe5cf6ef3a6a54_7739_480x0_resize_q75_box.jpg b/team/alexander-henkes_hu69c5a2c53edaa3fb83fe5cf6ef3a6a54_7739_480x0_resize_q75_box.jpg new file mode 100644 index 00000000..ffabdf4f Binary files /dev/null and b/team/alexander-henkes_hu69c5a2c53edaa3fb83fe5cf6ef3a6a54_7739_480x0_resize_q75_box.jpg differ diff --git a/team/catherine-schuman.jpg b/team/catherine-schuman.jpg new file mode 100644 index 00000000..eb4169fe Binary 
files /dev/null and b/team/catherine-schuman.jpg differ diff --git a/team/charlotte-frenkel.jpg b/team/charlotte-frenkel.jpg new file mode 100644 index 00000000..314bfaea Binary files /dev/null and b/team/charlotte-frenkel.jpg differ diff --git a/team/charlotte-frenkel_hu60129e51654de23dbae7e3ea857be115_12952_1024x0_resize_q75_box.jpg b/team/charlotte-frenkel_hu60129e51654de23dbae7e3ea857be115_12952_1024x0_resize_q75_box.jpg new file mode 100644 index 00000000..5151c3d8 Binary files /dev/null and b/team/charlotte-frenkel_hu60129e51654de23dbae7e3ea857be115_12952_1024x0_resize_q75_box.jpg differ diff --git a/team/charlotte-frenkel_hu60129e51654de23dbae7e3ea857be115_12952_480x0_resize_q75_box.jpg b/team/charlotte-frenkel_hu60129e51654de23dbae7e3ea857be115_12952_480x0_resize_q75_box.jpg new file mode 100644 index 00000000..bd9b72f6 Binary files /dev/null and b/team/charlotte-frenkel_hu60129e51654de23dbae7e3ea857be115_12952_480x0_resize_q75_box.jpg differ diff --git a/team/fabrizio-ottati.jpg b/team/fabrizio-ottati.jpg new file mode 100644 index 00000000..3d00eac0 Binary files /dev/null and b/team/fabrizio-ottati.jpg differ diff --git a/team/gregor-lenz.jpeg b/team/gregor-lenz.jpeg new file mode 100644 index 00000000..fe60b3c1 Binary files /dev/null and b/team/gregor-lenz.jpeg differ diff --git a/team/gregor-lenz_hu401dcc7f29eb10f93ef8a57749c49e1b_50576_1024x0_resize_q75_box.jpeg b/team/gregor-lenz_hu401dcc7f29eb10f93ef8a57749c49e1b_50576_1024x0_resize_q75_box.jpeg new file mode 100644 index 00000000..18630779 Binary files /dev/null and b/team/gregor-lenz_hu401dcc7f29eb10f93ef8a57749c49e1b_50576_1024x0_resize_q75_box.jpeg differ diff --git a/team/gregor-lenz_hu401dcc7f29eb10f93ef8a57749c49e1b_50576_480x0_resize_q75_box.jpeg b/team/gregor-lenz_hu401dcc7f29eb10f93ef8a57749c49e1b_50576_480x0_resize_q75_box.jpeg new file mode 100644 index 00000000..2bbd8587 Binary files /dev/null and b/team/gregor-lenz_hu401dcc7f29eb10f93ef8a57749c49e1b_50576_480x0_resize_q75_box.jpeg differ diff --git a/team/henning-wessels.jpg b/team/henning-wessels.jpg new file mode 100644 index 00000000..73d4d9f6 Binary files /dev/null and b/team/henning-wessels.jpg differ diff --git a/team/index.html b/team/index.html new file mode 100644 index 00000000..8e5f6f88 --- /dev/null +++ b/team/index.html @@ -0,0 +1,15 @@ +Team

Team

Fabrizio Ottati

Fabrizio Ottati is a Ph.D. student in the Department of Electronics and Communications at Politecnico di Torino, under the supervision of Professor Luciano Lavagno and Professor Mario Roberto Casu.

His main interests are event-based cameras, digital hardware design and automation, spiking neural networks, and Piedmontese red wine. He is the maintainer of two open source projects in the field of neuromorphic computing, Tonic and Expelliarmus. You can find more information on his website.

Gregor Lenz

Gregor Lenz graduated with a Ph.D. in neuromorphic engineering from Sorbonne University. He thinks that technology can learn a thing or two from how biological systems process information.

His main interests are event cameras inspired by the human retina and spiking neural networks that mimic the human brain, in an effort to teach machines to compute a bit more like humans do. At the very least there are some power-efficiency gains to be made, but hopefully more! He also loves building open source software for spike-based machine learning. You can find more information on his personal website.

He is the maintainer of two open source projects in the field of neuromorphic computing, Tonic and expelliarmus.

Jason Eshraghian

Jason Eshraghian is an Assistant Professor at the Department of Electrical and Computer Engineering, University of California, Santa Cruz, leading the UCSC Neuromorphic Computing Group.

His research focuses on brain-inspired circuit design to accelerate AI algorithms and spiking neural networks. You can find more information on his personal website.

He is the maintainer of snnTorch.

Charlotte Frenkel

Charlotte Frenkel is an Assistant Professor at the Microelectronics department of Delft University of Technology, Delft, The Netherlands. Her research goals are:

  • to demonstrate a competitive advantage for neuromorphic computing devices compared to conventional neural network accelerators,
  • to uncover a framework toward on-chip neuromorphic intelligence for adaptive edge computing.

To achieve these goals, she is investigating both the bottom-up and the top-down design approaches, as well as their synergies (see personal website).

She is the designer of the ODIN and ReckOn open-source online-learning digital neuromorphic processors.

Jens Egholm Pedersen

Jens Egholm Pedersen is a doctoral student at the Royal Institute of Technology (KTH) working to model and construct neuromorphic control systems.

When faced with the complexity and ambiguity of the real world, contemporary algorithms fail spectacularly. There is a strong need for self-correcting, closed-loop systems to help solve our everyday physical problems.

By simulating, carefully scrutinizing, and understanding neural circuits, including vision, motor control, and self-sustenance, Jens seeks to build autonomous systems that perform meaningful work, tightly following the Feynman axiom “What I cannot create, I do not understand”. You can find more information on his website.

He is the maintainer of norse and AEStream.

Steven Abreu

Steve is doing his Ph.D. on neuromorphic computing theory in the MINDS research group at the new CogniGron center for cognitive systems and materials in Groningen. He is funded by the European Post-Digital research network.

In his Ph.D., he works with different neuromorphic systems (Loihi 2, DynapSE2, and photonic reservoirs) to develop programming methods for devices that explore a richer set of physical dynamics than the synchronous bi-stable switching that (most of) computer science relies on. Steve’s background is in computer science and machine learning, with a touch of physics.

You can find more information on his website.

Alexander Henkes

Alexander Henkes received the B.Sc. and M.Sc. degrees in mechanical engineering from the University of Paderborn, Germany, in 2015 and 2018, respectively. In 2022 he received his Ph.D. with honors from the Technical University of Braunschweig (TUBS), Germany, for his thesis “Artificial Neural Networks in Continuum Micromechanics”.

He is currently a Post-Doctoral Research Fellow at the Institute for Computational Modeling in Civil Engineering at TUBS. In 2022, he was elected as a junior member of the German Association of Applied Mathematics and Mechanics (GAMM) for his outstanding research in the field of artificial intelligence in continuum micromechanics.

His current research focuses on spiking neural networks (SNNs). Recently he published a preprint on nonlinear history-dependent regression using SNNs, which enables them to be used in the context of applied mathematics and computational engineering.

He is a contributor to snnTorch.

Alexander Hadjiivanov

Alexander is currently a Research Fellow with the Advanced Concepts Team at the European Space Agency. His research efforts are focused on retinomorphic vision as well as the interplay between homeostasis, adaptation, axon guidance and structural plasticity in spiking neural networks.

No single branch of AI can claim the crown of true intelligence on its own. Rather, developing AI worthy of the ‘I’ would require a concerted effort to combine virtually all the branches, from perception through learning and cognition to reasoning and interaction. The most enticing aspect of neuromorphic computing is its potential to bring about this unification.

Peng Zhou

Peng Zhou received her Ph.D. from the University of California, Santa Cruz, under the supervision of Prof. Sung-Mo “Steve” Kang and Prof. Jason Eshraghian, focusing on neuromorphic computing, spiking neural networks, and their hardware implementation using memristors.

She was awarded “Best New Neuromorph” at the 2022 Telluride Workshop on Neuromorphic Cognition Engineering. She also gained industry experience at Synsense, a neuromorphic startup spun out of the Institute of Neuroinformatics at the University of Zurich and ETH Zurich, and at Tetramem, a computing-in-memory startup whose RRAM technologies build on work from the Yang research group at USC, the Nanodevices and Integrated Systems Laboratory at UMass Amherst, and HP Labs.

She is a contributor to the open-source SNN frameworks snnTorch and Rockpool. She also designs open-source SNN chips.

You can find more information on her website.

Shyam Narayan

Shyam is a Ph.D. student in the Neuromorphic Cognitive Systems (NCS) group at the Institute of Neuroinformatics, University of Zurich, and ETH Zurich with Prof. Giacomo Indiveri.

After working as a chip designer in the industry for over a decade, he decided to pursue a Ph.D. in Bio-Inspired Wide Band Circuits and Systems for Edge Neuromorphic Computing. His research interests lie at the intersection of analog/mixed-signal IC design, Neuroscience, and Robotics.

He believes that Neuromorphic Engineering has its own merits, and that a coordinated effort from the community at the device, circuit, and algorithm levels is needed to push the envelope.

Catherine Schuman

Catherine (Katie) Schuman is an Assistant Professor in the Department of Electrical Engineering and Computer Science at the University of Tennessee (UT). She received her Ph.D. in Computer Science from UT in 2015, where she completed her dissertation on the use of evolutionary algorithms to train spiking neural networks for neuromorphic systems. Katie previously served as a research scientist at Oak Ridge National Laboratory, where her research focused on algorithms and applications of neuromorphic systems. Katie co-leads the TENNLab Neuromorphic Computing Research Group at UT. She has over 100 publications as well as seven patents in the field of neuromorphic computing. She received the Department of Energy Early Career Award in 2019.

Melika Payvand

Melika Payvand is an Assistant Professor at the Institute of Neuroinformatics, University of Zurich and ETH Zurich. She received her Ph.D. in Electrical and Computer Engineering from the University of California, Santa Barbara. Her recent research interest is in developing on-device learning systems that shape themselves to the sensory input in real time. Specifically, she exploits the physics of resistive memory for synaptic, neuronal, dendritic, and structural plasticity, inspired by the structure-function relationship of brain circuits. She is the recipient of the 2023 “Swiss ERC” starting grant and of the “Best Neuromorph” award at the 2019 Telluride workshop. She has co-coordinated the European project NEUROTECH, is the program co-chair of the International Conference on Neuromorphic Systems (ICONS), and co-organizes the scientific program of the Capocaccia Neuromorphic Intelligence workshop.

Henning Wessels

Henning Wessels is an Assistant Professor at the Department of Civil and Environmental Engineering, Technical University Braunschweig, where he leads the data-driven modeling group. His group develops computational methods at the intersection of mechanics, numerics, machine learning, and uncertainty quantification. Applications can be found, for instance, in virtual sensing and structural health monitoring. Here, SNNs offer huge potential for data-driven mechanics on neuromorphic hardware within embedded systems.

+ \ No newline at end of file diff --git a/team/jason-eshraghian.webp b/team/jason-eshraghian.webp new file mode 100644 index 00000000..ffe17a39 Binary files /dev/null and b/team/jason-eshraghian.webp differ diff --git a/team/jason-eshraghian_hu5d2d5366ae47912bc22f41487225bf0b_38920_1024x0_resize_q75_h2_box_2.webp b/team/jason-eshraghian_hu5d2d5366ae47912bc22f41487225bf0b_38920_1024x0_resize_q75_h2_box_2.webp new file mode 100644 index 00000000..f5a6676b Binary files /dev/null and b/team/jason-eshraghian_hu5d2d5366ae47912bc22f41487225bf0b_38920_1024x0_resize_q75_h2_box_2.webp differ diff --git a/team/jason-eshraghian_hu5d2d5366ae47912bc22f41487225bf0b_38920_480x0_resize_q75_h2_box_2.webp b/team/jason-eshraghian_hu5d2d5366ae47912bc22f41487225bf0b_38920_480x0_resize_q75_h2_box_2.webp new file mode 100644 index 00000000..b609d50c Binary files /dev/null and b/team/jason-eshraghian_hu5d2d5366ae47912bc22f41487225bf0b_38920_480x0_resize_q75_h2_box_2.webp differ diff --git a/team/jens-egholm.png b/team/jens-egholm.png new file mode 100644 index 00000000..0e188fc4 Binary files /dev/null and b/team/jens-egholm.png differ diff --git a/team/jens-egholm_hudf55b28927feb6152e01d07f54f7ef1e_320513_1024x0_resize_box_3.png b/team/jens-egholm_hudf55b28927feb6152e01d07f54f7ef1e_320513_1024x0_resize_box_3.png new file mode 100644 index 00000000..dc8f6756 Binary files /dev/null and b/team/jens-egholm_hudf55b28927feb6152e01d07f54f7ef1e_320513_1024x0_resize_box_3.png differ diff --git a/team/jens-egholm_hudf55b28927feb6152e01d07f54f7ef1e_320513_480x0_resize_box_3.png b/team/jens-egholm_hudf55b28927feb6152e01d07f54f7ef1e_320513_480x0_resize_box_3.png new file mode 100644 index 00000000..3ef2b54a Binary files /dev/null and b/team/jens-egholm_hudf55b28927feb6152e01d07f54f7ef1e_320513_480x0_resize_box_3.png differ diff --git a/team/melika-payvand.jpg b/team/melika-payvand.jpg new file mode 100644 index 00000000..20339bc5 Binary files /dev/null and b/team/melika-payvand.jpg differ diff --git a/team/peng-zhou.jpeg b/team/peng-zhou.jpeg new file mode 100644 index 00000000..1f8f23e6 Binary files /dev/null and b/team/peng-zhou.jpeg differ diff --git a/team/peng-zhou_hu06543fcaee7fb3365185960f073e8649_25733_1024x0_resize_q75_box.jpeg b/team/peng-zhou_hu06543fcaee7fb3365185960f073e8649_25733_1024x0_resize_q75_box.jpeg new file mode 100644 index 00000000..cd691091 Binary files /dev/null and b/team/peng-zhou_hu06543fcaee7fb3365185960f073e8649_25733_1024x0_resize_q75_box.jpeg differ diff --git a/team/peng-zhou_hu06543fcaee7fb3365185960f073e8649_25733_480x0_resize_q75_box.jpeg b/team/peng-zhou_hu06543fcaee7fb3365185960f073e8649_25733_480x0_resize_q75_box.jpeg new file mode 100644 index 00000000..3250a9f0 Binary files /dev/null and b/team/peng-zhou_hu06543fcaee7fb3365185960f073e8649_25733_480x0_resize_q75_box.jpeg differ diff --git a/team/shyam-narayanan.jpeg b/team/shyam-narayanan.jpeg new file mode 100644 index 00000000..540a4a90 Binary files /dev/null and b/team/shyam-narayanan.jpeg differ diff --git a/team/shyam-narayanan_hu720f02c2842a352e1bd5c03ea8829556_29506_1024x0_resize_q75_box.jpeg b/team/shyam-narayanan_hu720f02c2842a352e1bd5c03ea8829556_29506_1024x0_resize_q75_box.jpeg new file mode 100644 index 00000000..f8ea3cfd Binary files /dev/null and b/team/shyam-narayanan_hu720f02c2842a352e1bd5c03ea8829556_29506_1024x0_resize_q75_box.jpeg differ diff --git a/team/shyam-narayanan_hu720f02c2842a352e1bd5c03ea8829556_29506_480x0_resize_q75_box.jpeg 
b/team/shyam-narayanan_hu720f02c2842a352e1bd5c03ea8829556_29506_480x0_resize_q75_box.jpeg new file mode 100644 index 00000000..6c6064f2 Binary files /dev/null and b/team/shyam-narayanan_hu720f02c2842a352e1bd5c03ea8829556_29506_480x0_resize_q75_box.jpeg differ diff --git a/team/steven-abreu.jpg b/team/steven-abreu.jpg new file mode 100644 index 00000000..0ef4dacb Binary files /dev/null and b/team/steven-abreu.jpg differ diff --git a/team/steven-abreu_hu19b4cb9d8e790ac29dad8a39fbedb7d3_110937_1024x0_resize_q75_box.jpg b/team/steven-abreu_hu19b4cb9d8e790ac29dad8a39fbedb7d3_110937_1024x0_resize_q75_box.jpg new file mode 100644 index 00000000..4d8f9d5e Binary files /dev/null and b/team/steven-abreu_hu19b4cb9d8e790ac29dad8a39fbedb7d3_110937_1024x0_resize_q75_box.jpg differ diff --git a/team/steven-abreu_hu19b4cb9d8e790ac29dad8a39fbedb7d3_110937_480x0_resize_q75_box.jpg b/team/steven-abreu_hu19b4cb9d8e790ac29dad8a39fbedb7d3_110937_480x0_resize_q75_box.jpg new file mode 100644 index 00000000..2ab0797d Binary files /dev/null and b/team/steven-abreu_hu19b4cb9d8e790ac29dad8a39fbedb7d3_110937_480x0_resize_q75_box.jpg differ diff --git a/ts/main.js b/ts/main.js new file mode 100644 index 00000000..d3fedd90 --- /dev/null +++ b/ts/main.js @@ -0,0 +1,11 @@ +(()=>{var u=class{galleryUID;items=[];constructor(t,r=1){if(window.PhotoSwipe==null||window.PhotoSwipeUI_Default==null){console.error("PhotoSwipe lib not loaded.");return}this.galleryUID=r,u.createGallery(t),this.loadItems(t),this.bindClick()}loadItems(t){this.items=[];let r=t.querySelectorAll("figure.gallery-image");for(let i of r){let n=i.querySelector("figcaption"),o=i.querySelector("img"),s={w:parseInt(o.getAttribute("width")),h:parseInt(o.getAttribute("height")),src:o.src,msrc:o.getAttribute("data-thumb")||o.src,el:i};n&&(s.title=n.innerHTML),this.items.push(s)}}static createGallery(t){let r=t.querySelectorAll("img.gallery-image");for(let o of Array.from(r)){let s=o.closest("p");if(!s||!t.contains(s)||(s.textContent.trim()==""&&s.classList.add("no-text"),!s.classList.contains("no-text")))continue;let d=o.parentElement.tagName=="A",m=o,c=document.createElement("figure");if(c.style.setProperty("flex-grow",o.getAttribute("data-flex-grow")||"1"),c.style.setProperty("flex-basis",o.getAttribute("data-flex-basis")||"0"),d&&(m=o.parentElement),m.parentElement.insertBefore(c,m),c.appendChild(m),o.hasAttribute("alt")){let l=document.createElement("figcaption");l.innerText=o.getAttribute("alt"),c.appendChild(l)}if(!d){c.className="gallery-image";let l=document.createElement("a");l.href=o.src,l.setAttribute("target","_blank"),o.parentNode.insertBefore(l,o),l.appendChild(o)}}let i=t.querySelectorAll("figure.gallery-image"),n=[];for(let o of i)n.length?o.previousElementSibling===n[n.length-1]?n.push(o):n.length&&(u.wrap(n),n=[o]):n=[o];n.length>0&&u.wrap(n)}static wrap(t){let r=document.createElement("div");r.className="gallery";let i=t[0].parentNode,n=t[0];i.insertBefore(r,n);for(let o of t)r.appendChild(o)}open(t){let r=document.querySelector(".pswp");new window.PhotoSwipe(r,window.PhotoSwipeUI_Default,this.items,{index:t,galleryUID:this.galleryUID,getThumbBoundsFn:n=>{let o=this.items[n].el.getElementsByTagName("img")[0],s=window.pageYOffset||document.documentElement.scrollTop,a=o.getBoundingClientRect();return{x:a.left,y:a.top+s,w:a.width}}}).init()}bindClick(){for(let[t,r]of this.items.entries())r.el.querySelector("a").addEventListener("click",n=>{n.preventDefault(),this.open(t)})}},b=u;var 
h={};if(localStorage.hasOwnProperty("StackColorsCache"))try{h=JSON.parse(localStorage.getItem("StackColorsCache"))}catch{h={}}async function S(e,t,r){if(!e)return await Vibrant.from(r).getPalette();if(!h.hasOwnProperty(e)||h[e].hash!==t){let i=await Vibrant.from(r).getPalette();h[e]={hash:t,Vibrant:{hex:i.Vibrant.hex,rgb:i.Vibrant.rgb,bodyTextColor:i.Vibrant.bodyTextColor},DarkMuted:{hex:i.DarkMuted.hex,rgb:i.DarkMuted.rgb,bodyTextColor:i.DarkMuted.bodyTextColor}},localStorage.setItem("StackColorsCache",JSON.stringify(h))}return h[e]}var D=(e,t=500)=>{e.classList.add("transiting"),e.style.transitionProperty="height, margin, padding",e.style.transitionDuration=t+"ms",e.style.height=e.offsetHeight+"px",e.offsetHeight,e.style.overflow="hidden",e.style.height="0",e.style.paddingTop="0",e.style.paddingBottom="0",e.style.marginTop="0",e.style.marginBottom="0",window.setTimeout(()=>{e.classList.remove("show"),e.style.removeProperty("height"),e.style.removeProperty("padding-top"),e.style.removeProperty("padding-bottom"),e.style.removeProperty("margin-top"),e.style.removeProperty("margin-bottom"),e.style.removeProperty("overflow"),e.style.removeProperty("transition-duration"),e.style.removeProperty("transition-property"),e.classList.remove("transiting")},t)},q=(e,t=500)=>{e.classList.add("transiting"),e.style.removeProperty("display"),e.classList.add("show");let r=e.offsetHeight;e.style.overflow="hidden",e.style.height="0",e.style.paddingTop="0",e.style.paddingBottom="0",e.style.marginTop="0",e.style.marginBottom="0",e.offsetHeight,e.style.transitionProperty="height, margin, padding",e.style.transitionDuration=t+"ms",e.style.height=r+"px",e.style.removeProperty("padding-top"),e.style.removeProperty("padding-bottom"),e.style.removeProperty("margin-top"),e.style.removeProperty("margin-bottom"),window.setTimeout(()=>{e.style.removeProperty("height"),e.style.removeProperty("overflow"),e.style.removeProperty("transition-duration"),e.style.removeProperty("transition-property"),e.classList.remove("transiting")},t)},B=(e,t=500)=>window.getComputedStyle(e).display==="none"?q(e,t):D(e,t);function v(){let e=document.getElementById("toggle-menu");e&&e.addEventListener("click",()=>{document.getElementById("main-menu").classList.contains("transiting")||(document.body.classList.toggle("show-menu"),B(document.getElementById("main-menu"),300),e.classList.toggle("is-active"))})}function N(e,t,r){var i=document.createElement(e);for(let n in t)if(n&&t.hasOwnProperty(n)){let o=t[n];n=="dangerouslySetInnerHTML"?i.innerHTML=o.__html:o===!0?i.setAttribute(n,n):o!==!1&&o!=null&&i.setAttribute(n,o.toString())}for(let n=2;n{this.isDark()?this.currentScheme="light":this.currentScheme="dark",this.setBodyClass(),this.currentScheme==this.systemPreferScheme&&(this.currentScheme="auto"),this.saveScheme()})}isDark(){return this.currentScheme=="dark"||this.currentScheme=="auto"&&this.systemPreferScheme=="dark"}dispatchEvent(t){let r=new CustomEvent("onColorSchemeChange",{detail:t});window.dispatchEvent(r)}setBodyClass(){this.isDark()?document.documentElement.dataset.scheme="dark":document.documentElement.dataset.scheme="light",this.dispatchEvent(document.documentElement.dataset.scheme)}getSavedScheme(){let t=localStorage.getItem(this.localStorageKey);return t=="light"||t=="dark"||t=="auto"?t:"auto"}bindMatchMedia(){window.matchMedia("(prefers-color-scheme: dark)").addEventListener("change",t=>{t.matches?this.systemPreferScheme="dark":this.systemPreferScheme="light",this.setBodyClass()})}},E=y;function g(e){let 
t;return()=>{t&&window.cancelAnimationFrame(t),t=window.requestAnimationFrame(()=>e())}}var O=".article-content h1[id], .article-content h2[id], .article-content h3[id], .article-content h4[id], .article-content h5[id], .article-content h6[id]",T="#TableOfContents",L="#TableOfContents li",C="active-class";function V(e,t){let r=e.querySelector("a").offsetHeight,i=e.offsetTop-t.offsetHeight/2+r/2-t.offsetTop;i<0&&(i=0),t.scrollTo({top:i,behavior:"smooth"})}function U(e){let t={};return e.forEach(r=>{let n=r.querySelector("a").getAttribute("href");n.startsWith("#")&&(t[n.slice(1)]=r)}),t}function k(e){let t=[];return e.forEach(r=>{t.push({id:r.id,offset:r.offsetTop})}),t.sort((r,i)=>r.offset-i.offset),t}function M(){let e=document.querySelectorAll(O);if(!e){console.warn("No header matched query",e);return}let t=document.querySelector(T);if(!t){console.warn("No toc matched query",T);return}let r=document.querySelectorAll(L);if(!r){console.warn("No navigation matched query",L);return}let i=k(e),n=!1;t.addEventListener("mouseenter",g(()=>n=!0)),t.addEventListener("mouseleave",g(()=>n=!1));let o,s=U(r);function a(){let m=document.documentElement.scrollTop||document.body.scrollTop,c;i.forEach(p=>{m>=p.offset-20&&(c=document.getElementById(p.id))});let l;c&&(l=s[c.id]),c&&!l?console.debug("No link found for section",c):l!==o&&(o&&o.classList.remove(C),l&&(l.classList.add(C),n||V(l,t)),o=l)}window.addEventListener("scroll",g(a));function d(){i=k(e),a()}window.addEventListener("resize",g(d))}var $="a[href]";function P(){document.querySelectorAll($).forEach(e=>{e.getAttribute("href").startsWith("#")&&e.addEventListener("click",r=>{r.preventDefault();let i=e.getAttribute("href").substring(1),n=document.getElementById(i),o=n.getBoundingClientRect().top-document.documentElement.getBoundingClientRect().top;window.history.pushState({},"",e.getAttribute("href")),scrollTo({top:o,behavior:"smooth"})})})}var x={init:()=>{v();let e=document.querySelector(".article-content");e&&(new b(e),P(),M());let t=document.querySelector(".article-list--tile");t&&new IntersectionObserver(async(s,a)=>{s.forEach(d=>{if(!d.isIntersecting)return;a.unobserve(d.target),d.target.querySelectorAll("article.has-image").forEach(async c=>{let l=c.querySelector("img"),p=l.src,H=l.getAttribute("data-key"),I=l.getAttribute("data-hash"),A=c.querySelector(".article-details"),f=await S(H,I,p);A.style.background=` + linear-gradient(0deg, + rgba(${f.DarkMuted.rgb[0]}, ${f.DarkMuted.rgb[1]}, ${f.DarkMuted.rgb[2]}, 0.5) 0%, + rgba(${f.Vibrant.rgb[0]}, ${f.Vibrant.rgb[1]}, ${f.Vibrant.rgb[2]}, 0.75) 100%)`})})}).observe(t);let r=document.querySelectorAll(".article-content div.highlight"),i="Copy",n="Copied!";r.forEach(o=>{let s=document.createElement("button");s.innerHTML=i,s.classList.add("copyCodeButton"),o.appendChild(s);let a=o.querySelector("code[data-lang]");a&&s.addEventListener("click",()=>{navigator.clipboard.writeText(a.textContent).then(()=>{s.textContent=n,setTimeout(()=>{s.textContent=i},1e3)}).catch(d=>{alert(d),console.log("Something went wrong",d)})})}),new E(document.getElementById("dark-mode-toggle"))}};window.addEventListener("load",()=>{setTimeout(function(){x.init()},0)});window.Stack=x;window.createElement=w;})(); +/*! +* Hugo Theme Stack +* +* @author: Jimmy Cai +* @website: https://jimmycai.com +* @link: https://github.com/CaiJimmy/hugo-theme-stack +*/