diff --git a/.nojekyll b/.nojekyll
new file mode 100644
index 00000000..e69de29b
diff --git a/404.html b/404.html
new file mode 100644
index 00000000..698a1697
--- /dev/null
+++ b/404.html
@@ -0,0 +1,5 @@
+
404 Page not found
Not Found
This page does not exist
+
\ No newline at end of file
diff --git a/CNAME b/CNAME
new file mode 100644
index 00000000..73478eab
--- /dev/null
+++ b/CNAME
@@ -0,0 +1 @@
+open-neuromorphic.org
\ No newline at end of file
diff --git a/about/ONM.png b/about/ONM.png
new file mode 100644
index 00000000..bc5eadbf
Binary files /dev/null and b/about/ONM.png differ
diff --git a/about/ONM_hu581cb6e07016d86819909a21b22e28f8_91275_120x120_fill_box_smart1_3.png b/about/ONM_hu581cb6e07016d86819909a21b22e28f8_91275_120x120_fill_box_smart1_3.png
new file mode 100644
index 00000000..cd761e89
Binary files /dev/null and b/about/ONM_hu581cb6e07016d86819909a21b22e28f8_91275_120x120_fill_box_smart1_3.png differ
diff --git a/about/ONM_hu581cb6e07016d86819909a21b22e28f8_91275_1600x0_resize_box_3.png b/about/ONM_hu581cb6e07016d86819909a21b22e28f8_91275_1600x0_resize_box_3.png
new file mode 100644
index 00000000..f4226f6f
Binary files /dev/null and b/about/ONM_hu581cb6e07016d86819909a21b22e28f8_91275_1600x0_resize_box_3.png differ
diff --git a/about/ONM_hu581cb6e07016d86819909a21b22e28f8_91275_800x0_resize_box_3.png b/about/ONM_hu581cb6e07016d86819909a21b22e28f8_91275_800x0_resize_box_3.png
new file mode 100644
index 00000000..6f390350
Binary files /dev/null and b/about/ONM_hu581cb6e07016d86819909a21b22e28f8_91275_800x0_resize_box_3.png differ
diff --git a/about/index.html b/about/index.html
new file mode 100644
index 00000000..27b87f61
--- /dev/null
+++ b/about/index.html
@@ -0,0 +1,9 @@
+About
This organisation was created by a loose collective of open source collaborators across academia and industry, as well as individual contributors. Most of us have never met in person, but we have started contributing to common projects or to each other’s! What connects us is the love of building tools that can be used in the neuromorphic community, and we want to share ownership of this vision. If that resonates with you, please don’t hesitate to get in touch!
Open Neuromorphic (ONM) provides the following things:
A curated list of software frameworks to make it easier to find the tool you need.
A platform for your code. If you wish to create a new repository or migrate your existing code to ONM, please get in touch with us.
Educational content to get you started in the neuromorphic world.
Events about neuromorphic research and software, with contributions from both academia and industry.
Projects that we list here can fall into this non-exclusive list of categories:
Spiking Neural Network (SNN) training and/or inference, for both ML and neuroscience applications.
Event-based sensor data handling.
Digital hardware designs for neuromorphic applications.
Mixed-signal hardware designs for neuromorphic applications.
Get in touch with us if you wish to give a talk, write an article, or learn more about the neuromorphic world.
+
+
+
+
+
\ No newline at end of file
diff --git a/categories/example-category/index.html b/categories/example-category/index.html
new file mode 100644
index 00000000..d4f65fa8
--- /dev/null
+++ b/categories/example-category/index.html
@@ -0,0 +1,5 @@
+Category: Example Category - Open Neuromorphic
Categories
0 pages
Example Category
A description of this category
+
\ No newline at end of file
diff --git a/categories/example-category/index.xml b/categories/example-category/index.xml
new file mode 100644
index 00000000..c92851b0
--- /dev/null
+++ b/categories/example-category/index.xml
@@ -0,0 +1 @@
+Example Category on Open Neuromorphichttps://open-neuromorphic.org/categories/example-category/Recent content in Example Category on Open NeuromorphicHugo -- gohugo.ioen-us
\ No newline at end of file
diff --git a/categories/example-category/page/1/index.html b/categories/example-category/page/1/index.html
new file mode 100644
index 00000000..8b9d99c1
--- /dev/null
+++ b/categories/example-category/page/1/index.html
@@ -0,0 +1 @@
+https://open-neuromorphic.org/categories/example-category/
\ No newline at end of file
diff --git a/categories/index.html b/categories/index.html
new file mode 100644
index 00000000..78f79971
--- /dev/null
+++ b/categories/index.html
@@ -0,0 +1,5 @@
+Categories
+
\ No newline at end of file
diff --git a/categories/index.xml b/categories/index.xml
new file mode 100644
index 00000000..0719aa17
--- /dev/null
+++ b/categories/index.xml
@@ -0,0 +1 @@
+Categories on Open Neuromorphichttps://open-neuromorphic.org/categories/Recent content in Categories on Open NeuromorphicHugo -- gohugo.ioen-usExample Categoryhttps://open-neuromorphic.org/categories/example-category/Mon, 01 Jan 0001 00:00:00 +0000https://open-neuromorphic.org/categories/example-category/
\ No newline at end of file
diff --git a/categories/page/1/index.html b/categories/page/1/index.html
new file mode 100644
index 00000000..60d41ae8
--- /dev/null
+++ b/categories/page/1/index.html
@@ -0,0 +1 @@
+https://open-neuromorphic.org/categories/
\ No newline at end of file
diff --git a/events-recordings/catherine-schuman.webp b/events-recordings/catherine-schuman.webp
new file mode 100644
index 00000000..34000988
Binary files /dev/null and b/events-recordings/catherine-schuman.webp differ
diff --git a/events-recordings/catherine-schuman_huc056e41512c17dd339d4074ee71f8169_11002_1024x0_resize_q75_h2_box_2.webp b/events-recordings/catherine-schuman_huc056e41512c17dd339d4074ee71f8169_11002_1024x0_resize_q75_h2_box_2.webp
new file mode 100644
index 00000000..38e6a8c3
Binary files /dev/null and b/events-recordings/catherine-schuman_huc056e41512c17dd339d4074ee71f8169_11002_1024x0_resize_q75_h2_box_2.webp differ
diff --git a/events-recordings/catherine-schuman_huc056e41512c17dd339d4074ee71f8169_11002_480x0_resize_q75_h2_box_2.webp b/events-recordings/catherine-schuman_huc056e41512c17dd339d4074ee71f8169_11002_480x0_resize_q75_h2_box_2.webp
new file mode 100644
index 00000000..03901139
Binary files /dev/null and b/events-recordings/catherine-schuman_huc056e41512c17dd339d4074ee71f8169_11002_480x0_resize_q75_h2_box_2.webp differ
diff --git a/events-recordings/corradi.jpg b/events-recordings/corradi.jpg
new file mode 100644
index 00000000..58d66c53
Binary files /dev/null and b/events-recordings/corradi.jpg differ
diff --git a/events-recordings/giorgia-dellaferrera.jpeg b/events-recordings/giorgia-dellaferrera.jpeg
new file mode 100644
index 00000000..95f4b3f0
Binary files /dev/null and b/events-recordings/giorgia-dellaferrera.jpeg differ
diff --git a/events-recordings/giorgia-dellaferrera.jpg b/events-recordings/giorgia-dellaferrera.jpg
new file mode 100644
index 00000000..06525e30
Binary files /dev/null and b/events-recordings/giorgia-dellaferrera.jpg differ
diff --git a/events-recordings/giorgia-dellaferrera_hua839c980f4c66a1a65fd81ca426e703b_92518_1024x0_resize_q75_box.jpeg b/events-recordings/giorgia-dellaferrera_hua839c980f4c66a1a65fd81ca426e703b_92518_1024x0_resize_q75_box.jpeg
new file mode 100644
index 00000000..1092800d
Binary files /dev/null and b/events-recordings/giorgia-dellaferrera_hua839c980f4c66a1a65fd81ca426e703b_92518_1024x0_resize_q75_box.jpeg differ
diff --git a/events-recordings/giorgia-dellaferrera_hua839c980f4c66a1a65fd81ca426e703b_92518_480x0_resize_q75_box.jpeg b/events-recordings/giorgia-dellaferrera_hua839c980f4c66a1a65fd81ca426e703b_92518_480x0_resize_q75_box.jpeg
new file mode 100644
index 00000000..cef0f18d
Binary files /dev/null and b/events-recordings/giorgia-dellaferrera_hua839c980f4c66a1a65fd81ca426e703b_92518_480x0_resize_q75_box.jpeg differ
diff --git a/events-recordings/gregor-lenz.jpeg b/events-recordings/gregor-lenz.jpeg
new file mode 100644
index 00000000..fe60b3c1
Binary files /dev/null and b/events-recordings/gregor-lenz.jpeg differ
diff --git a/events-recordings/gregor-lenz_hu401dcc7f29eb10f93ef8a57749c49e1b_50576_1024x0_resize_q75_box.jpeg b/events-recordings/gregor-lenz_hu401dcc7f29eb10f93ef8a57749c49e1b_50576_1024x0_resize_q75_box.jpeg
new file mode 100644
index 00000000..18630779
Binary files /dev/null and b/events-recordings/gregor-lenz_hu401dcc7f29eb10f93ef8a57749c49e1b_50576_1024x0_resize_q75_box.jpeg differ
diff --git a/events-recordings/gregor-lenz_hu401dcc7f29eb10f93ef8a57749c49e1b_50576_480x0_resize_q75_box.jpeg b/events-recordings/gregor-lenz_hu401dcc7f29eb10f93ef8a57749c49e1b_50576_480x0_resize_q75_box.jpeg
new file mode 100644
index 00000000..2bbd8587
Binary files /dev/null and b/events-recordings/gregor-lenz_hu401dcc7f29eb10f93ef8a57749c49e1b_50576_480x0_resize_q75_box.jpeg differ
diff --git a/events-recordings/index.html b/events-recordings/index.html
new file mode 100644
index 00000000..c44e868f
--- /dev/null
+++ b/events-recordings/index.html
@@ -0,0 +1,15 @@
+Events recordings
Trevor Bekolay’s primary research interest is in learning and memory. In his Master’s degree, he explored how to do supervised, unsupervised, and reinforcement learning in networks of biologically plausible spiking neurons. In his PhD, he applied this knowledge to the domain of speech to explore how sounds coming into the ear become high-level linguistic representations, and how those representations become sequences of vocal tract movements that produce speech.
Trevor is also passionate about reproducible science, particularly when complex software pipelines are involved. In 2013, he started a development effort to reimplement the Nengo neural simulator from scratch in Python, which has now grown to a project with over 20 contributors around the world.
2023-02-14: Giorgia Dellaferrera, PEPITA - A forward-forward alternative to backpropagation
Giorgia Dellaferrera has completed her PhD in computational neuroscience at the Institute of Neuroinformatics (ETH Zurich and the University of Zurich) and IBM Research Zurich with Prof. Indiveri, Prof. Eleftheriou and Dr. Pantazi. Her doctoral thesis focused on the interplay between neuroscience and artificial intelligence, with an emphasis on learning mechanisms in brains and machines. During her PhD, she visited the lab of Prof. Kreiman at the Harvard Medical School (US), where she developed a biologically inspired training strategy for artificial neural networks. Before her PhD, Giorgia obtained a master’s degree in Applied Physics at the Swiss Federal Institute of Technology Lausanne (EPFL) and worked as an intern at the Okinawa Institute of Science and Technology, Logitech, Imperial College London, and EPFL.
2023-03-02: Jason Eshraghian, Hands-on session with snnTorch
Jason K. Eshraghian is an Assistant Professor at the Department of Electrical and Computer Engineering at UC Santa Cruz, CA, USA. Prior to that, he was a Post-Doctoral Researcher at the Department of Electrical Engineering and Computer Science, University of Michigan in Ann Arbor. He received the Bachelor of Engineering (Electrical and Electronic) and the Bachelor of Laws degrees from The University of Western Australia, WA, Australia in 2016, where he also completed his Ph.D. degree.
Professor Eshraghian was awarded the 2019 IEEE VLSI Best Paper Award, the Best Paper Award at the 2019 IEEE Artificial Intelligence CAS Conference, and the Best Live Demonstration Award at the 2020 IEEE ICECS for his work on neuromorphic vision and in-memory computing using RRAM. He currently serves as the secretary-elect of the IEEE Neural Systems and Applications Committee, and was a recipient of the Fulbright Future Fellowship (Australian-America Fulbright Commission), the Forrest Research Fellowship (Forrest Research Foundation), and the Endeavour Fellowship (Australian Government).
2023-03-21: Catherine Schuman, Evolutionary Optimization for Neuromorphic Systems
Catherine (Katie) Schuman is an Assistant Professor in the Department of Electrical Engineering and Computer Science at the University of Tennessee (UT). She received her Ph.D. in Computer Science from UT in 2015, where she completed her dissertation on the use of evolutionary algorithms to train spiking neural networks for neuromorphic systems. Katie previously served as a research scientist at Oak Ridge National Laboratory, where her research focused on algorithms and applications of neuromorphic systems. Katie co-leads the TENNLab Neuromorphic Computing Research Group at UT. She has over 100 publications as well as seven patents in the field of neuromorphic computing. She received the Department of Energy Early Career Award in 2019.
2023-04-04: Gregor Lenz, Hands-on session with Sinabs and Speck
Gregor Lenz graduated with a Ph.D. in neuromorphic engineering from Sorbonne University. He thinks that technology can learn a thing or two from how biological systems process information.
His main interests are event cameras that are inspired by the human retina and spiking neural networks that mimic the human brain, in an effort to teach machines to compute a bit more like humans do. At the very least there are some power efficiency gains to be made, but hopefully more! He also loves building open source software for spike-based machine learning. You can find more information on his personal website.
He is the maintainer of two open source projects in the field of neuromorphic computing, Tonic and expelliarmus.
2023-04-26: Dylan Muir, Hands-on session with Xylo and Rockpool
Dylan Muir is the Vice President for Global Research Operations; Director for Algorithms and Applications; and Director for Global Business Development at SynSense. Dr. Muir is a specialist in architectures for neural computation. He has published extensively in computational and experimental neuroscience. At SynSense he is responsible for the company research vision, and directing development of neural architectures for signal processing. Dr. Muir holds a Doctor of Science (PhD) from ETH Zurich, and undergraduate degrees (Masters) in Electronic Engineering and in Computer Science from QUT, Australia.
2023-05-31: Andreas Wild & Mathis Richter, Lava - an open-source software framework for developing neuro-inspired applications.
Andreas Wild received the Dr. rer. nat. degree in physics, with a focus on the development of silicon-based electron spin qubits, from the Technical University of Munich, Germany, in 2013. After joining Intel in 2013, he has been a Senior Researcher with the Intel Neuromorphic Computing Lab since 2015, where he leads algorithm research.
Mathis Richter is a Research Scientist in the Neuromorphic Computing Lab at Intel Labs, where he leads the Application Software team, developing commercial software solutions based on neuromorphic technology. Before joining Intel in 2021, he worked as a postdoc and PhD student on neural process models of higher cognition at the Institute for Neural Computation, Ruhr-University Bochum.
Dr. Federico Corradi is an Assistant Professor in the Electrical Engineering Department. His research activities are in Neuromorphic Computing and Engineering and span from the development of efficient models of computation to novel microelectronic architectures, with CMOS and emerging technologies, for both efficient deep learning and brain-inspired algorithms. His long-term research goal is to understand the principles of computation in natural neural systems and apply those for the development of a new generation of energy-efficient sensing and computing technologies. His research outputs find use in several application domains such as robotics, machine vision, temporal signal processing, and biomedical signal analysis.
Dr. Corradi received a Ph.D. degree from the University of Zurich in Neuroinformatics and an international Ph.D. from the ETH Neuroscience Centre Zurich in 2015. He was a Postgraduate at the Institute of Neuroinformatics in 2018. From 2015 to 2018, he worked in the Institute of Neuroinformatics’ spin-off company Inilabs, developing event-based cameras and neuromorphic processors. From 2018 to 2022, he was at IMEC, the Netherlands, where he started a group focusing on neuromorphic IC design activities. His passion for research recently brought him back to academia while keeping strong ties with startups and companies.
He is an active review editor of Frontiers in Neuromorphic Engineering, IEEE, and other international journals. In addition, he currently serves as a technical program committee member of several machine learning and neuromorphic symposiums and conferences (ICTOPEN, ICONS, DSD, EUROMICRO).
2023-07-11: Konrad Kording, Does the brain do gradient descent?
Konrad Kording runs his lab at the University of Pennsylvania. Konrad is interested in the question of how the brain solves the credit assignment problem, and similarly how we should assign credit in the real world (through causality). As an extension of this main thrust, he is interested in applications of causality in biomedical research. Konrad trained as a student at ETH Zurich with Peter Konig, as a postdoc at UCL London with Daniel Wolpert and at MIT with Josh Tenenbaum. After a decade at Northwestern University, he is now PIK professor at UPenn.
2023-07-19: Lana Josipović, From C/C++ to Dynamically Scheduled Circuits
Lana Josipović is an Assistant Professor in the Department of Information Technology and Electrical Engineering at ETH Zurich. Prior to joining ETH Zurich in January 2022, she received a Ph.D. degree in Computer Science from EPFL, Switzerland. Her research interests include reconfigurable computing and electronic design automation, with an emphasis on high-level synthesis techniques to generate hardware designs from high-level programming languages. She developed Dynamatic, an open-source high-level synthesis tool that produces dynamically scheduled circuits from C/C++ code. She is a recipient of the EDAA Outstanding Dissertation Award, Google Ph.D. Fellowship in Systems and Networking, Google Women Techmakers Scholarship, and Best Paper Award at FPGA'20.
+
+
+
+
+
\ No newline at end of file
diff --git a/events-recordings/jason-eshraghian.webp b/events-recordings/jason-eshraghian.webp
new file mode 100644
index 00000000..ffe17a39
Binary files /dev/null and b/events-recordings/jason-eshraghian.webp differ
diff --git a/events-recordings/jason-eshraghian_hu5d2d5366ae47912bc22f41487225bf0b_38920_1024x0_resize_q75_h2_box_2.webp b/events-recordings/jason-eshraghian_hu5d2d5366ae47912bc22f41487225bf0b_38920_1024x0_resize_q75_h2_box_2.webp
new file mode 100644
index 00000000..f5a6676b
Binary files /dev/null and b/events-recordings/jason-eshraghian_hu5d2d5366ae47912bc22f41487225bf0b_38920_1024x0_resize_q75_h2_box_2.webp differ
diff --git a/events-recordings/jason-eshraghian_hu5d2d5366ae47912bc22f41487225bf0b_38920_480x0_resize_q75_h2_box_2.webp b/events-recordings/jason-eshraghian_hu5d2d5366ae47912bc22f41487225bf0b_38920_480x0_resize_q75_h2_box_2.webp
new file mode 100644
index 00000000..b609d50c
Binary files /dev/null and b/events-recordings/jason-eshraghian_hu5d2d5366ae47912bc22f41487225bf0b_38920_480x0_resize_q75_h2_box_2.webp differ
diff --git a/events-recordings/konrad-kording.jpg b/events-recordings/konrad-kording.jpg
new file mode 100644
index 00000000..3f13b6d6
Binary files /dev/null and b/events-recordings/konrad-kording.jpg differ
diff --git a/events-recordings/lana-josipovic.jpg b/events-recordings/lana-josipovic.jpg
new file mode 100644
index 00000000..7c1a777f
Binary files /dev/null and b/events-recordings/lana-josipovic.jpg differ
diff --git a/events-recordings/lava-intel.png b/events-recordings/lava-intel.png
new file mode 100644
index 00000000..627fc961
Binary files /dev/null and b/events-recordings/lava-intel.png differ
diff --git a/events-recordings/trevor-bekolay.jpeg b/events-recordings/trevor-bekolay.jpeg
new file mode 100644
index 00000000..bb679d69
Binary files /dev/null and b/events-recordings/trevor-bekolay.jpeg differ
diff --git a/events-recordings/trevor-bekolay_hufbec93062349485948350099f8024531_51540_1024x0_resize_q75_box.jpeg b/events-recordings/trevor-bekolay_hufbec93062349485948350099f8024531_51540_1024x0_resize_q75_box.jpeg
new file mode 100644
index 00000000..316bcbbf
Binary files /dev/null and b/events-recordings/trevor-bekolay_hufbec93062349485948350099f8024531_51540_1024x0_resize_q75_box.jpeg differ
diff --git a/events-recordings/trevor-bekolay_hufbec93062349485948350099f8024531_51540_480x0_resize_q75_box.jpeg b/events-recordings/trevor-bekolay_hufbec93062349485948350099f8024531_51540_480x0_resize_q75_box.jpeg
new file mode 100644
index 00000000..2e300c4c
Binary files /dev/null and b/events-recordings/trevor-bekolay_hufbec93062349485948350099f8024531_51540_480x0_resize_q75_box.jpeg differ
diff --git a/events/2023-01-26-Nengo.pdf b/events/2023-01-26-Nengo.pdf
new file mode 100644
index 00000000..4d6a531d
Binary files /dev/null and b/events/2023-01-26-Nengo.pdf differ
diff --git a/events/2023-02-14-Giorgia-Dellaferrera.pdf b/events/2023-02-14-Giorgia-Dellaferrera.pdf
new file mode 100644
index 00000000..18c16f98
Binary files /dev/null and b/events/2023-02-14-Giorgia-Dellaferrera.pdf differ
diff --git a/events/catherine-schuman.webp b/events/catherine-schuman.webp
new file mode 100644
index 00000000..34000988
Binary files /dev/null and b/events/catherine-schuman.webp differ
diff --git a/events/corradi.jpg b/events/corradi.jpg
new file mode 100644
index 00000000..58d66c53
Binary files /dev/null and b/events/corradi.jpg differ
diff --git a/events/giorgia-dellaferrera.jpeg b/events/giorgia-dellaferrera.jpeg
new file mode 100644
index 00000000..95f4b3f0
Binary files /dev/null and b/events/giorgia-dellaferrera.jpeg differ
diff --git a/events/giorgia-dellaferrera.jpg b/events/giorgia-dellaferrera.jpg
new file mode 100644
index 00000000..06525e30
Binary files /dev/null and b/events/giorgia-dellaferrera.jpg differ
diff --git a/events/giulia-dangelo.jpg b/events/giulia-dangelo.jpg
new file mode 100644
index 00000000..2a1a6a4d
Binary files /dev/null and b/events/giulia-dangelo.jpg differ
diff --git a/events/gregor-lenz.jpeg b/events/gregor-lenz.jpeg
new file mode 100644
index 00000000..fe60b3c1
Binary files /dev/null and b/events/gregor-lenz.jpeg differ
diff --git a/events/index.html b/events/index.html
new file mode 100644
index 00000000..c3f50b32
--- /dev/null
+++ b/events/index.html
@@ -0,0 +1,9 @@
+Events
Events organised by ONM: talks, hands-on sessions and more.
Upcoming events
Join our newsletter to be updated on new events and get a reminder!
2023-09-25: Giulia D’Angelo, What’s catching your eye? The visual attention mechanism
Time
6PM-7:30PM, CEST.
Abstract
Every agent, whether animal or robotic, needs to process its visual sensory input in an efficient way, to allow understanding of, and interaction with, the environment. The process of filtering relevant information out of the continuous bombardment of complex sensory data is called selective attention. Visual attention is the result of the complex interplay between bottom-up and top-down mechanisms to perceptually organise and understand the scene. Giulia will describe how to approach visual attention using bio-inspired models emulating the human visual system to allow robots to interact with their surroundings.
Speaker’s bio
Giulia D’Angelo is a postdoctoral researcher in neuroengineering in the EDPR laboratory at the Italian Institute of Technology. She obtained a B.Sc. in biomedical engineering and an M.Sc. in neuroengineering, developing a neuromorphic visual system at King’s College London. She successfully defended her Ph.D. viva in 2022 at the University of Manchester, proposing a biologically plausible model for event-driven saliency-based visual attention. She is currently working on bio-inspired visual algorithms exploiting neuromorphic platforms.
2023-10-22: Innatera
TBD.
2023-11-16: Timoleon Moraitis, Making neuromorphic computing mainstream
Time
6PM-7PM, CET.
Abstract
TBD.
Speaker’s bio
TBD.
Tobias Fischer
Abstract
TBD.
Speaker’s bio
Tobias conducts interdisciplinary research at the intersection of intelligent robotics, computer vision, and computational cognition. His main goal is to develop high-performing, bio-inspired computer vision algorithms that simultaneously examine the perceptual capabilities of animals/humans and robots.
He is a Lecturer (Assistant Professor) in Queensland University of Technology’s Centre for Robotics. He joined the Centre as an Associate Investigator and Research Fellow in January 2020. Previously, he was a postdoctoral researcher in the Personal Robotics Lab at Imperial College London.
He received a PhD from Imperial College in January 2019. His thesis was awarded the UK Best Thesis in Robotics Award 2018 and the Eryl Cadwaladr Davies Award for the best thesis in Imperial’s EEE Department in 2017-2018. He previously received an M.Sc. degree (distinction) in Artificial Intelligence from The University of Edinburgh in 2014 and a B.Sc. degree in Computer Engineering from Ilmenau University of Technology, Germany, in 2013. His works have attracted two best poster awards, one best paper award, and he was the senior author of the winning submission to the Facebook Mapillary Place Recognition Challenge 2020.
+
+
+
+
+
\ No newline at end of file
diff --git a/events/innatera.png b/events/innatera.png
new file mode 100644
index 00000000..428b319d
Binary files /dev/null and b/events/innatera.png differ
diff --git a/events/jason-eshraghian.webp b/events/jason-eshraghian.webp
new file mode 100644
index 00000000..ffe17a39
Binary files /dev/null and b/events/jason-eshraghian.webp differ
diff --git a/events/konrad-kording.jpg b/events/konrad-kording.jpg
new file mode 100644
index 00000000..3f13b6d6
Binary files /dev/null and b/events/konrad-kording.jpg differ
diff --git a/events/lana-josipovic.jpg b/events/lana-josipovic.jpg
new file mode 100644
index 00000000..7c1a777f
Binary files /dev/null and b/events/lana-josipovic.jpg differ
diff --git a/events/lava-intel.png b/events/lava-intel.png
new file mode 100644
index 00000000..627fc961
Binary files /dev/null and b/events/lava-intel.png differ
diff --git a/events/timoleon-moraitis.png b/events/timoleon-moraitis.png
new file mode 100644
index 00000000..3bab2822
Binary files /dev/null and b/events/timoleon-moraitis.png differ
diff --git a/events/tobias-fischer.webp b/events/tobias-fischer.webp
new file mode 100644
index 00000000..e7ddf86e
Binary files /dev/null and b/events/tobias-fischer.webp differ
diff --git a/events/trevor-bekolay.jpeg b/events/trevor-bekolay.jpeg
new file mode 100644
index 00000000..bb679d69
Binary files /dev/null and b/events/trevor-bekolay.jpeg differ
diff --git a/getting-involved/index.html b/getting-involved/index.html
new file mode 100644
index 00000000..b2374858
--- /dev/null
+++ b/getting-involved/index.html
@@ -0,0 +1,13 @@
+Getting involved
The easiest way to get in touch is probably through Discord, where we discuss research topics, job opportunities, open hardware, spiking neural network training and much more. We’d be delighted to have you join!
+If you feel like contributing to ONM but you’re not exactly sure how, here are some ideas to get you started:
Link an interesting open source repository to our collection so that others can find it too! This can be a framework or package that deals with neuromorphic topics, or a neat implementation, for example! ONM is meant as a platform to showcase your code and others'!
Write a blog post together with the community. If you think you learned something useful that you’d like to share with the community, you can simply post your draft on our Discord to ask for some feedback. In case you then want to publish it on our website, take a look at the structure of existing posts and open a PR for a new one. Think about it as a mini paper!
Why not host your code in the ONM organisation directly? It’ll boost visibility and you can get instant help/feedback from community members. You can also migrate an existing repository if you wish to do so.
+
+
+
+
+
\ No newline at end of file
diff --git a/img/ONM-logo.png b/img/ONM-logo.png
new file mode 100644
index 00000000..486c216e
Binary files /dev/null and b/img/ONM-logo.png differ
diff --git a/img/ONM.png b/img/ONM.png
new file mode 100644
index 00000000..bc5eadbf
Binary files /dev/null and b/img/ONM.png differ
diff --git a/index.html b/index.html
new file mode 100644
index 00000000..2f59926a
--- /dev/null
+++ b/index.html
@@ -0,0 +1,6 @@
+Open Neuromorphic
In this article, we will try to model a Leaky Integrate-and-Fire (LIF) spiking neuron using digital hardware: registers, memories, adders and so on.
+
\ No newline at end of file
diff --git a/index.xml b/index.xml
new file mode 100644
index 00000000..9a5ad425
--- /dev/null
+++ b/index.xml
@@ -0,0 +1,14 @@
+Open Neuromorphichttps://open-neuromorphic.org/Recent content on Open NeuromorphicHugo -- gohugo.ioen-usWed, 02 Aug 2023 00:00:00 +0000SNN library benchmarkshttps://open-neuromorphic.org/p/snn-library-benchmarks/Wed, 02 Aug 2023 00:00:00 +0000https://open-neuromorphic.org/p/snn-library-benchmarks/<img src="https://open-neuromorphic.org/p/snn-library-benchmarks/framework-benchmarking-16k-header.png" alt="Featured image of post SNN library benchmarks" />SNN library benchmarks Open Neuromorphic’s list of SNN frameworks currently counts 10 libraries, and those are only the most popular ones! As the sizes of spiking neural network models grow thanks to deep learning, optimization becomes more important for researchers and practitioners alike. Training SNNs is often slow, as the stateful networks are typically fed sequential inputs. Today’s most popular training method then is some form of backpropagation through time, whose time complexity scales with the number of time steps.Bits of Chips | TrueNorthhttps://open-neuromorphic.org/p/bits-of-chips-truenorth/Mon, 27 Mar 2023 00:00:00 +0000https://open-neuromorphic.org/p/bits-of-chips-truenorth/<img src="https://open-neuromorphic.org/p/bits-of-chips-truenorth/brain-to-chip.png" alt="Featured image of post Bits of Chips | TrueNorth" />Why do we want to emulate the brain? If you have ever read an article on neuromorphic computing, you might have noticed that in the introduction of each of these there is the same statement: “The brain is much powerful than any AI machine when it comes to cognitive tasks but it runs on a 10W power budget!”. This is absolutely true: neurons in the brain communicate among each other by means of spikes, which are short voltage pulses that propagate from one neuron to the other.Efficient compression for event-based datahttps://open-neuromorphic.org/p/efficient-compression-for-event-based-data/Tue, 28 Feb 2023 00:00:00 +0000https://open-neuromorphic.org/p/efficient-compression-for-event-based-data/<img src="https://open-neuromorphic.org/p/efficient-compression-for-event-based-data/file_read_benchmark.png" alt="Featured image of post Efficient compression for event-based data" />Efficient compression for event-based data Datasets grow larger in size As neuromorphic algorithms tackle more complex tasks that are linked to bigger datasets, and event cameras mature to have higher spatial resolution, it is worth looking at how to encode that data efficiently when storing it on disk. To give you an example, Prophesee’s latest automotive object detection dataset is some 3.5 TB in size for under 40h of recordings with a single camera.Digital neuromophic hardware read listhttps://open-neuromorphic.org/p/digital-neuromophic-hardware-read-list/Wed, 11 Jan 2023 00:00:00 +0000https://open-neuromorphic.org/p/digital-neuromophic-hardware-read-list/<img src="https://open-neuromorphic.org/p/digital-neuromophic-hardware-read-list/frenkel-thesis.png" alt="Featured image of post Digital neuromophic hardware read list" />Here’s a list of articles and theses related to digital hardware designs for neuomorphic applications. I plan to update it regularly. To be redirected directly to the sources, click on the titles!
+If you are new to neuromorphic computing, I strongly suggest to get a grasp of how an SNN works from this paper. Otherwise, it will be pretty difficult to understand the content of the papers listed here.
+2015 TrueNorth: Design and Tool Flow of a 65 mW 1 Million Neuron Programmable Neurosynaptic Chip, Filipp Akopyan et al.Spiking neurons: a digital hardware implementationhttps://open-neuromorphic.org/p/spiking-neurons-a-digital-hardware-implementation/Mon, 02 Jan 2023 00:00:00 +0000https://open-neuromorphic.org/p/spiking-neurons-a-digital-hardware-implementation/<img src="https://open-neuromorphic.org/p/spiking-neurons-a-digital-hardware-implementation/loihi.png" alt="Featured image of post Spiking neurons: a digital hardware implementation" />Spiking neurons In this article, we will try to model a layer of Leaky Integrate and Fire (LIF) spiking neurons using digital hardware: registers, memories, adders and so on. To do so, we will consider a single output neuron connected to multiple input neurons from a previous layer.
+In a Spiking Neural Network (SNN), neurons communicate by means of spikes: these activation voltages are then converted to currents through the synapses, charging the membrane potential of the destination neuron.Open Neuromorphichttps://open-neuromorphic.org/p/open-neuromorphic/Wed, 21 Dec 2022 00:00:00 +0000https://open-neuromorphic.org/p/open-neuromorphic/<img src="https://open-neuromorphic.org/p/open-neuromorphic/ONM.png" alt="Featured image of post Open Neuromorphic" />This organisation is created by a loose collective of open source collaborators across academia, industry and individual contributors. What connects us is the love for building tools that can be used in the neuromorphic community and we want to share ownership of this vision.
+Open Neuromorphic (ONM) provides the following things:
+A curated list of software frameworks to make it easier to find the tool you need. A platform for your code.Abouthttps://open-neuromorphic.org/about/Mon, 01 Jan 0001 00:00:00 +0000https://open-neuromorphic.org/about/<img src="https://open-neuromorphic.org/about/ONM.png" alt="Featured image of post About" />This organisation is created by a loose collective of open source collaborators across academia, industry and individual contributors. Most of us have never met in person before but have started contributing to a common or each other’s projects! What connects us is the love for building tools that can be used in the neuromorphic community and we want to share ownership of this vision. If you feel like that resonates with you, please don’t hesitate to get in touch!Eventshttps://open-neuromorphic.org/events/Mon, 01 Jan 0001 00:00:00 +0000https://open-neuromorphic.org/events/<img src="https://open-neuromorphic.org/img/ONM.png" alt="Featured image of post Events" />Upcoming events Join our newsletter to be updated on new events and get a reminder!
+2023-09-25: Giulia D’Angelo, What’s catching your eye? The visual attention mechanism Giulia D’Angelo
+Time 6PM-7:30PM, CEST.
+Abstract Every agent, whether animal or robotic, needs to process its visual sensory input in an efficient way, to allow understanding of, and interaction with, the environment. The process of filtering relevant information out of the continuous bombardment of complex sensory data is called selective attention.Events recordingshttps://open-neuromorphic.org/events-recordings/Mon, 01 Jan 0001 00:00:00 +0000https://open-neuromorphic.org/events-recordings/2023-01-26: Trevor Bekolay, Nengo - Applied Brain Research Recording https://youtu.be/sgu9l_bqAHM
+Slides click here
+Speaker’s bio Trevor Bekolay’s primary research interest is in learning and memory. In his Master’s degree, he explored how to do supervised, unsupervised, and reinforcement learning in networks of biologically plausible spiking neurons. In his PhD, he applied this knowledge to the domain of speech to explore how sounds coming into the ear become high-level linguistic representations, and how those representations become sequences of vocal tract movements that produce speech.Getting involvedhttps://open-neuromorphic.org/getting-involved/Mon, 01 Jan 0001 00:00:00 +0000https://open-neuromorphic.org/getting-involved/The easiest way to get in touch is probably through Discord, where we discuss research topics, job opportunities, open hardware, spiking neural network training and much more. We’d be delighted to have you join! If you feel like contributing to ONM but you’re not exactly sure how, here are some ideas to get you started:
+Link an interesting open source repository to our collection so that others can find it too!Resourceshttps://open-neuromorphic.org/resources/Mon, 01 Jan 0001 00:00:00 +0000https://open-neuromorphic.org/resources/Please check our Github repository for a list of neuromorphic open source software and hardware!Teamhttps://open-neuromorphic.org/team/Mon, 01 Jan 0001 00:00:00 +0000https://open-neuromorphic.org/team/Fabrizio Ottati Fabrizio Ottati
+Fabrizio Ottati is a Ph.D. student in the Department of Electronics and Communications of Politecnico di Torino, under the supervision of professor Luciano Lavagno and professor Mario Roberto Casu.
+His main interests are event-based cameras, digital hardware design and automation, spiking neural networks and piedmontese red wine. He is the maintainer of two open source projects in the field of neuromorphic computing, Tonic and Expelliarmus. You can find more information on his website.
\ No newline at end of file
diff --git a/onm-events/event-22-12-13/slides/charlotte-frenkel.pdf b/onm-events/event-22-12-13/slides/charlotte-frenkel.pdf
new file mode 100644
index 00000000..79e9c5f6
Binary files /dev/null and b/onm-events/event-22-12-13/slides/charlotte-frenkel.pdf differ
diff --git a/onm-events/event-22-12-13/slides/fabrizio-ottati.pdf b/onm-events/event-22-12-13/slides/fabrizio-ottati.pdf
new file mode 100644
index 00000000..dd0b18ba
Binary files /dev/null and b/onm-events/event-22-12-13/slides/fabrizio-ottati.pdf differ
diff --git a/p/bits-of-chips-truenorth/brain-to-chip.png b/p/bits-of-chips-truenorth/brain-to-chip.png
new file mode 100644
index 00000000..3d572ff3
Binary files /dev/null and b/p/bits-of-chips-truenorth/brain-to-chip.png differ
diff --git a/p/bits-of-chips-truenorth/brain-to-chip_hu9771bc0e82bc872d2b8f49c5ba507c44_1564085_120x120_fill_box_smart1_3.png b/p/bits-of-chips-truenorth/brain-to-chip_hu9771bc0e82bc872d2b8f49c5ba507c44_1564085_120x120_fill_box_smart1_3.png
new file mode 100644
index 00000000..cb4f20bb
Binary files /dev/null and b/p/bits-of-chips-truenorth/brain-to-chip_hu9771bc0e82bc872d2b8f49c5ba507c44_1564085_120x120_fill_box_smart1_3.png differ
diff --git a/p/bits-of-chips-truenorth/brain-to-chip_hu9771bc0e82bc872d2b8f49c5ba507c44_1564085_1600x0_resize_box_3.png b/p/bits-of-chips-truenorth/brain-to-chip_hu9771bc0e82bc872d2b8f49c5ba507c44_1564085_1600x0_resize_box_3.png
new file mode 100644
index 00000000..57f2b2ba
Binary files /dev/null and b/p/bits-of-chips-truenorth/brain-to-chip_hu9771bc0e82bc872d2b8f49c5ba507c44_1564085_1600x0_resize_box_3.png differ
diff --git a/p/bits-of-chips-truenorth/brain-to-chip_hu9771bc0e82bc872d2b8f49c5ba507c44_1564085_800x0_resize_box_3.png b/p/bits-of-chips-truenorth/brain-to-chip_hu9771bc0e82bc872d2b8f49c5ba507c44_1564085_800x0_resize_box_3.png
new file mode 100644
index 00000000..740d126a
Binary files /dev/null and b/p/bits-of-chips-truenorth/brain-to-chip_hu9771bc0e82bc872d2b8f49c5ba507c44_1564085_800x0_resize_box_3.png differ
diff --git a/p/bits-of-chips-truenorth/crossbar.png b/p/bits-of-chips-truenorth/crossbar.png
new file mode 100644
index 00000000..c4da7897
Binary files /dev/null and b/p/bits-of-chips-truenorth/crossbar.png differ
diff --git a/p/bits-of-chips-truenorth/imc.jpg b/p/bits-of-chips-truenorth/imc.jpg
new file mode 100644
index 00000000..c7a8c0f4
Binary files /dev/null and b/p/bits-of-chips-truenorth/imc.jpg differ
diff --git a/p/bits-of-chips-truenorth/index.html b/p/bits-of-chips-truenorth/index.html
new file mode 100644
index 00000000..6fcd5931
--- /dev/null
+++ b/p/bits-of-chips-truenorth/index.html
@@ -0,0 +1,32 @@
+Bits of Chips | TrueNorth
If you have ever read an article on neuromorphic computing, you might have noticed that the introduction of each of these contains the same statement: “The brain is much more powerful than any AI machine when it comes to cognitive tasks, but it runs on a 10W power budget!”. This is absolutely true: neurons in the brain communicate with each other by means of spikes, which are short voltage pulses that propagate from one neuron to the other. The average spiking activity is estimated to be around 10Hz (i.e. a spike every 100ms). This yields very low processing power consumption, since activity in the brain turns out to be really sparse (at least, this is the hypothesis).
How can the brain do all this? There are several reasons (or hypotheses, I should say):
the 3D connectivity among neurons. While in today’s chips we can place connections among logic gates and circuits only in 2D space, in the brain the whole 3D space is at our disposal; this allows the mammalian brain to reach a fanout on the order of 10 thousand connections per neuron.
extremely low power operation. Through thousands of years of evolution, the most power efficient “brain implementation” has won, since the ones that consume less energy to live are the ones that turn out to survive when there is no food (not entirely correct, but I hope that true scientists won’t kill me). The power density in the brain is estimated to be 10mW per square centimeter, while in a modern digital processor we easily reach 100W per square centimeter.
Hence, IBM decided to try to emulate the brain with TrueNorth, a 4096-core chip packing 1 million neurons and 256 million synapses. Let’s dive into its design!
Introduction
The TrueNorth design has been driven by seven principles.
Purely event-driven architecture
The architecture is a purely event-driven one, being Globally Asynchronous Locally Synchronous (GALS), with a completely asynchronous interconnection fabric among the synchronous cores. What does this actually mean?
In general, in a GALS architecture, there is an array of processing elements (PEs), each of which is internally synchronous, i.e. driven by its own local clock. The local clocks can differ from PE to PE, since each PE may be running at a different speed. When two different clock domains have to be interfaced, the communication between them is effectively asynchronous: handshake protocols have to be implemented between them in order to guarantee proper global operation.
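To make that boundary crossing concrete, here is a minimal software sketch of a four-phase request/acknowledge handshake, the kind of protocol typically used at such clock-domain interfaces. The names, data types and single-word channel are illustrative choices of mine, not TrueNorth’s actual circuit:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative four-phase req/ack handshake between two domains. */
typedef struct { bool req, ack; int data, latched; } channel_t;

void sender_push(channel_t *ch, int data) {
    ch->data = data;                   /* drive the data first...            */
    ch->req  = true;                   /* ...then raise the request          */
}
void receiver_poll(channel_t *ch) {
    if (ch->req && !ch->ack) {         /* new request seen                   */
        ch->latched = ch->data;        /* capture the data                   */
        ch->ack = true;                /* acknowledge                        */
    }
}
void sender_poll(channel_t *ch) {
    if (ch->req && ch->ack) ch->req = false;   /* request served: release it */
}
void receiver_idle(channel_t *ch) {
    if (!ch->req && ch->ack) ch->ack = false;  /* return to the idle state   */
}

int main(void) {
    channel_t ch = {0};
    sender_push(&ch, 42);              /* phase 1: req high, data valid      */
    receiver_poll(&ch);                /* phase 2: ack high, data latched    */
    sender_poll(&ch);                  /* phase 3: req low                   */
    receiver_idle(&ch);                /* phase 4: ack low, channel idle     */
    printf("transferred: %d\n", ch.latched);
    return 0;
}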
In TrueNorth, as in SpiNNaker, there is no global clock: the PEs, which are neurosynaptic cores, are interconnected through a completely asynchronous network. In this way, the chip operation is event-driven, since the network gets activated only when there are spikes (and other kinds of events) to be transmitted.
Low power operation
The CMOS process employed is a low-power one, with the goal of minimising static power. The technology node is 28nm CMOS.
Massive parallelism
Since the brain is a massively parallel architecture, employing 100 billion neurons each of which has a fanout of approximately 10 thousand synapses, parallelism is a key feature of TrueNorth: the chip employs 1 million neurons and 256 million synapses, by interconnecting 4096 cores, each of which models 256 neurons and 64 thousand synapses.
Real time operation
The authors claim real-time operation, which translates to a global time synchronisation of 1ms, i.e. the neurons are updated and spike every millisecond.
Scalable design
The architecture is scalable: multiple cores can be put together and, since the clock signal is distributed only locally, in the core fabric, the global clock signal skew problem of modern VLSI digital circuits does not affect TrueNorth.
Error tolerance
Redundancy is employed in the design, especially in the memory circuits, to make the chip tolerant to defects.
One-to-one correspondence between software and hardware
The chip operation corresponds perfectly to the software operation, when using the IBM TrueNorth application design software.
Designing an asynchronous circuit is a very difficult task, since no VLSI EDA tools are available for this kind of design (well, actually now there is a research-level one); hence, the TrueNorth designers decided to use conventional EDA tools for the synchronous core design and custom design tools and flows for the asynchronous interconnection fabric.
Architecture
Who’s Von Neumann?
The TrueNorth chip is not a Von Neumann machine! But what does this mean?
In a Von Neumann machine, like the one depicted above, the processing unit is separated from the memory, which stores both data and instructions. The processor reads the instructions from the memory, decodes them, retrieves the data on which it needs to operate from the same memory and, then, executes the instructions.
A neuromorphic chip, in principle, is an in-memory computing architecture: in this, there is no central memory or central processing unit; instead, storage and computational circuitry are distributed, i.e. we have many small memories and small computational units, as shown in the figure below.
There are three main advantages to this approach:
lower energy consumption associated with memory accesses. The main power consumption involved in a memory access is the one corresponding to the bus data movement. A data bus, simplifying, is a big $RC$ circuit, and every time we make the signals on it change, we consume a lot of power to drive this equivalent circuit. One can easily deduce that the values of both resistance and capacitance are directly proportional to the bus length (see the first-order formulas right after this list)! Hence, by putting the processing element (PE) and memory close to each other, we reduce the data movement power consumption.
lower latency associated with memory accesses. A big $RC$ circuit (i.e. a long bus) is also slower than a short one (i.e. the time constant associated with the equivalent circuit is larger); hence, by shortening its length, we also reduce the time needed to read or write data to the memory.
high parallelism. The PEs can all work in parallel, since each of them can access its own data independently of the other PEs.
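As a rough first-order sketch (these are standard wire models, not figures from the TrueNorth paper, and $\alpha$, $V_{dd}$ and $L$ are symbols introduced here only for illustration), the dependence on the bus length $L$ can be made explicit:
$$E_{dyn} \approx \alpha \, C_{bus} V_{dd}^{2}, \qquad t_{delay} \propto R_{bus} C_{bus}, \qquad R_{bus} \propto L, \quad C_{bus} \propto L$$
where $\alpha$ is the switching activity and $V_{dd}$ the supply voltage: halving the bus length roughly halves the energy of each transfer and quarters the $RC$ delay.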
However, there is no such thing as a free lunch: among other drawbacks, this kind of architecture comes with one big disadvantage, namely the area occupation of the circuit.
+In a Von Neumann architecture, the memory density is higher: in VLSI circuits, the larger the memory, the higher the number of bits you can store per square micrometer; hence, the total area occupied by the memories in an in-memory computing architecture is larger than the one corresponding to a Von Neumann circuit. Moreover, you have multiple PEs performing the same operation on different data (this kind of architecture is also called Single Instruction Multiple Data (SIMD)); with a central memory, you can use a single PE to perform the same operations, saving lots of chip area at the expense of performance, since you cannot perform operations in parallel.
Memory and computation co-location: emulating the brain
In TrueNorth, a mixed approach has been adopted: a neurosynaptic core packs 256 neurons in memory, which share the same PE; 4096 cores are arranged in an in-memory computing fashion for the advantages cited before. However, what is a neuron?
A neuron is made of different parts, that are shown in the figure above. Dendrites branch out from the cell body, also called soma, where the nucleus is located. Then, there is a long communication channel called axon, which ends in the pre-synaptic terminal, which can have multiple branches.
Dendrites branch out from the soma. Their function is to receive information from other neurons. Some dendrites have small protrusions called spines that are important for communicating with other neurons.
The soma is where the computation happens. This is where the membrane potential is built up, by ions exchange with the environment and other neurons.
The axon is the communication channel of the neuron. It is attached to the neuron through the axon hillock; at the end of the axon, we find the pre-synaptic terminals, which are the “pins” used to connect to the post-synaptic terminal of other neurons. These connections are called synapses.
The axon terminates at the pre-synaptic terminal or terminal bouton. The terminal of the pre-synaptic cell forms a synapse with another neuron or cell, known as the post-synaptic cell. When the action potential reaches the pre-synaptic terminal, the neuron releases neurotransmitters into the synapse. The neurotransmitters act on the post-synaptic cell. Therefore, neuronal communication requires both an electrical signal (the action potential) and a chemical signal (the neurotransmitter). Most commonly, pre-synaptic terminals contact dendrites, but terminals can also communicate with cell bodies or even axons. Neurons can also synapse on non-neuronal cells such as muscle cells or glands.
The terms pre-synaptic and post-synaptic are in reference to which neuron is releasing neurotransmitters and which is receiving them. Pre-synaptic cells release neurotransmitters into the synapse and those neurotransmitters act on the post-synaptic cell.
The axon transmits an action potential, which is the famous spike! This results in the release of chemical neurotransmitters to communicate with other cells. Here’s a nice video to show it in action (source).
From biology to silicon
In a neuromorphic chip, hence, memory and computational units are co-located. The neuron constitutes the computational unit, while the synapse weights and the membrane potential are the data on which the neuron operates. The chip is programmed by deciding which neurons are connected to which; hence, we do not write instructions to a memory to be executed, but we program the neuron interconnections and parameters!
In the figure above, the logical representation of a TrueNorth core is reported. Consider the sub-figure on the left: on the right, the post-synaptic neurons are represented with a triangular shape, and these are connected to some neurons on the left, whose outputs are represented by the AND-gate-shaped objects. It is an example of a fully-connected layer in artificial neural networks.
In the sub-figure on the right, the logic implementation of this layer is depicted. Input spikes are collected in buffers: since in the chip the spikes are evaluated periodically (there is a clock tick distributed every 1ms), we need to store them until we can evaluate them; for this reason, we need local storage. Which spike is delivered to which neuron is determined by the connectivity, here illustrated through a crossbar: a dot on a wire represents a connection between the corresponding post-synaptic neuron dendrite (vertical wires) and the pre-synaptic neuron axon terminals (horizontal wires); this connection is the synapse, and its “strength” is the synapse weight.
When the clock tick arrives, the neurons process the incoming spikes and, if they have to, they spike and send these spikes to the network of neurons. We can have local connections (i.e. the spikes are redistributed within the chip) or global connections (the spikes are delivered outside the chip through the Network-on-Chip (NoC)).
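To make the per-tick behaviour concrete, here is a small, hypothetical C sketch of what one 1ms tick of a single core computes; all names, types and bit widths are illustrative choices of mine, not IBM’s. Buffered input spikes are delivered through the binary crossbar, scaled by the axon-type weight, and integrated into each neuron’s membrane potential, after which the leak is applied.

#include <stdbool.h>
#include <stdint.h>

#define N_AXONS   256   /* pre-synaptic inputs per core   */
#define N_NEURONS 256   /* post-synaptic neurons per core */

/* One 1ms tick of a neurosynaptic core (illustrative sketch).
   A[i]      : spike buffered on axon i since the last tick
   w[i][j]   : binary crossbar, 1 if axon i connects to neuron j
   G[i]      : axon type of input i (0..3 here, 1..4 in the paper)
   s[j][g]   : signed weight of neuron j for axon type g
   V[j]      : membrane potential of neuron j
   lambda[j] : leak subtracted from neuron j at every tick          */
void core_tick(const bool A[N_AXONS],
               const bool w[N_AXONS][N_NEURONS],
               const uint8_t G[N_AXONS],
               const int8_t s[N_NEURONS][4],
               int32_t V[N_NEURONS],
               const int32_t lambda[N_NEURONS])
{
    for (int j = 0; j < N_NEURONS; j++) {
        for (int i = 0; i < N_AXONS; i++)
            if (A[i] && w[i][j])        /* spike arrived on a connected axon */
                V[j] += s[j][G[i]];     /* integrate the synaptic weight     */
        V[j] -= lambda[j];              /* apply the leak                    */
        /* the threshold check and reset follow; see the pseudo-code below  */
    }
}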
There are some additional blocks, such as the Pseudo Random Number Generator (PRNG), that are used for more complex features, such as stochastic spike integration, stochastic leakage, stochastic thresholds, and so on.
Neuron model
Let’s get to the equations now! The neuron model employed in TrueNorth is the Leaky Integrate and Fire (LIF) one. The update equation is the following:
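The equation itself did not make it onto this page, so here is a reconstruction that is consistent with the definitions listed below (take it as a sketch of the TrueNorth LIF update, not a verbatim copy from the paper):
$$V_{j}[t] = V_{j}[t-1] + \sum_{i=0}^{255} A_{i}[t]\, w_{i,j}\, s_{j}^{G_{i}} - \lambda_{j}$$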
$V_{j}[t]$ represents the membrane potential of the $j$-th post-synaptic neuron at timestamp $t$.
in a TrueNorth core, each post-synaptic neuron can be connected to 256 pre-synaptic neurons; this is why the sum spans from $i=0$ to $i=255$.
$A_{i}[t]$ corresponds to the $i$-th pre-synaptic neuron spike: it is equal to 1 if that neuron has spiked at timestamp $t$ and 0 otherwise.
$w_{i,j}$ is a binary variable that determines whether the $i$-th pre-synaptic neuron is connected to the $j$-th post-synaptic neuron: when they are, $w_{i,j}=1$, otherwise $w_{i,j}=0$.
$s_{j}^{G_{i}}$ determines the strength of the connection, i.e. the synapse weight value. In TrueNorth, there are four types of axons, and the axon of the $i$-th pre-synaptic neuron is identified by a value of the variable $G_{i} \in \{1,2,3,4\}$; the dendrite of the $j$-th post-synaptic neuron is identified by $s_{j}$.
$\lambda_{j}$ is the leakage value. At each timestamp $t$, this fixed quantity is subtracted from the membrane potential.
In the equation, the spike mechanism is missing! The authors denote the spiking threshold of the $j$-th neuron with $\alpha_{j}$: when $V_{j}[t] \gt \alpha_{j}$, the neuron potential is reset to the rest value, denoted with $R_{j}$. The following is the pseudo-code, employing the C ternary operator (LaTeX in Markdown is a mess):
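The snippet is also missing from this page; a minimal reconstruction of the intended ternary-based check (the function name and types are mine) could look like this:

#include <stdbool.h>
#include <stdint.h>

/* Threshold check and reset for the j-th neuron (illustrative sketch).
   V_j: membrane potential, alpha_j: spiking threshold, R_j: rest potential. */
bool fire_and_reset(int32_t *V_j, int32_t alpha_j, int32_t R_j) {
    bool spike = (*V_j > alpha_j);     /* spike if the threshold is exceeded */
    *V_j = spike ? R_j : *V_j;         /* reset the potential on a spike     */
    return spike;
}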
One may be wondering what the PRNG block stands for in the figure: it is a pseudo random number generator, and it is used to provide stochastic spike integration, leakage and threshold for the post-synaptic neurons.
What we have discussed until now is the model of a TrueNorth core: in this, 256 neurons are placed, each of which can be connected to 256 other neurons, which can be in the same core but also outside of it. This is accomplished by interconnecting the neurons and multiple cores using a 2D mesh network; here comes the fun stuff, because the out-of-chip communication is completely asynchronous. Are you ready for a headache?
To be continued…
Stay tuned for episode II, in which we will dive in the theory behind the asynchronous design of TrueNorth :)
Authors
Fabrizio Ottati is a Ph.D. student in the HLS Laboratory of the Department of Electronics and Communications, Politecnico di Torino. His main interests are event-based cameras, digital hardware design and neuromorphic computing. He is one of the maintainers of two open source projects in the field of neuromorphic computing, Tonic and Expelliarmus, and one of the founders of Open Neuromorphic.
+
\ No newline at end of file
diff --git a/p/digital-neuromophic-hardware-read-list/frenkel-thesis.png b/p/digital-neuromophic-hardware-read-list/frenkel-thesis.png
new file mode 100644
index 00000000..4b15f0b4
Binary files /dev/null and b/p/digital-neuromophic-hardware-read-list/frenkel-thesis.png differ
diff --git a/p/digital-neuromophic-hardware-read-list/frenkel-thesis_hu87aec3c1b95cb5a45016ddc34478f587_199946_120x120_fill_box_smart1_3.png b/p/digital-neuromophic-hardware-read-list/frenkel-thesis_hu87aec3c1b95cb5a45016ddc34478f587_199946_120x120_fill_box_smart1_3.png
new file mode 100644
index 00000000..ffc0620e
Binary files /dev/null and b/p/digital-neuromophic-hardware-read-list/frenkel-thesis_hu87aec3c1b95cb5a45016ddc34478f587_199946_120x120_fill_box_smart1_3.png differ
diff --git a/p/digital-neuromophic-hardware-read-list/frenkel-thesis_hu87aec3c1b95cb5a45016ddc34478f587_199946_1600x0_resize_box_3.png b/p/digital-neuromophic-hardware-read-list/frenkel-thesis_hu87aec3c1b95cb5a45016ddc34478f587_199946_1600x0_resize_box_3.png
new file mode 100644
index 00000000..e9464ba5
Binary files /dev/null and b/p/digital-neuromophic-hardware-read-list/frenkel-thesis_hu87aec3c1b95cb5a45016ddc34478f587_199946_1600x0_resize_box_3.png differ
diff --git a/p/digital-neuromophic-hardware-read-list/frenkel-thesis_hu87aec3c1b95cb5a45016ddc34478f587_199946_800x0_resize_box_3.png b/p/digital-neuromophic-hardware-read-list/frenkel-thesis_hu87aec3c1b95cb5a45016ddc34478f587_199946_800x0_resize_box_3.png
new file mode 100644
index 00000000..c4109523
Binary files /dev/null and b/p/digital-neuromophic-hardware-read-list/frenkel-thesis_hu87aec3c1b95cb5a45016ddc34478f587_199946_800x0_resize_box_3.png differ
diff --git a/p/digital-neuromophic-hardware-read-list/index.html b/p/digital-neuromophic-hardware-read-list/index.html
new file mode 100644
index 00000000..5fa74f3c
--- /dev/null
+++ b/p/digital-neuromophic-hardware-read-list/index.html
@@ -0,0 +1,15 @@
+Digital neuromophic hardware read list
List of research articles related to digital hardware for neuromorphic applications.
Here’s a list of articles and theses related to digital hardware designs for neuromorphic applications. I plan to update it regularly. To be redirected directly to the sources, click on the titles!
If you are new to neuromorphic computing, I strongly suggest getting a grasp of how an SNN works from this paper. Otherwise, it will be pretty difficult to understand the content of the papers listed here.
This is a fully digital chip, embedding 4096 cores with 1M neurons and 256M synapses!
It adopts a mixed design methodology: the local computational cores are synchronous, while the interconnecting infrastructure is asynchronous, i.e. event-driven. In particular, each core adopts time-multiplexing to compute the states of its neurons, minimizing core area; each core has 256 neurons associated with it.
TrueNorth claims to operate in real time: a 1kHz synchronization signal is used to trigger the core computations (state update, spike processing, etc.). Moreover, they provide a software tool with one-to-one mapping to the hardware in order to deploy applications on it.
Probably the most popular neuromorphic processor right now. What distinguishes it from the others is the online learning capability coupled with a completely asynchronous design: cores and routing network are completely clock-less!
Loihi supports local and scalable learning rules through spike traces, which correspond to filtered pre-synaptic and post-synaptic spike trains with configurable time constants, multiple state variables per synapse in addition to the weight value, and reward traces. Moreover, several computational primitives are provided: addition of stochastic noise to the neuron’s synaptic current response; configurable and adaptable synaptic, axonal and refractory delays; configurable dendritic tree processing; neuron threshold adaptation; scaling and saturation of synaptic weights.
The Loihi chip employs 128 neuromorphic cores, each consisting of 1024 primitive spiking neural units. Each core includes a programmable learning engine and a 2 Mb on-chip SRAM, ECC overhead included. The chip is fabricated in Intel’s 14 nm FinFET process.
In this paper, a digital neuromorphic processor is presented. The Verilog is also open source!
The neuron states and the synapse weights are stored in two foundry SRAMs on chip. In order to emulate a crossbar, time multiplexing is adopted: the synapse weights and neuron states are updated sequentially instead of in parallel. The core embeds 256 neurons (4 kB of SRAM) and 256x256 synapses (64 kB of SRAM). This yields very high synapse and neuron densities: 741k synapses per square millimeter and 3k neurons per square millimeter, in a 28 nm FDSOI CMOS process.
The neuron model is programmable through an SPI interface: the user can choose between a LIF model (8 bits of state per neuron) and an Izhikevich one (55 bits of state per neuron). Online learning is enabled by a hardware-efficient implementation of the Spike-Driven Synaptic Plasticity (SDSP) rule.
The design is fully synchronous. The time evolution of the SNN implemented on the core can be tuned by changing the frequency of the time-reference events, allowing the neuron states to be updated only when events actually take place.
+The result is that each Synaptic OPeration (SOP) requires only 12.7 pJ when the chip is powered at 0.55 V.
In this work, a quad-core neuromorphic processor is presented.
The neuron model employed is the LIF one. Synapses are quantized down to 1-bit resolution, and online learning is supported through a stochastic version of the SDSP rule. The chip is fabricated in 65 nm CMOS, embedding 2k LIF neurons and 2M synapses, reaching a density of 738k synapses per square millimeter.
The neuron interconnection is arranged in a hierarchical routing solution: mesh-based interconnectivity for off-chip communications; star-based connectivity for inter-core communications; crossbar-based interconnectivity for intra-core communications. 27 bits per neuron are allocated for addressing, allowing a fan-in of 1k neurons and a fan-out of 2k neurons for each neuron.
In this work, a synchronous architecture is proposed. The logic operates at Near Threshold Voltage (NTV), and clock gating and power gating are heavily used to minimize power consumption during idle operation, which amounts to 300 nW. The chip targets always-on applications, such as keyword spotting (KWS), and is prototyped in a 65 nm CMOS process. The design is inference-only, with no online-learning capabilities.
The architecture belongs to the feed-forward category: 5 cores are used to implement fully connected spiking layers of Integrate and Fire (IF) neurons. To minimize power consumption, asynchronous wake-up circuits activate the layers only when there are incoming spikes.
On the GCSC and HeySnips datasets, the recognition accuracies are 91.8% and 95.8%, respectively. The total power consumption ranges between 75 nW and 220 nW.
This is an asynchronous digital architecture with no online-learning capabilities; it is an inference-only chip.
The bit precision and the network topology are chosen at synthesis time, while the neuron parameters and synapse weights can be programmed on chip. The neuron model employed is the Integrate and Fire (IF) one, with no leakage; leakage can be emulated by adding an inhibitory input neuron to model it. A local clock is generated at neuron level when a spike arrives, so that the circuit consumes only static power when not operating. No time multiplexing is employed: the architecture is organised in a layer-by-layer fashion in which all the neurons operate in parallel (i.e. each core corresponds to a neuron).
In this work, a Recurrent Spiking Neural Network (RSNN) processor is presented. The Verilog code is open source.
The key feature of this chip is its online learning capability, using a modified version of the feed-forward eligibility traces algorithm, a bio-inspired approximation of the BackPropagation Through Time (BPTT) algorithm employed for artificial RNNs. The chip performance is validated on gesture recognition, keyword spotting and navigation, with a sub-150 μW power budget and a sub-square-millimeter area budget.
In this work, an inference-only digital chip is presented. The design is tuned towards processing the output of event cameras, employing convolution engines in the hardware.
The novelty of this design is that, even though it is synchronous, the number of operations performed is proportional to the number of events recorded by the camera, which allows very efficient inference when dealing with sparse inputs (e.g. low-activity scenarios).
The neuron model employed in this work is a LIF one, with a delta-shaped synaptic kernel. The architecture topology is feed-forward, with the neuron cores arranged either in a cascaded fashion or in a configurable Processing Element (PE) array. The focus of this chip is to deal efficiently with the sparse nature of the activation maps in an SNN, by compressing the model with sparse data structures coupled with model pruning and 8-bit fixed-point parallelism to reduce the on-chip memory requirement. The SNN architecture is mixed with an ANN one.
The final implementation, validated on an object detection task, achieves 29 FPS on 1024x576 input frames; the throughput efficiency is 35.88 TOPS/W and the energy is 1.05 mJ/frame, running at 500 MHz and taped out in the TSMC 28 nm CMOS process.
The object detection network is trained offline as an ANN and then converted to an SNN, using the IVS 3 classes dataset and achieving 71.5% mAP with on-chip inference.
Acknowledgements
I would like to thank Charlotte Frenkel for the valuable comments and suggestions.
Fabrizio Ottati is a Ph.D. student in the HLS Laboratory of the Department of Electronics and Communications, Politecnico di Torino. His main interests are event-based cameras, digital hardware design and neuromorphic computing. He is one of the maintainers of two open source projects in the field of neuromorphic computing, Tonic and Expelliarmus, and one of the founders of Open Neuromorphic.
+
+
+
+
+
\ No newline at end of file
diff --git a/p/efficient-compression-for-event-based-data/file_read_benchmark.png b/p/efficient-compression-for-event-based-data/file_read_benchmark.png
new file mode 100644
index 00000000..f448f3d3
Binary files /dev/null and b/p/efficient-compression-for-event-based-data/file_read_benchmark.png differ
diff --git a/p/efficient-compression-for-event-based-data/file_read_benchmark_hua993d955fce821c195de229da3476b0b_77837_1024x0_resize_box_3.png b/p/efficient-compression-for-event-based-data/file_read_benchmark_hua993d955fce821c195de229da3476b0b_77837_1024x0_resize_box_3.png
new file mode 100644
index 00000000..815a262e
Binary files /dev/null and b/p/efficient-compression-for-event-based-data/file_read_benchmark_hua993d955fce821c195de229da3476b0b_77837_1024x0_resize_box_3.png differ
diff --git a/p/efficient-compression-for-event-based-data/file_read_benchmark_hua993d955fce821c195de229da3476b0b_77837_120x120_fill_box_smart1_3.png b/p/efficient-compression-for-event-based-data/file_read_benchmark_hua993d955fce821c195de229da3476b0b_77837_120x120_fill_box_smart1_3.png
new file mode 100644
index 00000000..f25fdd31
Binary files /dev/null and b/p/efficient-compression-for-event-based-data/file_read_benchmark_hua993d955fce821c195de229da3476b0b_77837_120x120_fill_box_smart1_3.png differ
diff --git a/p/efficient-compression-for-event-based-data/file_read_benchmark_hua993d955fce821c195de229da3476b0b_77837_1600x0_resize_box_3.png b/p/efficient-compression-for-event-based-data/file_read_benchmark_hua993d955fce821c195de229da3476b0b_77837_1600x0_resize_box_3.png
new file mode 100644
index 00000000..6f0650ca
Binary files /dev/null and b/p/efficient-compression-for-event-based-data/file_read_benchmark_hua993d955fce821c195de229da3476b0b_77837_1600x0_resize_box_3.png differ
diff --git a/p/efficient-compression-for-event-based-data/file_read_benchmark_hua993d955fce821c195de229da3476b0b_77837_480x0_resize_box_3.png b/p/efficient-compression-for-event-based-data/file_read_benchmark_hua993d955fce821c195de229da3476b0b_77837_480x0_resize_box_3.png
new file mode 100644
index 00000000..42f67656
Binary files /dev/null and b/p/efficient-compression-for-event-based-data/file_read_benchmark_hua993d955fce821c195de229da3476b0b_77837_480x0_resize_box_3.png differ
diff --git a/p/efficient-compression-for-event-based-data/file_read_benchmark_hua993d955fce821c195de229da3476b0b_77837_800x0_resize_box_3.png b/p/efficient-compression-for-event-based-data/file_read_benchmark_hua993d955fce821c195de229da3476b0b_77837_800x0_resize_box_3.png
new file mode 100644
index 00000000..22fffc66
Binary files /dev/null and b/p/efficient-compression-for-event-based-data/file_read_benchmark_hua993d955fce821c195de229da3476b0b_77837_800x0_resize_box_3.png differ
diff --git a/p/efficient-compression-for-event-based-data/file_read_benchmark_log.png b/p/efficient-compression-for-event-based-data/file_read_benchmark_log.png
new file mode 100644
index 00000000..76003b25
Binary files /dev/null and b/p/efficient-compression-for-event-based-data/file_read_benchmark_log.png differ
diff --git a/p/efficient-compression-for-event-based-data/file_read_benchmark_white.png b/p/efficient-compression-for-event-based-data/file_read_benchmark_white.png
new file mode 100644
index 00000000..cda81565
Binary files /dev/null and b/p/efficient-compression-for-event-based-data/file_read_benchmark_white.png differ
diff --git a/p/efficient-compression-for-event-based-data/index.html b/p/efficient-compression-for-event-based-data/index.html
new file mode 100644
index 00000000..d70ee7e8
--- /dev/null
+++ b/p/efficient-compression-for-event-based-data/index.html
@@ -0,0 +1,20 @@
+Efficient compression for event-based data
Choosing a good trade-off between disk footprint and file loading times.
Efficient compression for event-based data
Datasets grow larger in size
As neuromorphic algorithms tackle more complex tasks that are linked to bigger datasets, and event cameras mature to have higher spatial resolution, it is worth looking at how to encode that data efficiently when storing it on disk. To give you an example, Prophesee’s latest automotive object detection dataset is some 3.5 TB in size for under 40h of recordings with a single camera.
Event cameras record with fine-grained temporal resolution
In contrast to conventional cameras, event cameras output changes in illumination, which is already a form of compression. But the output data rate is still a lot higher than that of conventional cameras, because of the microsecond temporal resolution that event cameras are able to record with. When streaming data, we get millions of tuples per second, each consisting of a microsecond timestamp, x/y coordinates and a polarity indicator; they look nothing like a frame but form a list of events:
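To make this concrete, here is a minimal sketch of what such a decoded event stream might look like in memory, as a NumPy structured array; the field names and values are illustrative assumptions, not the output of any particular camera or library.

```python
import numpy as np

# Hypothetical layout of a decoded event stream: one record per event, holding a
# microsecond timestamp, pixel coordinates and a polarity bit. Field names are
# illustrative; actual frameworks may use different ones.
event_dtype = np.dtype([("t", np.int64), ("x", np.int16), ("y", np.int16), ("p", np.int8)])

events = np.array(
    [(1_000_001, 320, 240, 1),   # ON event at pixel (320, 240)
     (1_000_005, 321, 240, 0),   # OFF event a few microseconds later
     (1_000_012, 320, 241, 1)],
    dtype=event_dtype,
)

# A one-second recording from a modern sensor easily holds millions of such records.
print(len(events), "events,", events.nbytes, "bytes uncompressed")
```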
So how can we store such data efficiently?
+A straightforward idea is to resort to formats such as hdf5 and numpy and store the arrays of events directly. But without exploiting any structure in the recorded data, those uncompressed formats end up having the largest file footprint. For our example automotive dataset, this would result in some 7-8 TB of data, which is undesirable. Event camera manufacturers have come up with ways to encode event streams more efficiently. Not only are we concerned about the size of event files on disk, but we also want to be able to read them back to memory as fast as possible!
+In the following figure we plot the results of our benchmark of different file type encodings and software frameworks that can decode files.
Ideally, we want to be close to the origin, where we read fast and compression is high. The file size depends on the encoding, whereas the reading speed depends on the particular implementation/framework that reads the files. In terms of file size, we can see that numpy doesn’t use any compression whatsoever, resulting in a file of some 1.7 GB for our sample recording. Prophesee’s evt3 and the generic lossless brotli formats achieve the best compression. In terms of reading speed, numpy is the fastest, as it doesn’t deal with any compression on disk. Unzipping the compressed events from disk using h5py, on the other hand, is by far the slowest. Using Expelliarmus and the evt2 file format, we get very close to numpy reading speeds while using only a fourth of the disk space. For more information about Prophesee event encoding formats, check out this blog post.
Capable frameworks
The authors of this post have released Expelliarmus as a lightweight, well-tested, pip-installable framework that can read and write different formats easily. If you’re working with dat, evt2 or evt3 formats, why not give it a try?
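As a rough usage sketch (assuming the Wizard class and its read method work as described in the Expelliarmus documentation; the file path and encoding below are placeholders), reading a recording into a structured NumPy array looks like this:

```python
from expelliarmus import Wizard

# Placeholder path and encoding; pick the encoding that matches your file.
wizard = Wizard(encoding="evt3")        # supported encodings include "dat", "evt2", "evt3"
events = wizard.read("recording.raw")   # returns a structured NumPy array of events
print(events.shape, events.dtype)
```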
Summary
When training spiking neural networks on event-based data, we want to be able to feed new data to the network as fast as possible. But given the high data rate of an event camera, the amount of data quickly becomes an issue itself, especially for more complex tasks. So we want to choose a good trade-off between a dataset size that’s manageable and reading speed. We hope that this article will help future groups that record large-scale datasets to pick a good encoding format.
Authors
Gregor Lenz is a research engineer at SynSense, where he works on machine learning pipelines that can train and deploy robust models on neuromorphic hardware. He holds a PhD in neuromorphic engineering from Sorbonne University in Paris, France.
Fabrizio Ottati is a Ph.D. student in the HLS Laboratory of the Department of Electronics and Communications, Politecnico di Torino. His main interests are event-based cameras, digital hardware design and neuromorphic computing. He is one of the maintainers of two open source projects in the field of neuromorphic computing, Tonic and Expelliarmus, and one of the founders of Open Neuromorphic.
The aedat4 file contains IMU events as well as change detection events, which increases the file size artificially in contrast to the other benchmarked formats.
Organization that aims at providing one place to reference all relevant open-source project in the neuromorphic research domain.
This organisation is created by a loose collective of open source collaborators across academia, industry and individual contributors. What connects us is the love for building tools that can be used in the neuromorphic community and we want to share ownership of this vision.
Open Neuromorphic (ONM) provides the following things:
A curated list of software frameworks to make it easier to find the tool you need.
A platform for your code. If you wish to create a new repository or migrate your existing code to ONM, please get in touch with us.
Educational content to get you started in the neuromorphic world.
Events about neuromorphic research and software, with contributions from both academia and industry.
Projects that we list here can fall into this non-exclusive list of categories:
Spiking Neural Networks (SNNs) training and/or inference, for both ML and neuroscience application.
Event-based sensors data handling.
Digital hardware designs for neuromorphic applications.
Mixed-signal hardware designs for neuromorphic applications.
Get in touch with us if you wish to give a talk, write an article or to know more about the neuromorphic world.
Comparing the most popular SNN frameworks for gradient-based optimization on top of PyTorch.
SNN library benchmarks
Open Neuromorphic’s list of SNN frameworks currently counts 10 libraries, and those are only the most popular ones! As the sizes of spiking neural network models grow thanks to deep learning, optimization becomes more important for researchers and practitioners alike. Training SNNs is often slow, as the stateful networks are typically fed sequential inputs. Today’s most popular training method is some form of backpropagation through time, whose time complexity scales with the number of time steps. We benchmark libraries that all take slightly different approaches to extending PyTorch for gradient-based optimization of SNNs. While we focus on the time it takes to pass data forward and backward through the network, there are obviously other, less tangible qualities of frameworks (extensibility, quality of documentation, ease of install, support for neuromorphic hardware …) that we’re not going to try to capture here. In our benchmarks, we use a single fully-connected (linear) and a leaky integrate and fire (LIF) layer. The input data has a batch size of 10, 500 time steps and n neurons.
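As a rough sketch of the kind of measurement we run (using a hand-rolled LIF layer in plain PyTorch rather than any of the benchmarked libraries; the layer size, surrogate gradient and timing details below are illustrative assumptions, not the benchmark code itself):

```python
import time
import torch

device = "cuda" if torch.cuda.is_available() else "cpu"
batch, steps, n = 10, 500, 512
fc = torch.nn.Linear(n, n).to(device)
beta, threshold = 0.95, 1.0
x = torch.rand(steps, batch, n, device=device)

def forward_backward():
    mem = torch.zeros(batch, n, device=device)
    spikes = []
    for t in range(steps):                            # sequential, stateful loop:
        cur = fc(x[t])                                # this is what makes BPTT slow
        mem = beta * mem + cur                        # leaky integration
        spk_hard = (mem > threshold).float()
        spk = spk_hard.detach() + mem - mem.detach()  # straight-through surrogate
        mem = mem - spk_hard * threshold              # reset by subtraction
        spikes.append(spk)
    torch.stack(spikes).sum().backward()              # backward through all time steps

if device == "cuda":
    torch.cuda.synchronize()
start = time.time()
forward_backward()
if device == "cuda":
    torch.cuda.synchronize()
print(f"forward + backward: {(time.time() - start) * 1e3:.1f} ms")
```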
The first figure shows results for a small 512-neuron network. Overall, SpikingJelly is the fastest when using the CuPy backend, at just 1.5 ms for both the forward and the backward call. The libraries that use an implementation of EXODUS (Sinabs / Rockpool) or SLAYER (Lava DL) equally benefit from custom CUDA code and vectorization across the time dimension in both forward and backward passes. It is noteworthy that such custom implementations exist for specific neuron models (such as the LIF under test), but not for arbitrary neuron models. Flexibility comes at a price in speed, and that is the trade-off that frameworks such as snnTorch, Norse, Sinabs or Rockpool make. SpikingJelly also supports a conventional PyTorch GPU backend, with which it’s possible to define neuron models more flexibly. Such implementations are also much easier to maintain, and relying on the extensive testing of PyTorch means that they will likely work on a given machine configuration. Custom CUDA/CuPy backend implementations need to be compiled, and it is then up to the maintainer to test them on different systems. On top of that, networks that are implemented in SLAYER, EXODUS or SpikingJelly with a CuPy backend cannot be executed on a CPU (unless converted).
When scaling up the number of neurons, the difference between performances becomes more evident. We notice that snnTorch has issues scaling up the forward and backward pass, and Lava DL goes out of memory (OOM) completely, potentially because of the use of conv3d kernels. SpikingJelly keeps its blazing fast forward pass, and EXODUS implementations have the quickest backward pass. SpikingJelly is more than 10 times faster than libraries that rely on pure PyTorch acceleration.
Summary
The ideal library will often depend on a multitude of factors, such as accessible documentation, usability of the API or pre-trained models. Generally speaking, PyTorch offers good support when custom neuron models (that have additional states, recurrence) are to be explored. For larger networks, it will likely pay off to rely on CUDA-accelerated existing implementations or implement CuPy backends for new neuron models. Yet another option is to experiment with torch.compile or CUDA graph replay, although that has not been tested here.
Code and comments
The code for this benchmark is available here. The order of dimensions in the input tensor and how it is fed to the respective models differs between libraries. Benchmarks are averaged across 100 runs on an NVIDIA RTX 2070 GPU with 8 GB of memory. Standard deviations have been omitted because they are negligible. Some things that would be interesting to add:
check that forward dynamics are roughly equal in each case
effect of torch.compile on networks
effect of CUDA graph replay
memory consumption of different libraries
benchmarking JAX implementations
Author
Gregor Lenz holds a PhD in neuromorphic engineering from Sorbonne University and has been training SNNs for a little while now!
In this article, we will try to model a Leaky Integrate and Fire (LIF) spiking neuron using digital hardware: registers, memories, adders and so on.
Spiking neurons
In this article, we will try to model a layer of Leaky Integrate and Fire (LIF) spiking neurons using digital hardware: registers, memories, adders and so on. To do so, we will consider a single output neuron connected to multiple input neurons from a previous layer.
In a Spiking Neural Network (SNN), neurons communicate by means of spikes: these activation voltages are then converted to currents through the synapses, charging the membrane potential of the destination neuron. In the following, the destination neuron is denoted as post-synaptic neuron, with the index $i$, while the input neuron under consideration is denoted as pre-synaptic neuron, with the index $j$.
We denote the input spike train incoming from the pre-synaptic neuron with $\sigma_{j}(t)$:
+$$ \sigma_{j}(t) = \sum_{k} \delta(t-t_{k}) $$
+where $t_{k}$ are the spike timestamps of the spike train $\sigma_{j}(t)$.
The synapse connecting the pre-synaptic neuron with the post-synaptic neuron is denoted with $w_{ij}$. All the incoming spike trains are then integrated by the post-synaptic neuron membrane; the integration function can be modeled by a first-order low-pass filter, denoted with $\alpha_{i}(t)$:
+$$ \alpha_{i}(t) = \frac{1}{\tau_{u_{i}}} e^{-\frac{t}{\tau_{u_{i}}}}$$
+The spike train incoming from the pre-synaptic neuron, hence, is convolved with the membrane function; in real neurons, this corresponds to the input currents coming from the pre-synaptic neurons that charge the post-synaptic neuron membrane potential, $v_{i}(t)$. The sum of the currents in input to the post-synaptic neuron is denoted with $u_{i}(t)$ and modeled through the following equation:
+$$ u_{i}(t) = \sum_{j \neq i}{w_{ij} \cdot (\alpha_{v} \ast \sigma_{j})(t)} $$
+Each pre-synaptic neuron contributes with a current (spike train multiplied by the $w_{ij}$ synapse) and these sum up at the input of the post-synaptic neuron. Given the membrane potential of the destination neuron, denoted with $v_{i}(t)$, the differential equation describing its evolution through time is the following:
+$$ \frac{\partial}{\partial t} v_{i}(t) = -\frac{1}{\tau_{v}} v_{i}(t) + u_{i}(t)$$
+In addition to the input currents, we have the neuron leakage, $\frac{1}{\tau_{v}} v_{i}(t)$, modeled through a leakage coefficient $\frac{1}{\tau_{v}}$ that multiplies the membrane potential.
Discretising the model
Such a differential equation cannot be solved directly with the discrete arithmetic available on digital hardware; hence, we need to discretise the equation. This discretisation leads to the following result:
+$$ v_{i}[t] = \beta \cdot v_{i}[t-1] + (1 - \beta) \cdot u_{i}[t] - \theta \cdot S_{i}[t] $$
+where $\beta$ is the decay coefficient associated with the leakage. We embed $(1-\beta)$ in the input current $u_{i}[t]$ by merging it with the synapse weights as a scaling factor; in this way, the input current $u_{i}[t]$ is normalised regardless of the value of the decay constant $\tau_{v}$.
Notice that the membrane reset mechanism has been added: when a neuron spikes, its membrane potential goes back to the rest potential (usually equal to zero), and this is modeled by subtracting the threshold $\theta$ from $v_{i}(t)$ when an output spike occurs. The output spike is modeled through a function $S_{i}[t]$:
+$$ S_{i}[t] = 1 ~\text{if}~ v_{i}[t] \gt \theta ~\text{else}~ 0 $$
+This is equal to 1 at spike time (i.e. if at timestamp $t$ the membrane potential $v_{i}[t]$ is larger than the threshold $\theta$) and 0 elsewhere.
The input current is given by:
+$$ u_{i}[t] = \sum_{j \neq i}{w_{ij} \cdot S_{j}[t]} $$ Notice that, since $S_{j}[t]$ is either 0 or 1, the input current $u_{i}[t]$ is equal to the sum of the synapse weights of the pre-synaptic neurons that spike at timestamp $t$.
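A minimal NumPy sketch of these discretised equations for a small layer (the sizes, weights and spike pattern are made up for illustration, and $(1-\beta)$ is assumed to be already folded into the weights as described above):

```python
import numpy as np

N, M = 4, 3                          # fan-in and number of neurons in the layer
rng = np.random.default_rng(0)
W = rng.normal(size=(M, N))          # synapse weights w_ij, (1 - beta) folded in
beta, theta = 0.9375, 1.0            # decay coefficient and spiking threshold

v = np.zeros(M)                      # membrane potentials v_i[t-1]
S_pre = np.array([1, 0, 1, 0])       # S_j[t]: which pre-synaptic neurons fired

u = W @ S_pre                        # u_i[t]: sum of the weights of the spiking inputs
v = beta * v + u                     # leak + integrate
S = (v > theta).astype(int)          # S_i[t]: output spikes
v = v - theta * S                    # reset by subtraction
print(u, S, v)
```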
Storage and addressing neurons states
Let us define the layer fan-in, i.e. how many pre-synaptic neurons are connected in input to each post-synaptic neuron in the layer; we denote this number with $N$. Then, we set the total number of neurons in our layer to $M$.
How do we describe a neuron in hardware? First of all, we need to list some basic information associated with each post-synaptic neuron:
its membrane potential $v_{i}[t]$.
the weights associated with the synapses, $w_{ij}$; since each post-synaptic neuron is connected in input to $N$ neurons, these synapses can be grouped in an $N$-entry vector $W_{i}$.
Since there are $M$ neurons in the layer, we need an $M$-entry vector, denoted with $V[t]$, to store the membrane potential values evaluated at timestamp $t$; this vector is associated with a memory array in the hardware architecture.
An address is associated with each neuron, which can be thought of as the $i$ index in the $V[t]$ vector; to obtain $v_{i}[t]$, the post-synaptic neuron address is used to index the membrane potentials memory $V[t]$.
We are able to store and retrieve a post-synaptic neuron membrane potential using a memory; now, we would like to charge it with the pre-synaptic neuron currents in order to emulate the behaviour of a neuron membrane. To do that, we need to get the corresponding input synapses $W_{i}$, multiply them by the spikes of the associated pre-synaptic neurons, sum them up and, then, accumulate the result in the post-synaptic neuron membrane.
Let us start from a single input pre-synaptic neuron:
+$$ u_{ij}[t] = w_{ij} \cdot S_{j}[t] $$
+We know that $S_{j}[t]$ is either 1 or 0; hence, we have either $u_{ij}[t] = w_{ij}$ or $u_{ij}[t] = 0$. This means that the synapse weight is either added or not to the total current $u_{i}[t]$; hence, the weight $w_{ij}$ is read from memory only if the corresponding pre-synaptic neuron spikes! Given a layer of $M$ neurons, each of which is connected in input to $N$ synapses, we can group the $M \cdot N$ weights in a matrix, which can be associated with another memory array, denoted with $W$.
This memory is addressed with the pre-synaptic neuron and the post-synaptic neuron indices to retrieve the weight $w_{ij}$, which automatically corresponds to the $u_{ij}[t]$ current being accumulated in the post-synaptic neuron membrane when the pre-synaptic neuron spikes at timestamp $t$.
Spikes accumulation
Let us implement the neural functionalities using the data structures defined for a neuron (i.e. membrane potential and synapses), starting with the membrane potential charging of a post-synaptic neuron. When the pre-synaptic neuron spikes, its synapse weight $w_{ij}$ is extracted from the synapse memory $W$ and multiplied by the spike; since the spike is a digital bit equal to 1, this is equivalent to using $w_{ij}$ itself as the input current of the post-synaptic neuron; to add this current to $v_{i}[t]$, we need an arithmetic circuit called an adder!
The membrane potential $v_{i}[t]$ is read from the potentials memory $V[t]$ and added to the corresponding synapse current $w_{ij}$; the result is the membrane potential of the next time step, $v_{i}[t+1]$, which is stored in the register placed on the adder output; this value is written back to the $V[t]$ memory in the next clock cycle. The register storing the adder output is denoted as the membrane register.
To prevent multiple read-write cycles due to multiple spiking pre-synaptic neurons, one can add a loop around the membrane register in order to accumulate all the currents of the pre-synaptic neurons that spike at timestep $t$, writing the final value $v_{i}[t+1]$ back to memory only once; a small behavioural sketch of this accumulation is given after the list below. The corresponding circuit is shown in the following.
A multiplexer is placed on one side of the adder; in this way:
the first weight $w_{i0}$ to be accumulated is added to the $v_{i}[t]$ read from memory and saved to the membrane register:
+$$ v_{i}[t+1] = v_{i}[t] + w_{i0} $$
the successive weights are added to the membrane register content, so that all the currents are accumulated before writing $v_{i}[t+1]$ back to memory; using a non-rigorous notation, this can be translated to the following equation:
+$$ v_{i}[t+1] = v_{i}[t+1] + w_{ij},~ 0 \lt j \leq N $$
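A behavioural sketch of this accumulation (plain Python, not RTL; memory contents, sizes and indices are illustrative assumptions):

```python
V = [0.0, 0.0, 0.0]                         # membrane potential memory, M = 3 entries
W = [[0.5, -0.25, 0.125, 0.25],             # synapse weight memory, M x N
     [0.25, 0.5, -0.125, 0.5],
     [0.125, 0.25, 0.5, -0.5]]
spiking_pre = [0, 2]                        # indices j of the pre-synaptic neurons that fired
i = 1                                       # post-synaptic neuron being updated

membrane_reg = V[i] + W[i][spiking_pre[0]]  # first weight added to the value read from memory
for j in spiking_pre[1:]:                   # remaining weights accumulate in the register
    membrane_reg += W[i][j]
V[i] = membrane_reg                         # single write-back to the V memory
print(V)
```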
Excitatory and inhibitory neurons
Our post-synaptic neuron is able to accumulate spikes in its membrane; however, input spikes do not always result in membrane potential charging! In fact, a pre-synaptic neuron can be excitatory (i.e. it charges the post-synaptic neuron membrane) or inhibitory (i.e. it discharges the post-synaptic neuron membrane). In the digital circuit, this corresponds to adding or subtracting, respectively, the synapse weight $w_{ij}$ to or from $v_{i}[t]$. This functionality can be added to the architecture by employing an adder capable of performing both additions and subtractions, choosing between them with a control signal generated by an FSM (Finite State Machine), a sequential digital circuit that evolves through a series of states depending on its inputs and, consequently, generates control signals for the rest of the circuit.
This FSM, given the operation to be executed on the post-synaptic neuron, chooses whether the adder has to add or subtract the synapse current.
However, is this design efficient in terms of the resources employed? Keep in mind that inhibitory and excitatory neurons are chosen at chip programming time; this means that the neuron type does not change during chip operation (although, with the solution we are about to propose, it would not be a problem to change the neuron type on the fly). Hence, we can embed this information in the neuron description by adding a bit to the synapse weights memory row that, depending on its value, marks the neuron as excitatory or inhibitory.
Suppose that, given a pre-synaptic neuron, all its $M$ output synapses are stored in a memory row of $n$-bit words, where $n$ is the number of bits to which the synapse weight is quantized. At the end of memory row $j$, we add a bit denoted with $e_{j}$ that identifies the neuron type and that is read together with the weights from the same memory row: if the pre-synaptic neuron $j$ is excitatory, $e_{j}=1$ and the weight is added; if it is inhibitory, $e_{j}=0$ and the weight is subtracted. In this way, the $e_{j}$ field of the synapse can drive the adder directly.
Leakage
Let us introduce the characteristic feature of the LIF neuron: the leakage! We shall choose a (constant) leakage factor $\beta$ and multiply it by $v_{i}[t]$ to obtain $v_{i}[t+1]$, which is lower than $v_{i}[t]$ since some current has leaked from the membrane, and we model this through $\beta$:
+$$ v_{i}[t+1] = \beta \cdot v_{i}[t] $$
+However, multiplication is an expensive operation in hardware; furthermore, the leakage factor is smaller than one, so we would need to perform a fixed-point multiplication or, even worse, a division! How can we solve this problem?
If we choose $\beta$ as a power of $\frac{1}{2}$, such as $2^{-n}$, the multiplication becomes equivalent to an $n$-position right shift! A really hardware-friendly operation!
In this circuit, an $n$-position right-shift block, denoted with the symbol >>, is placed on one of the adder inputs to obtain $\beta \cdot v_{i}[t]$ from $v_{i}[t]$. A multiplexer is introduced to choose between the synapse weight $w_{ij}$ and the leakage contribution $\beta \cdot v_{i}[t]$ as input to the adder.
Notice that the leakage always has to be subtracted from the membrane potential; hence, we cannot use $e_{j}$ directly to control the adder, but we must modify the circuit so that a subtraction is performed during a leakage operation, regardless of the value of $e_{j}$. A possible solution is to use a signal from the FSM and a logic AND gate to force the adder control signal to 0 during a leakage operation.
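A quick numerical sketch of why the shift works (the integer membrane value and shift amount are illustrative assumptions):

```python
n = 4                   # beta = 2**-n
v = 200                 # membrane potential held as an integer in the membrane register

leak = v >> n           # beta * v[t], computed with a right shift only
v = v - leak            # the leakage contribution is always subtracted
print(leak, v)          # 12 188
```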
Denoting with adder_ctrl the signal that controls the adder and with leak_op_n the one provided by the FSM, and stating that:
for adder_ctrl=1, the adder performs an addition, otherwise a subtraction;
leak_op_n=0 when a leakage operation has to be performed;
adder_ctrl can be obtained as the logic AND of leak_op_n and $e_{j}$, so that, when leak_op_n=0, adder_ctrl=0 regardless of the value of $e_{j}$ and a subtraction is performed by the adder.
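The resulting control logic is just an AND gate; here is a tiny truth-table sketch using the signal names defined above:

```python
def adder_ctrl(leak_op_n, e_j):
    # 1 -> the adder performs an addition, 0 -> a subtraction
    return leak_op_n & e_j

for leak_op_n in (0, 1):
    for e_j in (0, 1):
        op = "add" if adder_ctrl(leak_op_n, e_j) else "subtract"
        print(f"leak_op_n={leak_op_n} e_j={e_j} -> {op}")
```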
Spike mechanism
Our neuron needs to spike! If the spike is encoded as a single digital bit, given the spiking threshold $\theta$, we compare $v_{i}[t]$ to $\theta$ and generate a logic 1 in output when the membrane potential is larger than the threshold. This can be implemented using a comparator circuit.
The output of the comparator is used directly as spike bit.
The membrane has to be reset to the rest potential when the neuron spikes; hence, we need to subtract $\theta$ from $v_{i}[t]$ when the neuron fires. This can be done by driving the input multiplexer of the membrane register to provide $\theta$ to the adder, which performs a subtraction.
This circuit can be simplified (a behavioural sketch of the simplified mechanism follows the list):
by choosing $\theta = 2^m-1$, where $m$ is the bitwidth of the membrane register and the adder, having $v_{i}[t] \gt \theta$ is equivalent to having an overflow in the addition; hence, the comparison result is equal to the overflow flag of the adder, which can be provided directly in output as the spike bit.
instead of subtracting $\theta$ from the membrane register, we can reset $v_{i}[t]$ to 0 when a spike occurs by forcing the membrane register content to 0 with a control signal; this is equivalent to using the overflow flag of the adder as the reset signal for the membrane register. This should not be done in an actual implementation: at least a register should be added on the reset signal of the membrane register to prevent glitches in the adder circuit from resetting it when it should not.
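A behavioural sketch of the simplified spike mechanism (an 8-bit membrane register is an illustrative assumption):

```python
m = 8
theta = 2**m - 1                      # 255: threshold equal to the register's maximum value

def accumulate(v, w):
    total = v + w
    spike = total > theta             # equivalently, the adder's carry-out (overflow) flag
    v_next = 0 if spike else total    # membrane register forced to 0 on a spike
    return v_next, int(spike)

print(accumulate(250, 10))            # (0, 1): overflow -> spike and reset
print(accumulate(100, 10))            # (110, 0): no spike
```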
The resulting circuit is the following.
Conclusion
Here we are, with a first prototype of our LIF layer digital circuit. In the next episode:
we will make it actually work. Right now, this is a functional model that needs some modifications to behave correctly as a spiking neuron layer.
we will implement it in Verilog.
we will simulate it using open source tools, such as Verilator.
Acknowledgements
I would like to thank Jason Eshraghian, Steven Abreu and Gregor Lenz for the valuable corrections and comments that made this article way better than the original draft!
Credits
The cover image is the Loihi die, taken from WikiChip.
Authors
Fabrizio Ottati is a Ph.D. student in the HLS Laboratory of the Department of Electronics and Communications, Politecnico di Torino. His main interests are event-based cameras, digital hardware design and neuromorphic computing. He is one of the maintainers of two open source projects in the field of neuromorphic computing, Tonic and Expelliarmus, and one of the founders of Open Neuromorphic.
Organization that aims at providing one place to reference all relevant open-source project in the neuromorphic research domain.
+
\ No newline at end of file
diff --git a/page/index.html b/page/index.html
new file mode 100644
index 00000000..21941c97
--- /dev/null
+++ b/page/index.html
@@ -0,0 +1,6 @@
+Pages
+
\ No newline at end of file
diff --git a/page/index.xml b/page/index.xml
new file mode 100644
index 00000000..7784b423
--- /dev/null
+++ b/page/index.xml
@@ -0,0 +1,9 @@
+Pages on Open Neuromorphichttps://open-neuromorphic.org/page/Recent content in Pages on Open NeuromorphicHugo -- gohugo.ioen-usAbouthttps://open-neuromorphic.org/about/Mon, 01 Jan 0001 00:00:00 +0000https://open-neuromorphic.org/about/<img src="https://open-neuromorphic.org/about/ONM.png" alt="Featured image of post About" />This organisation is created by a loose collective of open source collaborators across academia, industry and individual contributors. Most of us have never met in person before but have started contributing to a common or each other’s projects! What connects us is the love for building tools that can be used in the neuromorphic community and we want to share ownership of this vision. If you feel like that resonates with you, please don’t hesitate to get in touch!Eventshttps://open-neuromorphic.org/events/Mon, 01 Jan 0001 00:00:00 +0000https://open-neuromorphic.org/events/<img src="https://open-neuromorphic.org/img/ONM.png" alt="Featured image of post Events" />Upcoming events Join our newsletter to be updated on new events and get a reminder!
+2023-09-25: Giulia D’Angelo, What’s catching your eye? The visual attention mechanism Giulia D’Angelo
+Time 6PM-7:30PM, CEST.
+Abstract Every agent, whether animal or robotic, needs to process its visual sensory input in an efficient way, to allow understanding of, and interaction with, the environment. The process of filtering revelant information out of the continuous bombardment of complex sensory data is called selective attention.Events recordingshttps://open-neuromorphic.org/events-recordings/Mon, 01 Jan 0001 00:00:00 +0000https://open-neuromorphic.org/events-recordings/2023-01-26: Trevor Bekolay, Nengo - Applied Brain Research Recording https://youtu.be/sgu9l_bqAHM
+Slides click here
+Speaker’s bio Trevor Bekolay’s primary research interest is in learning and memory. In his Master’s degree, he explored how to do supervised, unsupervised, and reinforcement learning in networks of biologically plausible spiking neurons. In his PhD, he applied this knowledge to the domain of speech to explore how sounds coming into the ear become high-level linguistic representations, and how those representations become sequences of vocal tract movements that produce speech.Getting involvedhttps://open-neuromorphic.org/getting-involved/Mon, 01 Jan 0001 00:00:00 +0000https://open-neuromorphic.org/getting-involved/The easiest way to get in touch is probably through Discord, where we discuss research topics, job opportunities, open hardware, spiking neural network training and much more. We’d be delighted to have you join! If you feel like contributing to ONM but you’re not exactly sure how, here are some ideas to get you started:
+Link an interesting open source repository to our collection so that others can find it too!Resourceshttps://open-neuromorphic.org/resources/Mon, 01 Jan 0001 00:00:00 +0000https://open-neuromorphic.org/resources/Please check our Github repository for a list of neuromorphic open source software and hardware!Teamhttps://open-neuromorphic.org/team/Mon, 01 Jan 0001 00:00:00 +0000https://open-neuromorphic.org/team/Fabrizio Ottati Fabrizio Ottati
+Fabrizio Ottati is a Ph.D. student in the Department of Electronics and Communications of Politecnico di Torino, under the supervision of professor Luciano Lavagno and professor Mario Roberto Casu.
+His main interests are event-based cameras, digital hardware design and automation, spiking neural networks and piedmontese red wine. He is the maintainer of two open source projects in the field of neuromorphic computing, Tonic and Expelliarmus. You can find more information on his website.
\ No newline at end of file
diff --git a/page/page/1/index.html b/page/page/1/index.html
new file mode 100644
index 00000000..325899b1
--- /dev/null
+++ b/page/page/1/index.html
@@ -0,0 +1 @@
+https://open-neuromorphic.org/page/
\ No newline at end of file
diff --git a/page/page/2/index.html b/page/page/2/index.html
new file mode 100644
index 00000000..78bd027a
--- /dev/null
+++ b/page/page/2/index.html
@@ -0,0 +1,6 @@
+Pages
+
\ No newline at end of file
diff --git a/post/index.html b/post/index.html
new file mode 100644
index 00000000..bb6d3575
--- /dev/null
+++ b/post/index.html
@@ -0,0 +1,6 @@
+Posts
+
\ No newline at end of file
diff --git a/post/index.xml b/post/index.xml
new file mode 100644
index 00000000..2df8c109
--- /dev/null
+++ b/post/index.xml
@@ -0,0 +1,6 @@
+Posts on Open Neuromorphichttps://open-neuromorphic.org/post/Recent content in Posts on Open NeuromorphicHugo -- gohugo.ioen-usWed, 02 Aug 2023 00:00:00 +0000SNN library benchmarkshttps://open-neuromorphic.org/p/snn-library-benchmarks/Wed, 02 Aug 2023 00:00:00 +0000https://open-neuromorphic.org/p/snn-library-benchmarks/<img src="https://open-neuromorphic.org/p/snn-library-benchmarks/framework-benchmarking-16k-header.png" alt="Featured image of post SNN library benchmarks" />SNN library benchmarks Open Neuromorphic’s list of SNN frameworks currently counts 10 libraries, and those are only the most popular ones! As the sizes of spiking neural network models grow thanks to deep learning, optimization becomes more important for researchers and practitioners alike. Training SNNs is often slow, as the stateful networks are typically fed sequential inputs. Today’s most popular training method then is some form of backpropagation through time, whose time complexity scales with the number of time steps.Bits of Chips | TrueNorthhttps://open-neuromorphic.org/p/bits-of-chips-truenorth/Mon, 27 Mar 2023 00:00:00 +0000https://open-neuromorphic.org/p/bits-of-chips-truenorth/<img src="https://open-neuromorphic.org/p/bits-of-chips-truenorth/brain-to-chip.png" alt="Featured image of post Bits of Chips | TrueNorth" />Why do we want to emulate the brain? If you have ever read an article on neuromorphic computing, you might have noticed that in the introduction of each of these there is the same statement: “The brain is much powerful than any AI machine when it comes to cognitive tasks but it runs on a 10W power budget!”. This is absolutely true: neurons in the brain communicate among each other by means of spikes, which are short voltage pulses that propagate from one neuron to the other.Efficient compression for event-based datahttps://open-neuromorphic.org/p/efficient-compression-for-event-based-data/Tue, 28 Feb 2023 00:00:00 +0000https://open-neuromorphic.org/p/efficient-compression-for-event-based-data/<img src="https://open-neuromorphic.org/p/efficient-compression-for-event-based-data/file_read_benchmark.png" alt="Featured image of post Efficient compression for event-based data" />Efficient compression for event-based data Datasets grow larger in size As neuromorphic algorithms tackle more complex tasks that are linked to bigger datasets, and event cameras mature to have higher spatial resolution, it is worth looking at how to encode that data efficiently when storing it on disk. To give you an example, Prophesee’s latest automotive object detection dataset is some 3.5 TB in size for under 40h of recordings with a single camera.Digital neuromophic hardware read listhttps://open-neuromorphic.org/p/digital-neuromophic-hardware-read-list/Wed, 11 Jan 2023 00:00:00 +0000https://open-neuromorphic.org/p/digital-neuromophic-hardware-read-list/<img src="https://open-neuromorphic.org/p/digital-neuromophic-hardware-read-list/frenkel-thesis.png" alt="Featured image of post Digital neuromophic hardware read list" />Here’s a list of articles and theses related to digital hardware designs for neuomorphic applications. I plan to update it regularly. To be redirected directly to the sources, click on the titles!
+If you are new to neuromorphic computing, I strongly suggest to get a grasp of how an SNN works from this paper. Otherwise, it will be pretty difficult to understand the content of the papers listed here.
+2015 TrueNorth: Design and Tool Flow of a 65 mW 1 Million Neuron Programmable Neurosynaptic Chip, Filipp Akopyan et al.Spiking neurons: a digital hardware implementationhttps://open-neuromorphic.org/p/spiking-neurons-a-digital-hardware-implementation/Mon, 02 Jan 2023 00:00:00 +0000https://open-neuromorphic.org/p/spiking-neurons-a-digital-hardware-implementation/<img src="https://open-neuromorphic.org/p/spiking-neurons-a-digital-hardware-implementation/loihi.png" alt="Featured image of post Spiking neurons: a digital hardware implementation" />Spiking neurons In this article, we will try to model a layer of Leaky Integrate and Fire (LIF) spiking neurons using digital hardware: registers, memories, adders and so on. To do so, we will consider a single output neuron connected to multiple input neurons from a previous layer.
+In a Spiking Neural Network (SNN), neurons communicate by means of spikes: these activation voltages are then converted to currents through the synapses, charging the membrane potential of the destination neuron.Open Neuromorphichttps://open-neuromorphic.org/p/open-neuromorphic/Wed, 21 Dec 2022 00:00:00 +0000https://open-neuromorphic.org/p/open-neuromorphic/<img src="https://open-neuromorphic.org/p/open-neuromorphic/ONM.png" alt="Featured image of post Open Neuromorphic" />This organisation is created by a loose collective of open source collaborators across academia, industry and individual contributors. What connects us is the love for building tools that can be used in the neuromorphic community and we want to share ownership of this vision.
+Open Neuromorphic (ONM) provides the following things:
+A curated list of software frameworks to make it easier to find the tool you need. A platform for your code.
\ No newline at end of file
diff --git a/post/page/1/index.html b/post/page/1/index.html
new file mode 100644
index 00000000..3040b8fc
--- /dev/null
+++ b/post/page/1/index.html
@@ -0,0 +1 @@
+https://open-neuromorphic.org/post/
\ No newline at end of file
diff --git a/post/page/2/index.html b/post/page/2/index.html
new file mode 100644
index 00000000..3dcc899c
--- /dev/null
+++ b/post/page/2/index.html
@@ -0,0 +1,6 @@
+Posts
+
\ No newline at end of file
diff --git a/resources/index.html b/resources/index.html
new file mode 100644
index 00000000..18e1b3ef
--- /dev/null
+++ b/resources/index.html
@@ -0,0 +1,9 @@
+Resources
+
\ No newline at end of file
diff --git a/tags/ai/index.xml b/tags/ai/index.xml
new file mode 100644
index 00000000..3c2e0c06
--- /dev/null
+++ b/tags/ai/index.xml
@@ -0,0 +1,4 @@
+AI on Open Neuromorphichttps://open-neuromorphic.org/tags/ai/Recent content in AI on Open NeuromorphicHugo -- gohugo.ioen-usWed, 11 Jan 2023 00:00:00 +0000Digital neuromophic hardware read listhttps://open-neuromorphic.org/p/digital-neuromophic-hardware-read-list/Wed, 11 Jan 2023 00:00:00 +0000https://open-neuromorphic.org/p/digital-neuromophic-hardware-read-list/<img src="https://open-neuromorphic.org/p/digital-neuromophic-hardware-read-list/frenkel-thesis.png" alt="Featured image of post Digital neuromophic hardware read list" />Here’s a list of articles and theses related to digital hardware designs for neuomorphic applications. I plan to update it regularly. To be redirected directly to the sources, click on the titles!
+If you are new to neuromorphic computing, I strongly suggest to get a grasp of how an SNN works from this paper. Otherwise, it will be pretty difficult to understand the content of the papers listed here.
+2015 TrueNorth: Design and Tool Flow of a 65 mW 1 Million Neuron Programmable Neurosynaptic Chip, Filipp Akopyan et al.Spiking neurons: a digital hardware implementationhttps://open-neuromorphic.org/p/spiking-neurons-a-digital-hardware-implementation/Mon, 02 Jan 2023 00:00:00 +0000https://open-neuromorphic.org/p/spiking-neurons-a-digital-hardware-implementation/<img src="https://open-neuromorphic.org/p/spiking-neurons-a-digital-hardware-implementation/loihi.png" alt="Featured image of post Spiking neurons: a digital hardware implementation" />Spiking neurons In this article, we will try to model a layer of Leaky Integrate and Fire (LIF) spiking neurons using digital hardware: registers, memories, adders and so on. To do so, we will consider a single output neuron connected to multiple input neurons from a previous layer.
+In a Spiking Neural Network (SNN), neurons communicate by means of spikes: these activation voltages are then converted to currents through the synapses, charging the membrane potential of the destination neuron.
\ No newline at end of file
diff --git a/tags/ai/page/1/index.html b/tags/ai/page/1/index.html
new file mode 100644
index 00000000..6f24b075
--- /dev/null
+++ b/tags/ai/page/1/index.html
@@ -0,0 +1 @@
+https://open-neuromorphic.org/tags/ai/
\ No newline at end of file
diff --git a/tags/compression/index.html b/tags/compression/index.html
new file mode 100644
index 00000000..406b003b
--- /dev/null
+++ b/tags/compression/index.html
@@ -0,0 +1,5 @@
+Tag: compression - Open Neuromorphic
+
\ No newline at end of file
diff --git a/tags/compression/index.xml b/tags/compression/index.xml
new file mode 100644
index 00000000..72db7c92
--- /dev/null
+++ b/tags/compression/index.xml
@@ -0,0 +1 @@
+compression on Open Neuromorphichttps://open-neuromorphic.org/tags/compression/Recent content in compression on Open NeuromorphicHugo -- gohugo.ioen-usTue, 28 Feb 2023 00:00:00 +0000Efficient compression for event-based datahttps://open-neuromorphic.org/p/efficient-compression-for-event-based-data/Tue, 28 Feb 2023 00:00:00 +0000https://open-neuromorphic.org/p/efficient-compression-for-event-based-data/<img src="https://open-neuromorphic.org/p/efficient-compression-for-event-based-data/file_read_benchmark.png" alt="Featured image of post Efficient compression for event-based data" />Efficient compression for event-based data Datasets grow larger in size As neuromorphic algorithms tackle more complex tasks that are linked to bigger datasets, and event cameras mature to have higher spatial resolution, it is worth looking at how to encode that data efficiently when storing it on disk. To give you an example, Prophesee’s latest automotive object detection dataset is some 3.5 TB in size for under 40h of recordings with a single camera.
\ No newline at end of file
diff --git a/tags/compression/page/1/index.html b/tags/compression/page/1/index.html
new file mode 100644
index 00000000..1a4debaa
--- /dev/null
+++ b/tags/compression/page/1/index.html
@@ -0,0 +1 @@
+https://open-neuromorphic.org/tags/compression/
\ No newline at end of file
diff --git a/tags/digital/index.html b/tags/digital/index.html
new file mode 100644
index 00000000..cd756384
--- /dev/null
+++ b/tags/digital/index.html
@@ -0,0 +1,5 @@
+Tag: digital - Open Neuromorphic
+
\ No newline at end of file
diff --git a/tags/digital/index.xml b/tags/digital/index.xml
new file mode 100644
index 00000000..ce141f55
--- /dev/null
+++ b/tags/digital/index.xml
@@ -0,0 +1,4 @@
+digital on Open Neuromorphichttps://open-neuromorphic.org/tags/digital/Recent content in digital on Open NeuromorphicHugo -- gohugo.ioen-usMon, 27 Mar 2023 00:00:00 +0000Bits of Chips | TrueNorthhttps://open-neuromorphic.org/p/bits-of-chips-truenorth/Mon, 27 Mar 2023 00:00:00 +0000https://open-neuromorphic.org/p/bits-of-chips-truenorth/<img src="https://open-neuromorphic.org/p/bits-of-chips-truenorth/brain-to-chip.png" alt="Featured image of post Bits of Chips | TrueNorth" />Why do we want to emulate the brain? If you have ever read an article on neuromorphic computing, you might have noticed that in the introduction of each of these there is the same statement: “The brain is much powerful than any AI machine when it comes to cognitive tasks but it runs on a 10W power budget!”. This is absolutely true: neurons in the brain communicate among each other by means of spikes, which are short voltage pulses that propagate from one neuron to the other.Digital neuromophic hardware read listhttps://open-neuromorphic.org/p/digital-neuromophic-hardware-read-list/Wed, 11 Jan 2023 00:00:00 +0000https://open-neuromorphic.org/p/digital-neuromophic-hardware-read-list/<img src="https://open-neuromorphic.org/p/digital-neuromophic-hardware-read-list/frenkel-thesis.png" alt="Featured image of post Digital neuromophic hardware read list" />Here’s a list of articles and theses related to digital hardware designs for neuomorphic applications. I plan to update it regularly. To be redirected directly to the sources, click on the titles!
+If you are new to neuromorphic computing, I strongly suggest to get a grasp of how an SNN works from this paper. Otherwise, it will be pretty difficult to understand the content of the papers listed here.
+2015 TrueNorth: Design and Tool Flow of a 65 mW 1 Million Neuron Programmable Neurosynaptic Chip, Filipp Akopyan et al.Spiking neurons: a digital hardware implementationhttps://open-neuromorphic.org/p/spiking-neurons-a-digital-hardware-implementation/Mon, 02 Jan 2023 00:00:00 +0000https://open-neuromorphic.org/p/spiking-neurons-a-digital-hardware-implementation/<img src="https://open-neuromorphic.org/p/spiking-neurons-a-digital-hardware-implementation/loihi.png" alt="Featured image of post Spiking neurons: a digital hardware implementation" />Spiking neurons In this article, we will try to model a layer of Leaky Integrate and Fire (LIF) spiking neurons using digital hardware: registers, memories, adders and so on. To do so, we will consider a single output neuron connected to multiple input neurons from a previous layer.
+In a Spiking Neural Network (SNN), neurons communicate by means of spikes: these activation voltages are then converted to currents through the synapses, charging the membrane potential of the destination neuron.
\ No newline at end of file
diff --git a/tags/digital/page/1/index.html b/tags/digital/page/1/index.html
new file mode 100644
index 00000000..305cf42e
--- /dev/null
+++ b/tags/digital/page/1/index.html
@@ -0,0 +1 @@
+https://open-neuromorphic.org/tags/digital/
\ No newline at end of file
diff --git a/tags/event-camera/index.html b/tags/event-camera/index.html
new file mode 100644
index 00000000..109b89d6
--- /dev/null
+++ b/tags/event-camera/index.html
@@ -0,0 +1,5 @@
+Tag: event camera - Open Neuromorphic
+
\ No newline at end of file
diff --git a/tags/event-camera/index.xml b/tags/event-camera/index.xml
new file mode 100644
index 00000000..580c6b1f
--- /dev/null
+++ b/tags/event-camera/index.xml
@@ -0,0 +1 @@
+event camera on Open Neuromorphichttps://open-neuromorphic.org/tags/event-camera/Recent content in event camera on Open NeuromorphicHugo -- gohugo.ioen-usTue, 28 Feb 2023 00:00:00 +0000Efficient compression for event-based datahttps://open-neuromorphic.org/p/efficient-compression-for-event-based-data/Tue, 28 Feb 2023 00:00:00 +0000https://open-neuromorphic.org/p/efficient-compression-for-event-based-data/<img src="https://open-neuromorphic.org/p/efficient-compression-for-event-based-data/file_read_benchmark.png" alt="Featured image of post Efficient compression for event-based data" />Efficient compression for event-based data Datasets grow larger in size As neuromorphic algorithms tackle more complex tasks that are linked to bigger datasets, and event cameras mature to have higher spatial resolution, it is worth looking at how to encode that data efficiently when storing it on disk. To give you an example, Prophesee’s latest automotive object detection dataset is some 3.5 TB in size for under 40h of recordings with a single camera.
\ No newline at end of file
diff --git a/tags/event-camera/page/1/index.html b/tags/event-camera/page/1/index.html
new file mode 100644
index 00000000..0b8da544
--- /dev/null
+++ b/tags/event-camera/page/1/index.html
@@ -0,0 +1 @@
+https://open-neuromorphic.org/tags/event-camera/
\ No newline at end of file
diff --git a/tags/events/index.html b/tags/events/index.html
new file mode 100644
index 00000000..fc132119
--- /dev/null
+++ b/tags/events/index.html
@@ -0,0 +1,5 @@
+Tag: events - Open Neuromorphic
+
\ No newline at end of file
diff --git a/tags/events/index.xml b/tags/events/index.xml
new file mode 100644
index 00000000..d7d94a2c
--- /dev/null
+++ b/tags/events/index.xml
@@ -0,0 +1 @@
+events on Open Neuromorphichttps://open-neuromorphic.org/tags/events/Recent content in events on Open NeuromorphicHugo -- gohugo.ioen-usTue, 28 Feb 2023 00:00:00 +0000Efficient compression for event-based datahttps://open-neuromorphic.org/p/efficient-compression-for-event-based-data/Tue, 28 Feb 2023 00:00:00 +0000https://open-neuromorphic.org/p/efficient-compression-for-event-based-data/<img src="https://open-neuromorphic.org/p/efficient-compression-for-event-based-data/file_read_benchmark.png" alt="Featured image of post Efficient compression for event-based data" />Efficient compression for event-based data Datasets grow larger in size As neuromorphic algorithms tackle more complex tasks that are linked to bigger datasets, and event cameras mature to have higher spatial resolution, it is worth looking at how to encode that data efficiently when storing it on disk. To give you an example, Prophesee’s latest automotive object detection dataset is some 3.5 TB in size for under 40h of recordings with a single camera.
\ No newline at end of file
diff --git a/tags/events/page/1/index.html b/tags/events/page/1/index.html
new file mode 100644
index 00000000..e154c0ef
--- /dev/null
+++ b/tags/events/page/1/index.html
@@ -0,0 +1 @@
+https://open-neuromorphic.org/tags/events/
\ No newline at end of file
diff --git a/tags/file-encoding/index.html b/tags/file-encoding/index.html
new file mode 100644
index 00000000..b44fadce
--- /dev/null
+++ b/tags/file-encoding/index.html
@@ -0,0 +1,5 @@
+Tag: file encoding - Open Neuromorphic
+
\ No newline at end of file
diff --git a/tags/file-encoding/index.xml b/tags/file-encoding/index.xml
new file mode 100644
index 00000000..a4772bc8
--- /dev/null
+++ b/tags/file-encoding/index.xml
@@ -0,0 +1 @@
+file encoding on Open Neuromorphichttps://open-neuromorphic.org/tags/file-encoding/Recent content in file encoding on Open NeuromorphicHugo -- gohugo.ioen-usTue, 28 Feb 2023 00:00:00 +0000Efficient compression for event-based datahttps://open-neuromorphic.org/p/efficient-compression-for-event-based-data/Tue, 28 Feb 2023 00:00:00 +0000https://open-neuromorphic.org/p/efficient-compression-for-event-based-data/<img src="https://open-neuromorphic.org/p/efficient-compression-for-event-based-data/file_read_benchmark.png" alt="Featured image of post Efficient compression for event-based data" />Efficient compression for event-based data Datasets grow larger in size As neuromorphic algorithms tackle more complex tasks that are linked to bigger datasets, and event cameras mature to have higher spatial resolution, it is worth looking at how to encode that data efficiently when storing it on disk. To give you an example, Prophesee’s latest automotive object detection dataset is some 3.5 TB in size for under 40h of recordings with a single camera.
\ No newline at end of file
diff --git a/tags/file-encoding/page/1/index.html b/tags/file-encoding/page/1/index.html
new file mode 100644
index 00000000..0962a307
--- /dev/null
+++ b/tags/file-encoding/page/1/index.html
@@ -0,0 +1 @@
+https://open-neuromorphic.org/tags/file-encoding/
\ No newline at end of file
diff --git a/tags/framework/index.html b/tags/framework/index.html
new file mode 100644
index 00000000..76ea9421
--- /dev/null
+++ b/tags/framework/index.html
@@ -0,0 +1,5 @@
+Tag: framework - Open Neuromorphic
+
\ No newline at end of file
diff --git a/tags/framework/index.xml b/tags/framework/index.xml
new file mode 100644
index 00000000..fe7cacb5
--- /dev/null
+++ b/tags/framework/index.xml
@@ -0,0 +1 @@
+framework on Open Neuromorphichttps://open-neuromorphic.org/tags/framework/Recent content in framework on Open NeuromorphicHugo -- gohugo.ioen-usWed, 02 Aug 2023 00:00:00 +0000SNN library benchmarkshttps://open-neuromorphic.org/p/snn-library-benchmarks/Wed, 02 Aug 2023 00:00:00 +0000https://open-neuromorphic.org/p/snn-library-benchmarks/<img src="https://open-neuromorphic.org/p/snn-library-benchmarks/framework-benchmarking-16k-header.png" alt="Featured image of post SNN library benchmarks" />SNN library benchmarks Open Neuromorphic’s list of SNN frameworks currently counts 10 libraries, and those are only the most popular ones! As the sizes of spiking neural network models grow thanks to deep learning, optimization becomes more important for researchers and practitioners alike. Training SNNs is often slow, as the stateful networks are typically fed sequential inputs. Today’s most popular training method then is some form of backpropagation through time, whose time complexity scales with the number of time steps.
\ No newline at end of file
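The remark about backpropagation through time can be illustrated with a minimal sketch: the same stateful layer is unrolled once per time step, so both the forward and the backward pass grow linearly with the number of steps T. The snippet below is a hypothetical toy example in plain PyTorch, not the code used in the benchmarks; a soft sigmoid spike stands in for a proper surrogate gradient, and all shapes and hyperparameters are made up.

```python
# Toy BPTT loop: unroll a leaky spiking layer over T steps and backprop through all of them.
import torch

T, batch, n_in, n_out = 50, 32, 100, 10
beta = 0.9                                     # membrane decay factor
fc = torch.nn.Linear(n_in, n_out)
opt = torch.optim.Adam(fc.parameters(), lr=1e-3)

x = (torch.rand(T, batch, n_in) < 0.1).float() # random input spike trains
target = torch.randint(0, n_out, (batch,))

opt.zero_grad()
v = torch.zeros(batch, n_out)
spk_sum = torch.zeros(batch, n_out)
for t in range(T):                             # forward cost is O(T)
    v = beta * v + fc(x[t])                    # leaky integration of the input current
    spk = torch.sigmoid(10 * (v - 1.0))        # soft spike in place of a surrogate gradient
    v = v - spk                                # soft reset
    spk_sum = spk_sum + spk

loss = torch.nn.functional.cross_entropy(spk_sum, target)
loss.backward()                                # gradients flow back through all T steps
opt.step()
```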
diff --git a/tags/framework/page/1/index.html b/tags/framework/page/1/index.html
new file mode 100644
index 00000000..99b2eb21
--- /dev/null
+++ b/tags/framework/page/1/index.html
@@ -0,0 +1 @@
+https://open-neuromorphic.org/tags/framework/
\ No newline at end of file
diff --git a/tags/hardware/index.html b/tags/hardware/index.html
new file mode 100644
index 00000000..8e736dc6
--- /dev/null
+++ b/tags/hardware/index.html
@@ -0,0 +1,5 @@
+Tag: hardware - Open Neuromorphic
+
\ No newline at end of file
diff --git a/tags/hardware/index.xml b/tags/hardware/index.xml
new file mode 100644
index 00000000..2d3b5664
--- /dev/null
+++ b/tags/hardware/index.xml
@@ -0,0 +1,4 @@
+hardware on Open Neuromorphichttps://open-neuromorphic.org/tags/hardware/Recent content in hardware on Open NeuromorphicHugo -- gohugo.ioen-usMon, 27 Mar 2023 00:00:00 +0000Bits of Chips | TrueNorthhttps://open-neuromorphic.org/p/bits-of-chips-truenorth/Mon, 27 Mar 2023 00:00:00 +0000https://open-neuromorphic.org/p/bits-of-chips-truenorth/<img src="https://open-neuromorphic.org/p/bits-of-chips-truenorth/brain-to-chip.png" alt="Featured image of post Bits of Chips | TrueNorth" />Why do we want to emulate the brain? If you have ever read an article on neuromorphic computing, you might have noticed that in the introduction of each of these there is the same statement: “The brain is much more powerful than any AI machine when it comes to cognitive tasks, but it runs on a 10W power budget!”. This is absolutely true: neurons in the brain communicate with one another by means of spikes, which are short voltage pulses that propagate from one neuron to the other.Digital neuromorphic hardware read listhttps://open-neuromorphic.org/p/digital-neuromophic-hardware-read-list/Wed, 11 Jan 2023 00:00:00 +0000https://open-neuromorphic.org/p/digital-neuromophic-hardware-read-list/<img src="https://open-neuromorphic.org/p/digital-neuromophic-hardware-read-list/frenkel-thesis.png" alt="Featured image of post Digital neuromorphic hardware read list" />Here’s a list of articles and theses related to digital hardware designs for neuromorphic applications. I plan to update it regularly. To be redirected directly to the sources, click on the titles!
+If you are new to neuromorphic computing, I strongly suggest getting a grasp of how an SNN works from this paper. Otherwise, it will be pretty difficult to understand the content of the papers listed here.
+2015 TrueNorth: Design and Tool Flow of a 65 mW 1 Million Neuron Programmable Neurosynaptic Chip, Filipp Akopyan et al.Spiking neurons: a digital hardware implementationhttps://open-neuromorphic.org/p/spiking-neurons-a-digital-hardware-implementation/Mon, 02 Jan 2023 00:00:00 +0000https://open-neuromorphic.org/p/spiking-neurons-a-digital-hardware-implementation/<img src="https://open-neuromorphic.org/p/spiking-neurons-a-digital-hardware-implementation/loihi.png" alt="Featured image of post Spiking neurons: a digital hardware implementation" />Spiking neurons In this article, we will try to model a layer of Leaky Integrate and Fire (LIF) spiking neurons using digital hardware: registers, memories, adders and so on. To do so, we will consider a single output neuron connected to multiple input neurons from a previous layer.
+In a Spiking Neural Network (SNN), neurons communicate by means of spikes: these activation voltages are then converted to currents through the synapses, charging the membrane potential of the destination neuron.
\ No newline at end of file
diff --git a/tags/hardware/page/1/index.html b/tags/hardware/page/1/index.html
new file mode 100644
index 00000000..0d733623
--- /dev/null
+++ b/tags/hardware/page/1/index.html
@@ -0,0 +1 @@
+https://open-neuromorphic.org/tags/hardware/
\ No newline at end of file
diff --git a/tags/index.html b/tags/index.html
new file mode 100644
index 00000000..b3dd3ecc
--- /dev/null
+++ b/tags/index.html
@@ -0,0 +1,8 @@
+Tags
+
\ No newline at end of file
diff --git a/tags/index.xml b/tags/index.xml
new file mode 100644
index 00000000..edad2d9f
--- /dev/null
+++ b/tags/index.xml
@@ -0,0 +1 @@
+Tags on Open Neuromorphichttps://open-neuromorphic.org/tags/Recent content in Tags on Open NeuromorphicHugo -- gohugo.ioen-usWed, 02 Aug 2023 00:00:00 +0000frameworkhttps://open-neuromorphic.org/tags/framework/Wed, 02 Aug 2023 00:00:00 +0000https://open-neuromorphic.org/tags/framework/libraryhttps://open-neuromorphic.org/tags/library/Wed, 02 Aug 2023 00:00:00 +0000https://open-neuromorphic.org/tags/library/pytorchhttps://open-neuromorphic.org/tags/pytorch/Wed, 02 Aug 2023 00:00:00 +0000https://open-neuromorphic.org/tags/pytorch/snnhttps://open-neuromorphic.org/tags/snn/Wed, 02 Aug 2023 00:00:00 +0000https://open-neuromorphic.org/tags/snn/digitalhttps://open-neuromorphic.org/tags/digital/Mon, 27 Mar 2023 00:00:00 +0000https://open-neuromorphic.org/tags/digital/hardwarehttps://open-neuromorphic.org/tags/hardware/Mon, 27 Mar 2023 00:00:00 +0000https://open-neuromorphic.org/tags/hardware/neuromorphichttps://open-neuromorphic.org/tags/neuromorphic/Mon, 27 Mar 2023 00:00:00 +0000https://open-neuromorphic.org/tags/neuromorphic/researchhttps://open-neuromorphic.org/tags/research/Mon, 27 Mar 2023 00:00:00 +0000https://open-neuromorphic.org/tags/research/compressionhttps://open-neuromorphic.org/tags/compression/Tue, 28 Feb 2023 00:00:00 +0000https://open-neuromorphic.org/tags/compression/event camerahttps://open-neuromorphic.org/tags/event-camera/Tue, 28 Feb 2023 00:00:00 +0000https://open-neuromorphic.org/tags/event-camera/eventshttps://open-neuromorphic.org/tags/events/Tue, 28 Feb 2023 00:00:00 +0000https://open-neuromorphic.org/tags/events/file encodinghttps://open-neuromorphic.org/tags/file-encoding/Tue, 28 Feb 2023 00:00:00 +0000https://open-neuromorphic.org/tags/file-encoding/AIhttps://open-neuromorphic.org/tags/ai/Wed, 11 Jan 2023 00:00:00 +0000https://open-neuromorphic.org/tags/ai/machine learninghttps://open-neuromorphic.org/tags/machine-learning/Mon, 02 Jan 2023 00:00:00 +0000https://open-neuromorphic.org/tags/machine-learning/rtlhttps://open-neuromorphic.org/tags/rtl/Mon, 02 Jan 2023 00:00:00 +0000https://open-neuromorphic.org/tags/rtl/spikinghttps://open-neuromorphic.org/tags/spiking/Mon, 02 Jan 2023 00:00:00 +0000https://open-neuromorphic.org/tags/spiking/veriloghttps://open-neuromorphic.org/tags/verilog/Mon, 02 Jan 2023 00:00:00 +0000https://open-neuromorphic.org/tags/verilog/
\ No newline at end of file
diff --git a/tags/library/index.html b/tags/library/index.html
new file mode 100644
index 00000000..079bd773
--- /dev/null
+++ b/tags/library/index.html
@@ -0,0 +1,5 @@
+Tag: library - Open Neuromorphic
+
\ No newline at end of file
diff --git a/tags/library/index.xml b/tags/library/index.xml
new file mode 100644
index 00000000..e517419d
--- /dev/null
+++ b/tags/library/index.xml
@@ -0,0 +1 @@
+library on Open Neuromorphichttps://open-neuromorphic.org/tags/library/Recent content in library on Open NeuromorphicHugo -- gohugo.ioen-usWed, 02 Aug 2023 00:00:00 +0000SNN library benchmarkshttps://open-neuromorphic.org/p/snn-library-benchmarks/Wed, 02 Aug 2023 00:00:00 +0000https://open-neuromorphic.org/p/snn-library-benchmarks/<img src="https://open-neuromorphic.org/p/snn-library-benchmarks/framework-benchmarking-16k-header.png" alt="Featured image of post SNN library benchmarks" />SNN library benchmarks Open Neuromorphic’s list of SNN frameworks currently counts 10 libraries, and those are only the most popular ones! As the sizes of spiking neural network models grow thanks to deep learning, optimization becomes more important for researchers and practitioners alike. Training SNNs is often slow, as the stateful networks are typically fed sequential inputs. Today’s most popular training method then is some form of backpropagation through time, whose time complexity scales with the number of time steps.
\ No newline at end of file
diff --git a/tags/library/page/1/index.html b/tags/library/page/1/index.html
new file mode 100644
index 00000000..3e230c8f
--- /dev/null
+++ b/tags/library/page/1/index.html
@@ -0,0 +1 @@
+https://open-neuromorphic.org/tags/library/
\ No newline at end of file
diff --git a/tags/machine-learning/index.html b/tags/machine-learning/index.html
new file mode 100644
index 00000000..206b95f5
--- /dev/null
+++ b/tags/machine-learning/index.html
@@ -0,0 +1,5 @@
+Tag: machine learning - Open Neuromorphic
+
\ No newline at end of file
diff --git a/tags/machine-learning/index.xml b/tags/machine-learning/index.xml
new file mode 100644
index 00000000..f099053b
--- /dev/null
+++ b/tags/machine-learning/index.xml
@@ -0,0 +1,2 @@
+machine learning on Open Neuromorphichttps://open-neuromorphic.org/tags/machine-learning/Recent content in machine learning on Open NeuromorphicHugo -- gohugo.ioen-usMon, 02 Jan 2023 00:00:00 +0000Spiking neurons: a digital hardware implementationhttps://open-neuromorphic.org/p/spiking-neurons-a-digital-hardware-implementation/Mon, 02 Jan 2023 00:00:00 +0000https://open-neuromorphic.org/p/spiking-neurons-a-digital-hardware-implementation/<img src="https://open-neuromorphic.org/p/spiking-neurons-a-digital-hardware-implementation/loihi.png" alt="Featured image of post Spiking neurons: a digital hardware implementation" />Spiking neurons In this article, we will try to model a layer of Leaky Integrate and Fire (LIF) spiking neurons using digital hardware: registers, memories, adders and so on. To do so, we will consider a single output neuron connected to multiple input neurons from a previous layer.
+In a Spiking Neural Network (SNN), neurons communicate by means of spikes: these activation voltages are then converted to currents through the synapses, charging the membrane potential of the destination neuron.
\ No newline at end of file
diff --git a/tags/machine-learning/page/1/index.html b/tags/machine-learning/page/1/index.html
new file mode 100644
index 00000000..2d789551
--- /dev/null
+++ b/tags/machine-learning/page/1/index.html
@@ -0,0 +1 @@
+https://open-neuromorphic.org/tags/machine-learning/
\ No newline at end of file
diff --git a/tags/neuromorphic/index.html b/tags/neuromorphic/index.html
new file mode 100644
index 00000000..8027d2be
--- /dev/null
+++ b/tags/neuromorphic/index.html
@@ -0,0 +1,5 @@
+Tag: neuromorphic - Open Neuromorphic
+
\ No newline at end of file
diff --git a/tags/neuromorphic/index.xml b/tags/neuromorphic/index.xml
new file mode 100644
index 00000000..ad3cd43b
--- /dev/null
+++ b/tags/neuromorphic/index.xml
@@ -0,0 +1,3 @@
+neuromorphic on Open Neuromorphichttps://open-neuromorphic.org/tags/neuromorphic/Recent content in neuromorphic on Open NeuromorphicHugo -- gohugo.ioen-usMon, 27 Mar 2023 00:00:00 +0000Bits of Chips | TrueNorthhttps://open-neuromorphic.org/p/bits-of-chips-truenorth/Mon, 27 Mar 2023 00:00:00 +0000https://open-neuromorphic.org/p/bits-of-chips-truenorth/<img src="https://open-neuromorphic.org/p/bits-of-chips-truenorth/brain-to-chip.png" alt="Featured image of post Bits of Chips | TrueNorth" />Why do we want to emulate the brain? If you have ever read an article on neuromorphic computing, you might have noticed that in the introduction of each of these there is the same statement: “The brain is much more powerful than any AI machine when it comes to cognitive tasks, but it runs on a 10W power budget!”. This is absolutely true: neurons in the brain communicate with one another by means of spikes, which are short voltage pulses that propagate from one neuron to the other.Digital neuromorphic hardware read listhttps://open-neuromorphic.org/p/digital-neuromophic-hardware-read-list/Wed, 11 Jan 2023 00:00:00 +0000https://open-neuromorphic.org/p/digital-neuromophic-hardware-read-list/<img src="https://open-neuromorphic.org/p/digital-neuromophic-hardware-read-list/frenkel-thesis.png" alt="Featured image of post Digital neuromorphic hardware read list" />Here’s a list of articles and theses related to digital hardware designs for neuromorphic applications. I plan to update it regularly. To be redirected directly to the sources, click on the titles!
+If you are new to neuromorphic computing, I strongly suggest getting a grasp of how an SNN works from this paper. Otherwise, it will be pretty difficult to understand the content of the papers listed here.
+2015 TrueNorth: Design and Tool Flow of a 65 mW 1 Million Neuron Programmable Neurosynaptic Chip, Filipp Akopyan et al.
\ No newline at end of file
diff --git a/tags/neuromorphic/page/1/index.html b/tags/neuromorphic/page/1/index.html
new file mode 100644
index 00000000..5d7c9d83
--- /dev/null
+++ b/tags/neuromorphic/page/1/index.html
@@ -0,0 +1 @@
+https://open-neuromorphic.org/tags/neuromorphic/
\ No newline at end of file
diff --git a/tags/page/1/index.html b/tags/page/1/index.html
new file mode 100644
index 00000000..bfac06e9
--- /dev/null
+++ b/tags/page/1/index.html
@@ -0,0 +1 @@
+https://open-neuromorphic.org/tags/
\ No newline at end of file
diff --git a/tags/page/2/index.html b/tags/page/2/index.html
new file mode 100644
index 00000000..458e3965
--- /dev/null
+++ b/tags/page/2/index.html
@@ -0,0 +1,8 @@
+Tags
+
\ No newline at end of file
diff --git a/tags/page/3/index.html b/tags/page/3/index.html
new file mode 100644
index 00000000..708c94a7
--- /dev/null
+++ b/tags/page/3/index.html
@@ -0,0 +1,8 @@
+Tags
+
\ No newline at end of file
diff --git a/tags/page/4/index.html b/tags/page/4/index.html
new file mode 100644
index 00000000..d6bb7719
--- /dev/null
+++ b/tags/page/4/index.html
@@ -0,0 +1,8 @@
+Tags
+
\ No newline at end of file
diff --git a/tags/pytorch/index.html b/tags/pytorch/index.html
new file mode 100644
index 00000000..43175f88
--- /dev/null
+++ b/tags/pytorch/index.html
@@ -0,0 +1,5 @@
+Tag: pytorch - Open Neuromorphic
+
\ No newline at end of file
diff --git a/tags/pytorch/index.xml b/tags/pytorch/index.xml
new file mode 100644
index 00000000..5934564f
--- /dev/null
+++ b/tags/pytorch/index.xml
@@ -0,0 +1 @@
+pytorch on Open Neuromorphichttps://open-neuromorphic.org/tags/pytorch/Recent content in pytorch on Open NeuromorphicHugo -- gohugo.ioen-usWed, 02 Aug 2023 00:00:00 +0000SNN library benchmarkshttps://open-neuromorphic.org/p/snn-library-benchmarks/Wed, 02 Aug 2023 00:00:00 +0000https://open-neuromorphic.org/p/snn-library-benchmarks/<img src="https://open-neuromorphic.org/p/snn-library-benchmarks/framework-benchmarking-16k-header.png" alt="Featured image of post SNN library benchmarks" />SNN library benchmarks Open Neuromorphic’s list of SNN frameworks currently counts 10 libraries, and those are only the most popular ones! As the sizes of spiking neural network models grow thanks to deep learning, optimization becomes more important for researchers and practitioners alike. Training SNNs is often slow, as the stateful networks are typically fed sequential inputs. Today’s most popular training method then is some form of backpropagation through time, whose time complexity scales with the number of time steps.
\ No newline at end of file
diff --git a/tags/pytorch/page/1/index.html b/tags/pytorch/page/1/index.html
new file mode 100644
index 00000000..22724b20
--- /dev/null
+++ b/tags/pytorch/page/1/index.html
@@ -0,0 +1 @@
+https://open-neuromorphic.org/tags/pytorch/
\ No newline at end of file
diff --git a/tags/research/index.html b/tags/research/index.html
new file mode 100644
index 00000000..d8ea751d
--- /dev/null
+++ b/tags/research/index.html
@@ -0,0 +1,5 @@
+Tag: research - Open Neuromorphic
+
\ No newline at end of file
diff --git a/tags/research/index.xml b/tags/research/index.xml
new file mode 100644
index 00000000..4a70c8e7
--- /dev/null
+++ b/tags/research/index.xml
@@ -0,0 +1,3 @@
+research on Open Neuromorphichttps://open-neuromorphic.org/tags/research/Recent content in research on Open NeuromorphicHugo -- gohugo.ioen-usMon, 27 Mar 2023 00:00:00 +0000Bits of Chips | TrueNorthhttps://open-neuromorphic.org/p/bits-of-chips-truenorth/Mon, 27 Mar 2023 00:00:00 +0000https://open-neuromorphic.org/p/bits-of-chips-truenorth/<img src="https://open-neuromorphic.org/p/bits-of-chips-truenorth/brain-to-chip.png" alt="Featured image of post Bits of Chips | TrueNorth" />Why do we want to emulate the brain? If you have ever read an article on neuromorphic computing, you might have noticed that in the introduction of each of these there is the same statement: “The brain is much more powerful than any AI machine when it comes to cognitive tasks, but it runs on a 10W power budget!”. This is absolutely true: neurons in the brain communicate with one another by means of spikes, which are short voltage pulses that propagate from one neuron to the other.Digital neuromorphic hardware read listhttps://open-neuromorphic.org/p/digital-neuromophic-hardware-read-list/Wed, 11 Jan 2023 00:00:00 +0000https://open-neuromorphic.org/p/digital-neuromophic-hardware-read-list/<img src="https://open-neuromorphic.org/p/digital-neuromophic-hardware-read-list/frenkel-thesis.png" alt="Featured image of post Digital neuromorphic hardware read list" />Here’s a list of articles and theses related to digital hardware designs for neuromorphic applications. I plan to update it regularly. To be redirected directly to the sources, click on the titles!
+If you are new to neuromorphic computing, I strongly suggest getting a grasp of how an SNN works from this paper. Otherwise, it will be pretty difficult to understand the content of the papers listed here.
+2015 TrueNorth: Design and Tool Flow of a 65 mW 1 Million Neuron Programmable Neurosynaptic Chip, Filipp Akopyan et al.
\ No newline at end of file
diff --git a/tags/research/page/1/index.html b/tags/research/page/1/index.html
new file mode 100644
index 00000000..d7116322
--- /dev/null
+++ b/tags/research/page/1/index.html
@@ -0,0 +1 @@
+https://open-neuromorphic.org/tags/research/
\ No newline at end of file
diff --git a/tags/rtl/index.html b/tags/rtl/index.html
new file mode 100644
index 00000000..7ba6efc9
--- /dev/null
+++ b/tags/rtl/index.html
@@ -0,0 +1,5 @@
+Tag: rtl - Open Neuromorphic
+
\ No newline at end of file
diff --git a/tags/rtl/index.xml b/tags/rtl/index.xml
new file mode 100644
index 00000000..d2fe18ce
--- /dev/null
+++ b/tags/rtl/index.xml
@@ -0,0 +1,2 @@
+rtl on Open Neuromorphichttps://open-neuromorphic.org/tags/rtl/Recent content in rtl on Open NeuromorphicHugo -- gohugo.ioen-usMon, 02 Jan 2023 00:00:00 +0000Spiking neurons: a digital hardware implementationhttps://open-neuromorphic.org/p/spiking-neurons-a-digital-hardware-implementation/Mon, 02 Jan 2023 00:00:00 +0000https://open-neuromorphic.org/p/spiking-neurons-a-digital-hardware-implementation/<img src="https://open-neuromorphic.org/p/spiking-neurons-a-digital-hardware-implementation/loihi.png" alt="Featured image of post Spiking neurons: a digital hardware implementation" />Spiking neurons In this article, we will try to model a layer of Leaky Integrate and Fire (LIF) spiking neurons using digital hardware: registers, memories, adders and so on. To do so, we will consider a single output neuron connected to multiple input neurons from a previous layer.
+In a Spiking Neural Network (SNN), neurons communicate by means of spikes: these activation voltages are then converted to currents through the synapses, charging the membrane potential of the destination neuron.
\ No newline at end of file
diff --git a/tags/rtl/page/1/index.html b/tags/rtl/page/1/index.html
new file mode 100644
index 00000000..d7d89685
--- /dev/null
+++ b/tags/rtl/page/1/index.html
@@ -0,0 +1 @@
+https://open-neuromorphic.org/tags/rtl/
\ No newline at end of file
diff --git a/tags/snn/index.html b/tags/snn/index.html
new file mode 100644
index 00000000..901aeb62
--- /dev/null
+++ b/tags/snn/index.html
@@ -0,0 +1,5 @@
+Tag: snn - Open Neuromorphic
+
\ No newline at end of file
diff --git a/tags/snn/index.xml b/tags/snn/index.xml
new file mode 100644
index 00000000..c42e2078
--- /dev/null
+++ b/tags/snn/index.xml
@@ -0,0 +1,4 @@
+snn on Open Neuromorphichttps://open-neuromorphic.org/tags/snn/Recent content in snn on Open NeuromorphicHugo -- gohugo.ioen-usWed, 02 Aug 2023 00:00:00 +0000SNN library benchmarkshttps://open-neuromorphic.org/p/snn-library-benchmarks/Wed, 02 Aug 2023 00:00:00 +0000https://open-neuromorphic.org/p/snn-library-benchmarks/<img src="https://open-neuromorphic.org/p/snn-library-benchmarks/framework-benchmarking-16k-header.png" alt="Featured image of post SNN library benchmarks" />SNN library benchmarks Open Neuromorphic’s list of SNN frameworks currently counts 10 libraries, and those are only the most popular ones! As the sizes of spiking neural network models grow thanks to deep learning, optimization becomes more important for researchers and practitioners alike. Training SNNs is often slow, as the stateful networks are typically fed sequential inputs. Today’s most popular training method then is some form of backpropagation through time, whose time complexity scales with the number of time steps.Digital neuromorphic hardware read listhttps://open-neuromorphic.org/p/digital-neuromophic-hardware-read-list/Wed, 11 Jan 2023 00:00:00 +0000https://open-neuromorphic.org/p/digital-neuromophic-hardware-read-list/<img src="https://open-neuromorphic.org/p/digital-neuromophic-hardware-read-list/frenkel-thesis.png" alt="Featured image of post Digital neuromorphic hardware read list" />Here’s a list of articles and theses related to digital hardware designs for neuromorphic applications. I plan to update it regularly. To be redirected directly to the sources, click on the titles!
+If you are new to neuromorphic computing, I strongly suggest getting a grasp of how an SNN works from this paper. Otherwise, it will be pretty difficult to understand the content of the papers listed here.
+2015 TrueNorth: Design and Tool Flow of a 65 mW 1 Million Neuron Programmable Neurosynaptic Chip, Filipp Akopyan et al.Spiking neurons: a digital hardware implementationhttps://open-neuromorphic.org/p/spiking-neurons-a-digital-hardware-implementation/Mon, 02 Jan 2023 00:00:00 +0000https://open-neuromorphic.org/p/spiking-neurons-a-digital-hardware-implementation/<img src="https://open-neuromorphic.org/p/spiking-neurons-a-digital-hardware-implementation/loihi.png" alt="Featured image of post Spiking neurons: a digital hardware implementation" />Spiking neurons In this article, we will try to model a layer of Leaky Integrate and Fire (LIF) spiking neurons using digital hardware: registers, memories, adders and so on. To do so, we will consider a single output neuron connected to multiple input neurons from a previous layer.
+In a Spiking Neural Network (SNN), neurons communicate by means of spikes: these activation voltages are then converted to currents through the synapses, charging the membrane potential of the destination neuron.
\ No newline at end of file
diff --git a/tags/snn/page/1/index.html b/tags/snn/page/1/index.html
new file mode 100644
index 00000000..dd129b78
--- /dev/null
+++ b/tags/snn/page/1/index.html
@@ -0,0 +1 @@
+https://open-neuromorphic.org/tags/snn/
\ No newline at end of file
diff --git a/tags/spiking/index.html b/tags/spiking/index.html
new file mode 100644
index 00000000..7181d3db
--- /dev/null
+++ b/tags/spiking/index.html
@@ -0,0 +1,5 @@
+Tag: spiking - Open Neuromorphic
+
\ No newline at end of file
diff --git a/tags/spiking/index.xml b/tags/spiking/index.xml
new file mode 100644
index 00000000..50cadc5b
--- /dev/null
+++ b/tags/spiking/index.xml
@@ -0,0 +1,2 @@
+spiking on Open Neuromorphichttps://open-neuromorphic.org/tags/spiking/Recent content in spiking on Open NeuromorphicHugo -- gohugo.ioen-usMon, 02 Jan 2023 00:00:00 +0000Spiking neurons: a digital hardware implementationhttps://open-neuromorphic.org/p/spiking-neurons-a-digital-hardware-implementation/Mon, 02 Jan 2023 00:00:00 +0000https://open-neuromorphic.org/p/spiking-neurons-a-digital-hardware-implementation/<img src="https://open-neuromorphic.org/p/spiking-neurons-a-digital-hardware-implementation/loihi.png" alt="Featured image of post Spiking neurons: a digital hardware implementation" />Spiking neurons In this article, we will try to model a layer of Leaky Integrate and Fire (LIF) spiking neurons using digital hardware: registers, memories, adders and so on. To do so, we will consider a single output neuron connected to multiple input neurons from a previous layer.
+In a Spiking Neural Network (SNN), neurons communicate by means of spikes: these activation voltages are then converted to currents through the synapses, charging the membrane potential of the destination neuron.
\ No newline at end of file
diff --git a/tags/spiking/page/1/index.html b/tags/spiking/page/1/index.html
new file mode 100644
index 00000000..c355ad55
--- /dev/null
+++ b/tags/spiking/page/1/index.html
@@ -0,0 +1 @@
+https://open-neuromorphic.org/tags/spiking/
\ No newline at end of file
diff --git a/tags/verilog/index.html b/tags/verilog/index.html
new file mode 100644
index 00000000..b41d1051
--- /dev/null
+++ b/tags/verilog/index.html
@@ -0,0 +1,5 @@
+Tag: verilog - Open Neuromorphic
+
\ No newline at end of file
diff --git a/tags/verilog/index.xml b/tags/verilog/index.xml
new file mode 100644
index 00000000..91b6783d
--- /dev/null
+++ b/tags/verilog/index.xml
@@ -0,0 +1,2 @@
+verilog on Open Neuromorphichttps://open-neuromorphic.org/tags/verilog/Recent content in verilog on Open NeuromorphicHugo -- gohugo.ioen-usMon, 02 Jan 2023 00:00:00 +0000Spiking neurons: a digital hardware implementationhttps://open-neuromorphic.org/p/spiking-neurons-a-digital-hardware-implementation/Mon, 02 Jan 2023 00:00:00 +0000https://open-neuromorphic.org/p/spiking-neurons-a-digital-hardware-implementation/<img src="https://open-neuromorphic.org/p/spiking-neurons-a-digital-hardware-implementation/loihi.png" alt="Featured image of post Spiking neurons: a digital hardware implementation" />Spiking neurons In this article, we will try to model a layer of Leaky Integrate and Fire (LIF) spiking neurons using digital hardware: registers, memories, adders and so on. To do so, we will consider a single output neuron connected to multiple input neurons from a previous layer.
+In a Spiking Neural Network (SNN), neurons communicate by means of spikes: these activation voltages are then converted to currents through the synapses, charging the membrane potential of the destination neuron.
\ No newline at end of file
diff --git a/tags/verilog/page/1/index.html b/tags/verilog/page/1/index.html
new file mode 100644
index 00000000..97572165
--- /dev/null
+++ b/tags/verilog/page/1/index.html
@@ -0,0 +1 @@
+https://open-neuromorphic.org/tags/verilog/
\ No newline at end of file
diff --git a/team/alexander-hadjiivanov.jpeg b/team/alexander-hadjiivanov.jpeg
new file mode 100644
index 00000000..cdffa33b
Binary files /dev/null and b/team/alexander-hadjiivanov.jpeg differ
diff --git a/team/alexander-hadjiivanov_hub8fcf2814c8e5b8f14748f2967d50efc_34856_1024x0_resize_q75_box.jpeg b/team/alexander-hadjiivanov_hub8fcf2814c8e5b8f14748f2967d50efc_34856_1024x0_resize_q75_box.jpeg
new file mode 100644
index 00000000..6dc396ed
Binary files /dev/null and b/team/alexander-hadjiivanov_hub8fcf2814c8e5b8f14748f2967d50efc_34856_1024x0_resize_q75_box.jpeg differ
diff --git a/team/alexander-hadjiivanov_hub8fcf2814c8e5b8f14748f2967d50efc_34856_480x0_resize_q75_box.jpeg b/team/alexander-hadjiivanov_hub8fcf2814c8e5b8f14748f2967d50efc_34856_480x0_resize_q75_box.jpeg
new file mode 100644
index 00000000..ae48cb58
Binary files /dev/null and b/team/alexander-hadjiivanov_hub8fcf2814c8e5b8f14748f2967d50efc_34856_480x0_resize_q75_box.jpeg differ
diff --git a/team/alexander-henkes.jpg b/team/alexander-henkes.jpg
new file mode 100644
index 00000000..764528d2
Binary files /dev/null and b/team/alexander-henkes.jpg differ
diff --git a/team/alexander-henkes_hu69c5a2c53edaa3fb83fe5cf6ef3a6a54_7739_1024x0_resize_q75_box.jpg b/team/alexander-henkes_hu69c5a2c53edaa3fb83fe5cf6ef3a6a54_7739_1024x0_resize_q75_box.jpg
new file mode 100644
index 00000000..6ab2d13b
Binary files /dev/null and b/team/alexander-henkes_hu69c5a2c53edaa3fb83fe5cf6ef3a6a54_7739_1024x0_resize_q75_box.jpg differ
diff --git a/team/alexander-henkes_hu69c5a2c53edaa3fb83fe5cf6ef3a6a54_7739_480x0_resize_q75_box.jpg b/team/alexander-henkes_hu69c5a2c53edaa3fb83fe5cf6ef3a6a54_7739_480x0_resize_q75_box.jpg
new file mode 100644
index 00000000..ffabdf4f
Binary files /dev/null and b/team/alexander-henkes_hu69c5a2c53edaa3fb83fe5cf6ef3a6a54_7739_480x0_resize_q75_box.jpg differ
diff --git a/team/catherine-schuman.jpg b/team/catherine-schuman.jpg
new file mode 100644
index 00000000..eb4169fe
Binary files /dev/null and b/team/catherine-schuman.jpg differ
diff --git a/team/charlotte-frenkel.jpg b/team/charlotte-frenkel.jpg
new file mode 100644
index 00000000..314bfaea
Binary files /dev/null and b/team/charlotte-frenkel.jpg differ
diff --git a/team/charlotte-frenkel_hu60129e51654de23dbae7e3ea857be115_12952_1024x0_resize_q75_box.jpg b/team/charlotte-frenkel_hu60129e51654de23dbae7e3ea857be115_12952_1024x0_resize_q75_box.jpg
new file mode 100644
index 00000000..5151c3d8
Binary files /dev/null and b/team/charlotte-frenkel_hu60129e51654de23dbae7e3ea857be115_12952_1024x0_resize_q75_box.jpg differ
diff --git a/team/charlotte-frenkel_hu60129e51654de23dbae7e3ea857be115_12952_480x0_resize_q75_box.jpg b/team/charlotte-frenkel_hu60129e51654de23dbae7e3ea857be115_12952_480x0_resize_q75_box.jpg
new file mode 100644
index 00000000..bd9b72f6
Binary files /dev/null and b/team/charlotte-frenkel_hu60129e51654de23dbae7e3ea857be115_12952_480x0_resize_q75_box.jpg differ
diff --git a/team/fabrizio-ottati.jpg b/team/fabrizio-ottati.jpg
new file mode 100644
index 00000000..3d00eac0
Binary files /dev/null and b/team/fabrizio-ottati.jpg differ
diff --git a/team/gregor-lenz.jpeg b/team/gregor-lenz.jpeg
new file mode 100644
index 00000000..fe60b3c1
Binary files /dev/null and b/team/gregor-lenz.jpeg differ
diff --git a/team/gregor-lenz_hu401dcc7f29eb10f93ef8a57749c49e1b_50576_1024x0_resize_q75_box.jpeg b/team/gregor-lenz_hu401dcc7f29eb10f93ef8a57749c49e1b_50576_1024x0_resize_q75_box.jpeg
new file mode 100644
index 00000000..18630779
Binary files /dev/null and b/team/gregor-lenz_hu401dcc7f29eb10f93ef8a57749c49e1b_50576_1024x0_resize_q75_box.jpeg differ
diff --git a/team/gregor-lenz_hu401dcc7f29eb10f93ef8a57749c49e1b_50576_480x0_resize_q75_box.jpeg b/team/gregor-lenz_hu401dcc7f29eb10f93ef8a57749c49e1b_50576_480x0_resize_q75_box.jpeg
new file mode 100644
index 00000000..2bbd8587
Binary files /dev/null and b/team/gregor-lenz_hu401dcc7f29eb10f93ef8a57749c49e1b_50576_480x0_resize_q75_box.jpeg differ
diff --git a/team/henning-wessels.jpg b/team/henning-wessels.jpg
new file mode 100644
index 00000000..73d4d9f6
Binary files /dev/null and b/team/henning-wessels.jpg differ
diff --git a/team/index.html b/team/index.html
new file mode 100644
index 00000000..8e5f6f88
--- /dev/null
+++ b/team/index.html
@@ -0,0 +1,15 @@
+Team
Fabrizio Ottati is a Ph.D. student in the Department of Electronics and Communications of Politecnico di Torino, under the supervision of Professor Luciano Lavagno and Professor Mario Roberto Casu.
His main interests are event-based cameras, digital hardware design and automation, spiking neural networks and Piedmontese red wine. He is the maintainer of two open source projects in the field of neuromorphic computing, Tonic and Expelliarmus. You can find more information on his website.
Gregor Lenz
Gregor Lenz graduated with a Ph.D. in neuromorphic engineering from Sorbonne University. He thinks that technology can learn a thing or two from how biological systems process information.
His main interests are event cameras that are inspired by the human retina and spiking neural networks that mimic the human brain in an effort to teach machines to compute a bit more like humans do. At the very least there are some power efficiency gains to be made, but hopefully more! He also loves to build open source software for spike-based machine learning. You can find more information on his personal website.
He is the maintainer of two open source projects in the field of neuromorphic computing, Tonic and expelliarmus.
Jason Eshraghian
Jason Eshraghian is an Assistant Professor at the Department of Electrical and Computer Engineering, University of California, Santa Cruz, leading the UCSC Neuromorphic Computing Group.
His research focuses on brain-inspired circuit design to accelerate AI algorithms and spiking neural networks. You can find more information on his personal website.
Charlotte Frenkel is an Assistant Professor at the Microelectronics department of Delft University of Technology, Delft, The Netherlands. Her research goals are:
to demonstrate a competitive advantage for neuromorphic computing devices compared to conventional neural network accelerators,
to uncover a framework toward on-chip neuromorphic intelligence for adaptive edge computing.
To achieve these goals, she is investigating both the bottom-up and the top-down design approaches, as well as their synergies (see personal website).
She is the designer of the ODIN and ReckOn open-source online-learning digital neuromorphic processors.
When faced with the complexity and ambiguity in the real world, contemporary algorithms fail spectacularly. There is a strong need for self-correcting, closed-loop systems to help solve our everyday physical problems.
By simulating and carefully scrutinizing and understanding neural circuits, including vision, motor control, and self-sustenance, Jens seeks to build autonomous systems that perform meaningful work, tightly following the Feynman axiom “What I cannot create, I do not understand”. You can find more information on his website.
Steve is doing his PhD on neuromorphic computing theory in the MINDS research group at the new CogniGron center for cognitive systems and materials in Groningen. He is funded by the European Post-Digital research network.
In his PhD, he works with different neuromorphic systems (Loihi 2, DynapSE2, and photonic reservoirs) to develop programming methods for devices that explore a richer set of physical dynamics than the synchronous bi-stable switching that (most of) computer science relies on. Steve’s background is in computer science and machine learning, with a touch of physics.
Alexander Henkes received the B.Sc. (Mechanical Engineering) and M.Sc. (Mechanical Engineering) degrees from the University of Paderborn, Germany, in 2015 and 2018, respectively. In 2022, he received his Ph.D. with honors from the Technical University of Braunschweig (TUBS), Germany, for his thesis “Artificial Neural Networks in Continuum Micromechanics”.
He is currently a Post-Doctoral Research Fellow at the Institute for Computational Modeling in Civil Engineering at TUBS. In 2022, he was elected as a junior member of the German Association of Applied Mathematics and Mechanics (GAMM) for his outstanding research in the field of artificial intelligence in continuum micromechanics.
His current research focuses on spiking neural networks (SNNs). Recently, he published a preprint on nonlinear history-dependent regression using SNNs. This enables SNNs to be used in the context of applied mathematics and computational engineering.
Alexander is currently a Research Fellow with the Advanced Concepts Team at the European Space Agency. His research efforts are focused on retinomorphic vision as well as the interplay between homeostasis, adaptation, axon guidance and structural plasticity in spiking neural networks.
No single branch of AI can claim the crown of true intelligence on its own. Rather, developing AI worthy of the ‘I’ would require a concerted effort to combine virtually all the branches - from perception through learning and cognition to reasoning and interaction. The most enticing aspect of neuromorphic computing is its potential to bring about this unification.
Peng Zhou
Peng Zhou received the Ph.D. degree from The University of California, Santa Cruz, under the supervision of Prof. Sung-Mo “Steve” Kang and Prof. Jason Eshraghian, and focused on neuromorphic computing, spiking neural networks, and their hardware implementation using memristors.
She was awarded “Best New Neuromorph” at the 2022 Telluride Workshop on Neuromorphic Cognition Engineering. She also gained industry experience at Synsense, a neuromorphic startup that spun out of the Institute of Neuroinformatics at the University of Zurich and ETH Zurich, and Tetramem, a computing-in-memory startup whose RRAM technologies are based on work from the Yang research group at USC, the Nanodevices and Integrated Systems Laboratory at UMass Amherst, and HP Labs.
She is a contributor to the open-source SNN frameworks snnTorch and Rockpool. She also designs open-source SNN chips.
Shyam is a Ph.D. student in the Neuromorphic Cognitive Systems (NCS) group at the Institute of Neuroinformatics, University of Zurich, and ETH Zurich with Prof. Giacomo Indiveri.
After working as a chip designer in the industry for over a decade, he decided to pursue a Ph.D. in Bio-Inspired Wide Band Circuits and Systems for Edge Neuromorphic Computing. His research interests lie at the intersection of analog/mixed-signal IC design, Neuroscience, and Robotics.
He believes that neuromorphic engineering has its own merits and that a coordinated effort from the community at the device, circuit, and algorithm levels is needed to push the envelope.
Catherine Schuman
Catherine (Katie) Schuman is an Assistant Professor in the Department of Electrical Engineering and Computer Science at the University of Tennessee (UT). She received her Ph.D. in Computer Science from UT in 2015, where she completed her dissertation on the use of evolutionary algorithms to train spiking neural networks for neuromorphic systems. Katie previously served as a research scientist at Oak Ridge National Laboratory, where her research focused on algorithms and applications of neuromorphic systems. Katie co-leads the TENNLab Neuromorphic Computing Research Group at UT. She has over 100 publications as well as seven patents in the field of neuromorphic computing. She received the Department of Energy Early Career Award in 2019.
Melika Payvand
Melika Payvand is an Assistant Professor at the Institute of Neuroinformatics, University of Zurich and ETH Zurich. She received her PhD in Electrical and Computer Engineering from the University of California, Santa Barbara. Her recent research interest is in developing on-device learning systems that form themselves to the sensory input in real time. Specifically, she exploits the physics of resistive memory for synaptic, neuronal, dendritic and structural plasticity, inspired by the structure-function relationship of the brain circuits. She is the recipient of the 2023 “Swiss ERC” starting grant and the “Best Neuromorph” award at the 2019 Telluride workshop. She has co-coordinated the European project NEUROTECH, is the program co-chair of the International Conference on Neuromorphic Systems (ICONS) and co-organizes the scientific program of the Capocaccia Neuromorphic Intelligence workshop.
Henning Wessels
Henning Wessels is an Assistant Professor at the Department of Civil and Environmental Engineering, Technical University Braunschweig, where he leads the data-driven modeling group. His group is developing computational methods at the intersection of mechanics, numerics, machine learning and uncertainty quantification. Applications can be found, for instance, in virtual sensing and structural health monitoring. Here, SNNs offer huge potential for data-driven mechanics on neuromorphic hardware within embedded systems.
+
+
+
+
+
\ No newline at end of file
diff --git a/team/jason-eshraghian.webp b/team/jason-eshraghian.webp
new file mode 100644
index 00000000..ffe17a39
Binary files /dev/null and b/team/jason-eshraghian.webp differ
diff --git a/team/jason-eshraghian_hu5d2d5366ae47912bc22f41487225bf0b_38920_1024x0_resize_q75_h2_box_2.webp b/team/jason-eshraghian_hu5d2d5366ae47912bc22f41487225bf0b_38920_1024x0_resize_q75_h2_box_2.webp
new file mode 100644
index 00000000..f5a6676b
Binary files /dev/null and b/team/jason-eshraghian_hu5d2d5366ae47912bc22f41487225bf0b_38920_1024x0_resize_q75_h2_box_2.webp differ
diff --git a/team/jason-eshraghian_hu5d2d5366ae47912bc22f41487225bf0b_38920_480x0_resize_q75_h2_box_2.webp b/team/jason-eshraghian_hu5d2d5366ae47912bc22f41487225bf0b_38920_480x0_resize_q75_h2_box_2.webp
new file mode 100644
index 00000000..b609d50c
Binary files /dev/null and b/team/jason-eshraghian_hu5d2d5366ae47912bc22f41487225bf0b_38920_480x0_resize_q75_h2_box_2.webp differ
diff --git a/team/jens-egholm.png b/team/jens-egholm.png
new file mode 100644
index 00000000..0e188fc4
Binary files /dev/null and b/team/jens-egholm.png differ
diff --git a/team/jens-egholm_hudf55b28927feb6152e01d07f54f7ef1e_320513_1024x0_resize_box_3.png b/team/jens-egholm_hudf55b28927feb6152e01d07f54f7ef1e_320513_1024x0_resize_box_3.png
new file mode 100644
index 00000000..dc8f6756
Binary files /dev/null and b/team/jens-egholm_hudf55b28927feb6152e01d07f54f7ef1e_320513_1024x0_resize_box_3.png differ
diff --git a/team/jens-egholm_hudf55b28927feb6152e01d07f54f7ef1e_320513_480x0_resize_box_3.png b/team/jens-egholm_hudf55b28927feb6152e01d07f54f7ef1e_320513_480x0_resize_box_3.png
new file mode 100644
index 00000000..3ef2b54a
Binary files /dev/null and b/team/jens-egholm_hudf55b28927feb6152e01d07f54f7ef1e_320513_480x0_resize_box_3.png differ
diff --git a/team/melika-payvand.jpg b/team/melika-payvand.jpg
new file mode 100644
index 00000000..20339bc5
Binary files /dev/null and b/team/melika-payvand.jpg differ
diff --git a/team/peng-zhou.jpeg b/team/peng-zhou.jpeg
new file mode 100644
index 00000000..1f8f23e6
Binary files /dev/null and b/team/peng-zhou.jpeg differ
diff --git a/team/peng-zhou_hu06543fcaee7fb3365185960f073e8649_25733_1024x0_resize_q75_box.jpeg b/team/peng-zhou_hu06543fcaee7fb3365185960f073e8649_25733_1024x0_resize_q75_box.jpeg
new file mode 100644
index 00000000..cd691091
Binary files /dev/null and b/team/peng-zhou_hu06543fcaee7fb3365185960f073e8649_25733_1024x0_resize_q75_box.jpeg differ
diff --git a/team/peng-zhou_hu06543fcaee7fb3365185960f073e8649_25733_480x0_resize_q75_box.jpeg b/team/peng-zhou_hu06543fcaee7fb3365185960f073e8649_25733_480x0_resize_q75_box.jpeg
new file mode 100644
index 00000000..3250a9f0
Binary files /dev/null and b/team/peng-zhou_hu06543fcaee7fb3365185960f073e8649_25733_480x0_resize_q75_box.jpeg differ
diff --git a/team/shyam-narayanan.jpeg b/team/shyam-narayanan.jpeg
new file mode 100644
index 00000000..540a4a90
Binary files /dev/null and b/team/shyam-narayanan.jpeg differ
diff --git a/team/shyam-narayanan_hu720f02c2842a352e1bd5c03ea8829556_29506_1024x0_resize_q75_box.jpeg b/team/shyam-narayanan_hu720f02c2842a352e1bd5c03ea8829556_29506_1024x0_resize_q75_box.jpeg
new file mode 100644
index 00000000..f8ea3cfd
Binary files /dev/null and b/team/shyam-narayanan_hu720f02c2842a352e1bd5c03ea8829556_29506_1024x0_resize_q75_box.jpeg differ
diff --git a/team/shyam-narayanan_hu720f02c2842a352e1bd5c03ea8829556_29506_480x0_resize_q75_box.jpeg b/team/shyam-narayanan_hu720f02c2842a352e1bd5c03ea8829556_29506_480x0_resize_q75_box.jpeg
new file mode 100644
index 00000000..6c6064f2
Binary files /dev/null and b/team/shyam-narayanan_hu720f02c2842a352e1bd5c03ea8829556_29506_480x0_resize_q75_box.jpeg differ
diff --git a/team/steven-abreu.jpg b/team/steven-abreu.jpg
new file mode 100644
index 00000000..0ef4dacb
Binary files /dev/null and b/team/steven-abreu.jpg differ
diff --git a/team/steven-abreu_hu19b4cb9d8e790ac29dad8a39fbedb7d3_110937_1024x0_resize_q75_box.jpg b/team/steven-abreu_hu19b4cb9d8e790ac29dad8a39fbedb7d3_110937_1024x0_resize_q75_box.jpg
new file mode 100644
index 00000000..4d8f9d5e
Binary files /dev/null and b/team/steven-abreu_hu19b4cb9d8e790ac29dad8a39fbedb7d3_110937_1024x0_resize_q75_box.jpg differ
diff --git a/team/steven-abreu_hu19b4cb9d8e790ac29dad8a39fbedb7d3_110937_480x0_resize_q75_box.jpg b/team/steven-abreu_hu19b4cb9d8e790ac29dad8a39fbedb7d3_110937_480x0_resize_q75_box.jpg
new file mode 100644
index 00000000..2ab0797d
Binary files /dev/null and b/team/steven-abreu_hu19b4cb9d8e790ac29dad8a39fbedb7d3_110937_480x0_resize_q75_box.jpg differ
diff --git a/ts/main.js b/ts/main.js
new file mode 100644
index 00000000..d3fedd90
--- /dev/null
+++ b/ts/main.js
@@ -0,0 +1,11 @@
+(()=>{var u=class{galleryUID;items=[];constructor(t,r=1){if(window.PhotoSwipe==null||window.PhotoSwipeUI_Default==null){console.error("PhotoSwipe lib not loaded.");return}this.galleryUID=r,u.createGallery(t),this.loadItems(t),this.bindClick()}loadItems(t){this.items=[];let r=t.querySelectorAll("figure.gallery-image");for(let i of r){let n=i.querySelector("figcaption"),o=i.querySelector("img"),s={w:parseInt(o.getAttribute("width")),h:parseInt(o.getAttribute("height")),src:o.src,msrc:o.getAttribute("data-thumb")||o.src,el:i};n&&(s.title=n.innerHTML),this.items.push(s)}}static createGallery(t){let r=t.querySelectorAll("img.gallery-image");for(let o of Array.from(r)){let s=o.closest("p");if(!s||!t.contains(s)||(s.textContent.trim()==""&&s.classList.add("no-text"),!s.classList.contains("no-text")))continue;let d=o.parentElement.tagName=="A",m=o,c=document.createElement("figure");if(c.style.setProperty("flex-grow",o.getAttribute("data-flex-grow")||"1"),c.style.setProperty("flex-basis",o.getAttribute("data-flex-basis")||"0"),d&&(m=o.parentElement),m.parentElement.insertBefore(c,m),c.appendChild(m),o.hasAttribute("alt")){let l=document.createElement("figcaption");l.innerText=o.getAttribute("alt"),c.appendChild(l)}if(!d){c.className="gallery-image";let l=document.createElement("a");l.href=o.src,l.setAttribute("target","_blank"),o.parentNode.insertBefore(l,o),l.appendChild(o)}}let i=t.querySelectorAll("figure.gallery-image"),n=[];for(let o of i)n.length?o.previousElementSibling===n[n.length-1]?n.push(o):n.length&&(u.wrap(n),n=[o]):n=[o];n.length>0&&u.wrap(n)}static wrap(t){let r=document.createElement("div");r.className="gallery";let i=t[0].parentNode,n=t[0];i.insertBefore(r,n);for(let o of t)r.appendChild(o)}open(t){let r=document.querySelector(".pswp");new window.PhotoSwipe(r,window.PhotoSwipeUI_Default,this.items,{index:t,galleryUID:this.galleryUID,getThumbBoundsFn:n=>{let o=this.items[n].el.getElementsByTagName("img")[0],s=window.pageYOffset||document.documentElement.scrollTop,a=o.getBoundingClientRect();return{x:a.left,y:a.top+s,w:a.width}}}).init()}bindClick(){for(let[t,r]of this.items.entries())r.el.querySelector("a").addEventListener("click",n=>{n.preventDefault(),this.open(t)})}},b=u;var h={};if(localStorage.hasOwnProperty("StackColorsCache"))try{h=JSON.parse(localStorage.getItem("StackColorsCache"))}catch{h={}}async function S(e,t,r){if(!e)return await Vibrant.from(r).getPalette();if(!h.hasOwnProperty(e)||h[e].hash!==t){let i=await Vibrant.from(r).getPalette();h[e]={hash:t,Vibrant:{hex:i.Vibrant.hex,rgb:i.Vibrant.rgb,bodyTextColor:i.Vibrant.bodyTextColor},DarkMuted:{hex:i.DarkMuted.hex,rgb:i.DarkMuted.rgb,bodyTextColor:i.DarkMuted.bodyTextColor}},localStorage.setItem("StackColorsCache",JSON.stringify(h))}return h[e]}var D=(e,t=500)=>{e.classList.add("transiting"),e.style.transitionProperty="height, margin, 
padding",e.style.transitionDuration=t+"ms",e.style.height=e.offsetHeight+"px",e.offsetHeight,e.style.overflow="hidden",e.style.height="0",e.style.paddingTop="0",e.style.paddingBottom="0",e.style.marginTop="0",e.style.marginBottom="0",window.setTimeout(()=>{e.classList.remove("show"),e.style.removeProperty("height"),e.style.removeProperty("padding-top"),e.style.removeProperty("padding-bottom"),e.style.removeProperty("margin-top"),e.style.removeProperty("margin-bottom"),e.style.removeProperty("overflow"),e.style.removeProperty("transition-duration"),e.style.removeProperty("transition-property"),e.classList.remove("transiting")},t)},q=(e,t=500)=>{e.classList.add("transiting"),e.style.removeProperty("display"),e.classList.add("show");let r=e.offsetHeight;e.style.overflow="hidden",e.style.height="0",e.style.paddingTop="0",e.style.paddingBottom="0",e.style.marginTop="0",e.style.marginBottom="0",e.offsetHeight,e.style.transitionProperty="height, margin, padding",e.style.transitionDuration=t+"ms",e.style.height=r+"px",e.style.removeProperty("padding-top"),e.style.removeProperty("padding-bottom"),e.style.removeProperty("margin-top"),e.style.removeProperty("margin-bottom"),window.setTimeout(()=>{e.style.removeProperty("height"),e.style.removeProperty("overflow"),e.style.removeProperty("transition-duration"),e.style.removeProperty("transition-property"),e.classList.remove("transiting")},t)},B=(e,t=500)=>window.getComputedStyle(e).display==="none"?q(e,t):D(e,t);function v(){let e=document.getElementById("toggle-menu");e&&e.addEventListener("click",()=>{document.getElementById("main-menu").classList.contains("transiting")||(document.body.classList.toggle("show-menu"),B(document.getElementById("main-menu"),300),e.classList.toggle("is-active"))})}function N(e,t,r){var i=document.createElement(e);for(let n in t)if(n&&t.hasOwnProperty(n)){let o=t[n];n=="dangerouslySetInnerHTML"?i.innerHTML=o.__html:o===!0?i.setAttribute(n,n):o!==!1&&o!=null&&i.setAttribute(n,o.toString())}for(let n=2;n{this.isDark()?this.currentScheme="light":this.currentScheme="dark",this.setBodyClass(),this.currentScheme==this.systemPreferScheme&&(this.currentScheme="auto"),this.saveScheme()})}isDark(){return this.currentScheme=="dark"||this.currentScheme=="auto"&&this.systemPreferScheme=="dark"}dispatchEvent(t){let r=new CustomEvent("onColorSchemeChange",{detail:t});window.dispatchEvent(r)}setBodyClass(){this.isDark()?document.documentElement.dataset.scheme="dark":document.documentElement.dataset.scheme="light",this.dispatchEvent(document.documentElement.dataset.scheme)}getSavedScheme(){let t=localStorage.getItem(this.localStorageKey);return t=="light"||t=="dark"||t=="auto"?t:"auto"}bindMatchMedia(){window.matchMedia("(prefers-color-scheme: dark)").addEventListener("change",t=>{t.matches?this.systemPreferScheme="dark":this.systemPreferScheme="light",this.setBodyClass()})}},E=y;function g(e){let t;return()=>{t&&window.cancelAnimationFrame(t),t=window.requestAnimationFrame(()=>e())}}var O=".article-content h1[id], .article-content h2[id], .article-content h3[id], .article-content h4[id], .article-content h5[id], .article-content h6[id]",T="#TableOfContents",L="#TableOfContents li",C="active-class";function V(e,t){let r=e.querySelector("a").offsetHeight,i=e.offsetTop-t.offsetHeight/2+r/2-t.offsetTop;i<0&&(i=0),t.scrollTo({top:i,behavior:"smooth"})}function U(e){let t={};return e.forEach(r=>{let n=r.querySelector("a").getAttribute("href");n.startsWith("#")&&(t[n.slice(1)]=r)}),t}function k(e){let t=[];return 
e.forEach(r=>{t.push({id:r.id,offset:r.offsetTop})}),t.sort((r,i)=>r.offset-i.offset),t}function M(){let e=document.querySelectorAll(O);if(!e){console.warn("No header matched query",e);return}let t=document.querySelector(T);if(!t){console.warn("No toc matched query",T);return}let r=document.querySelectorAll(L);if(!r){console.warn("No navigation matched query",L);return}let i=k(e),n=!1;t.addEventListener("mouseenter",g(()=>n=!0)),t.addEventListener("mouseleave",g(()=>n=!1));let o,s=U(r);function a(){let m=document.documentElement.scrollTop||document.body.scrollTop,c;i.forEach(p=>{m>=p.offset-20&&(c=document.getElementById(p.id))});let l;c&&(l=s[c.id]),c&&!l?console.debug("No link found for section",c):l!==o&&(o&&o.classList.remove(C),l&&(l.classList.add(C),n||V(l,t)),o=l)}window.addEventListener("scroll",g(a));function d(){i=k(e),a()}window.addEventListener("resize",g(d))}var $="a[href]";function P(){document.querySelectorAll($).forEach(e=>{e.getAttribute("href").startsWith("#")&&e.addEventListener("click",r=>{r.preventDefault();let i=e.getAttribute("href").substring(1),n=document.getElementById(i),o=n.getBoundingClientRect().top-document.documentElement.getBoundingClientRect().top;window.history.pushState({},"",e.getAttribute("href")),scrollTo({top:o,behavior:"smooth"})})})}var x={init:()=>{v();let e=document.querySelector(".article-content");e&&(new b(e),P(),M());let t=document.querySelector(".article-list--tile");t&&new IntersectionObserver(async(s,a)=>{s.forEach(d=>{if(!d.isIntersecting)return;a.unobserve(d.target),d.target.querySelectorAll("article.has-image").forEach(async c=>{let l=c.querySelector("img"),p=l.src,H=l.getAttribute("data-key"),I=l.getAttribute("data-hash"),A=c.querySelector(".article-details"),f=await S(H,I,p);A.style.background=`
+ linear-gradient(0deg,
+ rgba(${f.DarkMuted.rgb[0]}, ${f.DarkMuted.rgb[1]}, ${f.DarkMuted.rgb[2]}, 0.5) 0%,
+ rgba(${f.Vibrant.rgb[0]}, ${f.Vibrant.rgb[1]}, ${f.Vibrant.rgb[2]}, 0.75) 100%)`})})}).observe(t);let r=document.querySelectorAll(".article-content div.highlight"),i="Copy",n="Copied!";r.forEach(o=>{let s=document.createElement("button");s.innerHTML=i,s.classList.add("copyCodeButton"),o.appendChild(s);let a=o.querySelector("code[data-lang]");a&&s.addEventListener("click",()=>{navigator.clipboard.writeText(a.textContent).then(()=>{s.textContent=n,setTimeout(()=>{s.textContent=i},1e3)}).catch(d=>{alert(d),console.log("Something went wrong",d)})})}),new E(document.getElementById("dark-mode-toggle"))}};window.addEventListener("load",()=>{setTimeout(function(){x.init()},0)});window.Stack=x;window.createElement=w;})();
+/*!
+* Hugo Theme Stack
+*
+* @author: Jimmy Cai
+* @website: https://jimmycai.com
+* @link: https://github.com/CaiJimmy/hugo-theme-stack
+*/