@article{bing_supervised_2019,
title = {Supervised Learning in {SNN} via Reward-Modulated Spike-Timing-Dependent Plasticity for a Target Reaching Vehicle},
volume = {13},
issn = {1662-5218},
url = {https://www.frontiersin.org/article/10.3389/fnbot.2019.00018/full},
doi = {10.3389/fnbot.2019.00018},
pages = {18},
journaltitle = {Frontiers in Neurorobotics},
shortjournal = {Front. Neurorobot.},
author = {Bing, Zhenshan and Baumann, Ivan and Jiang, Zhuangyi and Huang, Kai and Cai, Caixia and Knoll, Alois},
urldate = {2022-03-31},
date = {2019-05-03},
file = {Full Text:/home/max/Zotero/storage/ISZP26ET/Bing et al. - 2019 - Supervised Learning in SNN via Reward-Modulated Sp.pdf:application/pdf},
}
@inproceedings{huang_optimizing_2017,
title = {Optimizing the dynamics of spiking networks for decoding and control},
doi = {10.23919/ACC.2017.7963374},
abstract = {In this paper, an optimization-based approach to construct spiking networks for the purposes of decoding and control is presented. Specifically, we postulate a simple objective function wherein a network of interacting, primitive spiking units is decoded in order to drive a linear system along a prescribed trajectory. The units are assumed to spike only if doing so will decrease a specified objective function. The optimization gives rise to an emergent network of neurons with diffusive dynamics and a threshold-based spiking rule that bears resemblance to the Integrate and Fire neural model.},
eventtitle = {2017 American Control Conference ({ACC})},
pages = {2792--2798},
booktitle = {2017 American Control Conference ({ACC})},
author = {Huang, Fuqiang and Riehl, James and Ching, {ShiNung}},
date = {2017-05},
note = {{ISSN}: 2378-5861},
keywords = {Biological neural networks, Control systems, Decoding, Linear programming, Linear systems, Neurons, Optimization},
file = {IEEE Xplore Abstract Record:/home/max/Zotero/storage/GXAYJL4Y/7963374.html:text/html;IEEE Xplore Full Text PDF:/home/max/Zotero/storage/JQTNHXJB/Huang et al. - 2017 - Optimizing the dynamics of spiking networks for de.pdf:application/pdf},
}
@article{boerlin_predictive_2013,
title = {Predictive Coding of Dynamical Variables in Balanced Spiking Networks},
volume = {9},
issn = {1553-7358},
url = {https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1003258},
doi = {10.1371/journal.pcbi.1003258},
abstract = {Two observations about the cortex have puzzled neuroscientists for a long time. First, neural responses are highly variable. Second, the level of excitation and inhibition received by each neuron is tightly balanced at all times. Here, we demonstrate that both properties are necessary consequences of neural networks that represent information efficiently in their spikes. We illustrate this insight with spiking networks that represent dynamical variables. Our approach is based on two assumptions: We assume that information about dynamical variables can be read out linearly from neural spike trains, and we assume that neurons only fire a spike if that improves the representation of the dynamical variables. Based on these assumptions, we derive a network of leaky integrate-and-fire neurons that is able to implement arbitrary linear dynamical systems. We show that the membrane voltage of the neurons is equivalent to a prediction error about a common population-level signal. Among other things, our approach allows us to construct an integrator network of spiking neurons that is robust against many perturbations. Most importantly, neural variability in our networks cannot be equated to noise. Despite exhibiting the same single unit properties as widely used population code models (e.g. tuning curves, Poisson distributed spike trains), balanced networks are orders of magnitudes more reliable. Our approach suggests that spikes do matter when considering how the brain computes, and that the reliability of cortical representations could have been strongly underestimated.},
pages = {e1003258},
number = {11},
journaltitle = {{PLOS} Computational Biology},
shortjournal = {{PLOS} Computational Biology},
author = {Boerlin, Martin and Machens, Christian K. and Denève, Sophie},
urldate = {2022-09-20},
date = {2013-11-14},
langid = {english},
note = {Publisher: Public Library of Science},
keywords = {Neurons, Action potentials, Dynamical systems, Membrane potential, Network analysis, Neural networks, Neuronal tuning, Sensory perception},
file = {Full Text PDF:/home/max/Zotero/storage/PA8LXANX/Boerlin et al. - 2013 - Predictive Coding of Dynamical Variables in Balanc.pdf:application/pdf;Snapshot:/home/max/Zotero/storage/ACMFCVFE/article.html:text/html},
}
@article{brendel_learning_2020,
title = {Learning to represent signals spike by spike},
volume = {16},
issn = {1553-7358},
url = {https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1007692},
doi = {10.1371/journal.pcbi.1007692},
abstract = {Networks based on coordinated spike coding can encode information with high efficiency in the spike trains of individual neurons. These networks exhibit single-neuron variability and tuning curves as typically observed in cortex, but paradoxically coincide with a precise, non-redundant spike-based population code. However, it has remained unclear whether the specific synaptic connectivities required in these networks can be learnt with local learning rules. Here, we show how to learn the required architecture. Using coding efficiency as an objective, we derive spike-timing-dependent learning rules for a recurrent neural network, and we provide exact solutions for the networks’ convergence to an optimal state. As a result, we deduce an entire network from its input distribution and a firing cost. After learning, basic biophysical quantities such as voltages, firing thresholds, excitation, inhibition, or spikes acquire precise functional interpretations.},
pages = {e1007692},
number = {3},
journaltitle = {{PLOS} Computational Biology},
shortjournal = {{PLOS} Computational Biology},
author = {Brendel, Wieland and Bourdoukan, Ralph and Vertechi, Pietro and Machens, Christian K. and Denève, Sophie},
urldate = {2022-09-20},
date = {2020-03-16},
langid = {english},
note = {Publisher: Public Library of Science},
keywords = {Neurons, Action potentials, Membrane potential, Neural networks, Neuronal tuning, Coding mechanisms, Signaling networks, Speech signal processing},
file = {Full Text PDF:/home/max/Zotero/storage/CZV96R6W/Brendel et al. - 2020 - Learning to represent signals spike by spike.pdf:application/pdf;Snapshot:/home/max/Zotero/storage/4QSKUIUN/article.html:text/html},
}
@article{taherkhani_review_2020,
title = {A review of learning in biologically plausible spiking neural networks},
volume = {122},
issn = {0893-6080},
url = {https://www.sciencedirect.com/science/article/pii/S0893608019303181},
doi = {10.1016/j.neunet.2019.09.036},
abstract = {Artificial neural networks have been used as a powerful processing tool in various areas such as pattern recognition, control, robotics, and bioinformatics. Their wide applicability has encouraged researchers to improve artificial neural networks by investigating the biological brain. Neurological research has significantly progressed in recent years and continues to reveal new characteristics of biological neurons. New technologies can now capture temporal changes in the internal activity of the brain in more detail and help clarify the relationship between brain activity and the perception of a given stimulus. This new knowledge has led to a new type of artificial neural network, the Spiking Neural Network ({SNN}), that draws more faithfully on biological properties to provide higher processing abilities. A review of recent developments in learning of spiking neurons is presented in this paper. First the biological background of {SNN} learning algorithms is reviewed. The important elements of a learning algorithm such as the neuron model, synaptic plasticity, information encoding and {SNN} topologies are then presented. Then, a critical review of the state-of-the-art learning algorithms for {SNNs} using single and multiple spikes is presented. Additionally, deep spiking neural networks are reviewed, and challenges and opportunities in the {SNN} field are discussed.},
pages = {253--272},
journaltitle = {Neural Networks},
shortjournal = {Neural Networks},
author = {Taherkhani, Aboozar and Belatreche, Ammar and Li, Yuhua and Cosma, Georgina and Maguire, Liam P. and {McGinnity}, T. M.},
urldate = {2022-09-20},
date = {2020-02-01},
langid = {english},
keywords = {Learning, Spiking neural network ({SNN}), Synaptic plasticity},
file = {Full Text:/home/max/Zotero/storage/HKRNSUWU/Taherkhani et al. - 2020 - A review of learning in biologically plausible spi.pdf:application/pdf;ScienceDirect Snapshot:/home/max/Zotero/storage/CYGAQY8Z/S0893608019303181.html:text/html},
}
@article{zheng_introductory_2022,
title = {An Introductory Review of Spiking Neural Network and Artificial Neural Network: From Biological Intelligence to Artificial Intelligence},
url = {http://arxiv.org/abs/2204.07519},
shorttitle = {An Introductory Review of Spiking Neural Network and Artificial Neural Network},
abstract = {Recently, stemming from the rapid development of artificial intelligence, which has gained expansive success in pattern recognition, robotics, and bioinformatics, neuroscience is also gaining tremendous progress. A kind of spiking neural network with biological interpretability is gradually receiving wide attention, and this kind of neural network is also regarded as one of the directions toward general artificial intelligence. This review introduces the following sections, the biological background of spiking neurons and the theoretical basis, different neuronal models, the connectivity of neural circuits, the mainstream neural network learning mechanisms and network architectures, etc. This review hopes to attract different researchers and advance the development of brain-inspired intelligence and artificial intelligence.},
journaltitle = {{arXiv}:2204.07519 [cs]},
author = {Zheng, Shengjie and Qian, Lang and Li, Pingsheng and He, Chenggang and Qin, Xiaoqin and Li, Xiaojian},
urldate = {2022-09-20},
date = {2022-04-09},
eprinttype = {arxiv},
eprint = {2204.07519},
keywords = {Computer Science - Artificial Intelligence, Computer Science - Neural and Evolutionary Computing},
file = {arXiv Fulltext PDF:/home/max/Zotero/storage/3PER3Y9Y/Zheng et al. - 2022 - An Introductory Review of Spiking Neural Network a.pdf:application/pdf;arXiv.org Snapshot:/home/max/Zotero/storage/CZ94M8PX/2204.html:text/html},
}
@article{izhikevich_simple_2003,
title = {Simple model of spiking neurons},
volume = {14},
issn = {1941-0093},
doi = {10.1109/TNN.2003.820440},
abstract = {A model is presented that reproduces spiking and bursting behavior of known types of cortical neurons. The model combines the biologically plausibility of Hodgkin-Huxley-type dynamics and the computational efficiency of integrate-and-fire neurons. Using this model, one can simulate tens of thousands of spiking cortical neurons in real time (1 ms resolution) using a desktop {PC}.},
pages = {1569--1572},
number = {6},
journaltitle = {{IEEE} Transactions on Neural Networks},
author = {Izhikevich, E.M.},
date = {2003-11},
note = {Conference Name: {IEEE} Transactions on Neural Networks},
keywords = {Neurons, Bifurcation, Biological system modeling, Biology computing, Biomembranes, Brain modeling, Computational modeling, Large-scale systems, Mathematical analysis, Mathematical model},
file = {Full Text:/home/max/Zotero/storage/86M9DHRU/Izhikevich - 2003 - Simple model of spiking neurons.pdf:application/pdf;IEEE Xplore Abstract Record:/home/max/Zotero/storage/VARGU5K2/1257420.html:text/html},
}
@book{johnston_foundations_1995,
location = {Cambridge, Mass},
title = {Foundations of cellular neurophysiology},
isbn = {978-0-262-10053-3},
pagetotal = {676},
publisher = {{MIT} Press},
author = {Johnston, Daniel and Wu, Samuel Miao-sin},
date = {1995},
keywords = {Neurons, Ion Channels, Neurophysiology, physiology, Synaptic Transmission},
}
@article{hodgkin_quantitative_1952,
title = {A quantitative description of membrane current and its application to conduction and excitation in nerve},
volume = {117},
issn = {1469-7793},
url = {https://onlinelibrary.wiley.com/doi/abs/10.1113/jphysiol.1952.sp004764},
doi = {10.1113/jphysiol.1952.sp004764},
pages = {500--544},
number = {4},
journaltitle = {The Journal of Physiology},
author = {Hodgkin, A. L. and Huxley, A. F.},
urldate = {2022-09-21},
date = {1952},
langid = {english},
note = {\_eprint: https://onlinelibrary.wiley.com/doi/pdf/10.1113/jphysiol.1952.sp004764},
file = {Full Text PDF:/home/max/Zotero/storage/HZXTUGM9/Hodgkin and Huxley - 1952 - A quantitative description of membrane current and.pdf:application/pdf;Snapshot:/home/max/Zotero/storage/5SKVB6M9/jphysiol.1952.html:text/html},
}
@article{azevedo_equal_2009,
title = {Equal numbers of neuronal and nonneuronal cells make the human brain an isometrically scaled-up primate brain},
volume = {513},
issn = {1096-9861},
doi = {10.1002/cne.21974},
abstract = {The human brain is often considered to be the most cognitively capable among mammalian brains and to be much larger than expected for a mammal of our body size. Although the number of neurons is generally assumed to be a determinant of computational power, and despite the widespread quotes that the human brain contains 100 billion neurons and ten times more glial cells, the absolute number of neurons and glial cells in the human brain remains unknown. Here we determine these numbers by using the isotropic fractionator and compare them with the expected values for a human-sized primate. We find that the adult male human brain contains on average 86.1 +/- 8.1 billion {NeuN}-positive cells ("neurons") and 84.6 +/- 9.8 billion {NeuN}-negative ("nonneuronal") cells. With only 19\% of all neurons located in the cerebral cortex, greater cortical size (representing 82\% of total brain mass) in humans compared with other primates does not reflect an increased relative number of cortical neurons. The ratios between glial cells and neurons in the human brain structures are similar to those found in other primates, and their numbers of cells match those expected for a primate of human proportions. These findings challenge the common view that humans stand out from other primates in their brain composition and indicate that, with regard to numbers of neuronal and nonneuronal cells, the human brain is an isometrically scaled-up primate brain.},
pages = {532--541},
number = {5},
journaltitle = {The Journal of Comparative Neurology},
shortjournal = {J Comp Neurol},
author = {Azevedo, Frederico A. C. and Carvalho, Ludmila R. B. and Grinberg, Lea T. and Farfel, José Marcelo and Ferretti, Renata E. L. and Leite, Renata E. P. and Jacob Filho, Wilson and Lent, Roberto and Herculano-Houzel, Suzana},
date = {2009-04-10},
pmid = {19226510},
keywords = {Neurons, Aged, Antigens, Nuclear, Brain, Cerebral Cortex, Humans, Immunohistochemistry, Male, Middle Aged, Nerve Tissue Proteins, Neuroglia},
}
@thesis{huang_dynamics_2019,
title = {Dynamics and Control in Spiking Neural Networks},
url = {https://openscholarship.wustl.edu/eng_etds/495},
doi = {10.7936/YA3F-RK28},
institution = {Washington University in St. Louis},
author = {Huang, Fuqiang},
urldate = {2022-10-14},
date = {2019-12-15},
}
@article{deneve_efficient_2016,
title = {Efficient codes and balanced networks},
volume = {19},
issn = {1097-6256, 1546-1726},
url = {http://www.nature.com/articles/nn.4243},
doi = {10.1038/nn.4243},
pages = {375--382},
number = {3},
journaltitle = {Nature Neuroscience},
shortjournal = {Nat Neurosci},
author = {Denève, Sophie and Machens, Christian K},
urldate = {2022-10-18},
date = {2016-03},
langid = {english},
}
@article{huang_spiking_2019,
title = {Spiking networks as efficient distributed controllers},
volume = {113},
issn = {0340-1200, 1432-0770},
url = {http://link.springer.com/10.1007/s00422-018-0769-7},
doi = {10.1007/s00422-018-0769-7},
pages = {179--190},
number = {1},
journaltitle = {Biological Cybernetics},
shortjournal = {Biol Cybern},
author = {Huang, Fuqiang and Ching, {ShiNung}},
urldate = {2022-10-23},
date = {2019-04},
langid = {english},
}
@article{tanaka_recent_2019,
title = {Recent advances in physical reservoir computing: A review},
volume = {115},
issn = {08936080},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0893608019300784},
doi = {10.1016/j.neunet.2019.03.005},
shorttitle = {Recent advances in physical reservoir computing},
pages = {100--123},
journaltitle = {Neural Networks},
shortjournal = {Neural Networks},
author = {Tanaka, Gouhei and Yamane, Toshiyuki and Héroux, Jean Benoit and Nakane, Ryosho and Kanazawa, Naoki and Takeda, Seiji and Numata, Hidetoshi and Nakano, Daiju and Hirose, Akira},
urldate = {2022-10-29},
date = {2019-07},
langid = {english},
file = {Full Text:/home/max/Zotero/storage/HCM2U7AB/Tanaka et al. - 2019 - Recent advances in physical reservoir computing A.pdf:application/pdf},
}
@incollection{cooper_liquid_2011,
title = {Liquid State Machines: Motivation, Theory, and Applications},
isbn = {978-1-84816-277-8},
url = {http://www.worldscientific.com/doi/abs/10.1142/9781848162778_0008},
shorttitle = {Liquid State Machines},
pages = {275--296},
booktitle = {Computability in Context},
publisher = {{IMPERIAL} {COLLEGE} {PRESS}},
author = {Maass, Wolfgang},
editor = {Cooper, S. Barry and Sorbi, Andrea},
urldate = {2022-10-31},
date = {2011-02},
langid = {english},
doi = {10.1142/9781848162778_0008},
}
@article{maass_computational_2004,
title = {On the computational power of circuits of spiking neurons},
volume = {69},
issn = {00220000},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0022000004000406},
doi = {10.1016/j.jcss.2004.04.001},
pages = {593--616},
number = {4},
journaltitle = {Journal of Computer and System Sciences},
shortjournal = {Journal of Computer and System Sciences},
author = {Maass, Wolfgang and Markram, Henry},
urldate = {2022-11-07},
date = {2004-12},
langid = {english},
file = {Full Text:/home/max/Zotero/storage/SAY2ARZX/Maass and Markram - 2004 - On the computational power of circuits of spiking .pdf:application/pdf},
}
@book{dayan_theoretical_2001,
location = {Cambridge, Mass},
title = {Theoretical neuroscience: computational and mathematical modeling of neural systems},
isbn = {978-0-262-04199-7},
series = {Computational neuroscience},
shorttitle = {Theoretical neuroscience},
pagetotal = {460},
publisher = {Massachusetts Institute of Technology Press},
author = {Dayan, Peter and Abbott, L. F.},
date = {2001},
langid = {english},
keywords = {Computational neuroscience, Computer simulation, Human information processing, Neural networks (Neurobiology)},
file = {Dayan and Abbott - 2001 - Theoretical neuroscience computational and mathem.pdf:/home/max/Zotero/storage/ZXQ2I2D2/Dayan and Abbott - 2001 - Theoretical neuroscience computational and mathem.pdf:application/pdf},
}
@article{maass_computational_2007,
title = {Computational Aspects of Feedback in Neural Circuits},
volume = {3},
issn = {1553-7358},
url = {https://dx.plos.org/10.1371/journal.pcbi.0020165},
doi = {10.1371/journal.pcbi.0020165},
pages = {e165},
number = {1},
journaltitle = {{PLoS} Computational Biology},
shortjournal = {{PLoS} Comput Biol},
author = {Maass, Wolfgang and Joshi, Prashant and Sontag, Eduardo D},
editor = {Kotter, Rolf},
urldate = {2022-11-07},
date = {2007-01-19},
langid = {english},
file = {Full Text:/home/max/Zotero/storage/JKI7GQUS/Maass et al. - 2007 - Computational Aspects of Feedback in Neural Circui.pdf:application/pdf},
}
@article{verstraeten_experimental_2007,
title = {An experimental unification of reservoir computing methods},
volume = {20},
issn = {08936080},
url = {https://linkinghub.elsevier.com/retrieve/pii/S089360800700038X},
doi = {10.1016/j.neunet.2007.04.003},
pages = {391--403},
number = {3},
journaltitle = {Neural Networks},
shortjournal = {Neural Networks},
author = {Verstraeten, D. and Schrauwen, B. and D’Haene, M. and Stroobandt, D.},
urldate = {2022-11-07},
date = {2007-04},
langid = {english},
}
@report{rullan_buxo_poisson_2019,
title = {Poisson balanced spiking networks},
url = {http://biorxiv.org/lookup/doi/10.1101/836601},
abstract = {
An important problem in computational neuroscience is to understand how networks of spiking neurons can carry out various computations underlying behavior. Balanced spiking networks ({BSNs}) provide a powerful framework for implementing arbitrary linear dynamical systems in networks of integrate-and-fire neurons (Boerlin et al. [1]). However, the classic {BSN} model requires near-instantaneous transmission of spikes between neurons, which is biologically implausible. Introducing realistic synaptic delays leads to an pathological regime known as “ping-ponging”, in which different populations spike maximally in alternating time bins, causing network output to overshoot the target solution. Here we document this phenomenon and provide a novel solution: we show that a network can have realistic synaptic delays while maintaining accuracy and stability if neurons are endowed with conditionally Poisson firing. Formally, we propose two alternate formulations of Poisson balanced spiking networks: (1) a “local” framework, which replaces the hard integrate-and-fire spiking rule within each neuron by a “soft” threshold function, such that firing probability grows as a smooth nonlinear function of membrane potential; and (2) a “population” framework, which reformulates the {BSN} objective function in terms of expected spike counts over the entire population. We show that both approaches offer improved robustness, allowing for accurate implementation of network dynamics with realistic synaptic delays between neurons. Moreover, both models produce positive correlations between similarly tuned neurons, a feature of real neural populations that is not found in the original {BSN}. This work unifies balanced spiking networks with Poisson generalized linear models and suggests several promising avenues for future research.},
institution = {Neuroscience},
type = {preprint},
author = {Rullán Buxó, Camille E. and Pillow, Jonathan W.},
urldate = {2022-11-07},
date = {2019-11-09},
langid = {english},
doi = {10.1101/836601},
file = {Full Text:/home/max/Zotero/storage/GY3X7XG9/Rullán Buxó and Pillow - 2019 - Poisson balanced spiking networks.pdf:application/pdf},
}
@article{andrew_spiking_2003,
title = {Spiking Neuron Models: Single Neurons, Populations, Plasticity},
volume = {32},
issn = {0368-492X},
url = {https://www.emerald.com/insight/content/doi/10.1108/k.2003.06732gae.003/full/html},
doi = {10.1108/k.2003.06732gae.003},
shorttitle = {Spiking Neuron Models},
number = {7},
journaltitle = {Kybernetes},
author = {Andrew, Alex M.},
urldate = {2022-11-15},
date = {2003-10-01},
langid = {english},
}
@article{almomani_comparative_2019,
title = {A comparative study on spiking neural network encoding schema: implemented with cloud computing},
volume = {22},
issn = {1386-7857, 1573-7543},
url = {http://link.springer.com/10.1007/s10586-018-02891-0},
doi = {10.1007/s10586-018-02891-0},
shorttitle = {A comparative study on spiking neural network encoding schema},
pages = {419--433},
number = {2},
journaltitle = {Cluster Computing},
shortjournal = {Cluster Comput},
author = {Almomani, Ammar and Alauthman, Mohammad and Alweshah, Mohammed and Dorgham, O. and Albalas, Firas},
urldate = {2022-11-15},
date = {2019-06},
langid = {english},
}
@article{adrian_impulses_1926,
title = {The impulses produced by sensory nerve-endings: Part {II}. The response of a Single End-Organ},
volume = {61},
issn = {00223751},
url = {https://onlinelibrary.wiley.com/doi/10.1113/jphysiol.1926.sp002281},
doi = {10.1113/jphysiol.1926.sp002281},
shorttitle = {The impulses produced by sensory nerve-endings},
pages = {151--171},
number = {2},
journaltitle = {The Journal of Physiology},
author = {Adrian, E. D. and Zotterman, Yngve},
urldate = {2022-11-16},
date = {1926-04-23},
langid = {english},
file = {Full Text:/home/max/Zotero/storage/GDHUCEUY/Adrian and Zotterman - 1926 - The impulses produced by sensory nerve-endings Pa.pdf:application/pdf},
}
@article{brette_philosophy_2015,
title = {Philosophy of the Spike: Rate-Based vs. Spike-Based Theories of the Brain},
volume = {9},
issn = {1662-5137},
url = {http://journal.frontiersin.org/Article/10.3389/fnsys.2015.00151/abstract},
doi = {10.3389/fnsys.2015.00151},
shorttitle = {Philosophy of the Spike},
journaltitle = {Frontiers in Systems Neuroscience},
shortjournal = {Front. Syst. Neurosci.},
author = {Brette, Romain},
urldate = {2022-11-16},
date = {2015-11-10},
file = {Full Text:/home/max/Zotero/storage/5T6FKJCM/Brette - 2015 - Philosophy of the Spike Rate-Based vs. Spike-Base.pdf:application/pdf},
}
@inproceedings{diehl_conversion_2016,
location = {San Diego, {CA}, {USA}},
title = {Conversion of artificial recurrent neural networks to spiking neural networks for low-power neuromorphic hardware},
isbn = {978-1-5090-1370-8},
url = {http://ieeexplore.ieee.org/document/7738691/},
doi = {10.1109/ICRC.2016.7738691},
eventtitle = {2016 {IEEE} International Conference on Rebooting Computing ({ICRC})},
pages = {1--8},
booktitle = {2016 {IEEE} International Conference on Rebooting Computing ({ICRC})},
publisher = {{IEEE}},
author = {Diehl, Peter U. and Zarrella, Guido and Cassidy, Andrew and Pedroni, Bruno U. and Neftci, Emre},
urldate = {2022-11-17},
date = {2016-10},
file = {Submitted Version:/home/max/Zotero/storage/WSYKK4UQ/Diehl et al. - 2016 - Conversion of artificial recurrent neural networks.pdf:application/pdf},
}
@inproceedings{diehl_fast-classifying_2015,
location = {Killarney, Ireland},
title = {Fast-classifying, high-accuracy spiking deep networks through weight and threshold balancing},
isbn = {978-1-4799-1960-4},
url = {http://ieeexplore.ieee.org/document/7280696/},
doi = {10.1109/IJCNN.2015.7280696},
eventtitle = {2015 International Joint Conference on Neural Networks ({IJCNN})},
pages = {1--8},
booktitle = {2015 International Joint Conference on Neural Networks ({IJCNN})},
publisher = {{IEEE}},
author = {Diehl, Peter U. and Neil, Daniel and Binas, Jonathan and Cook, Matthew and Liu, Shih-Chii and Pfeiffer, Michael},
urldate = {2022-11-17},
date = {2015-07},
}
@article{attwell_energy_2001,
title = {An Energy Budget for Signaling in the Grey Matter of the Brain},
volume = {21},
issn = {0271-678X, 1559-7016},
url = {http://journals.sagepub.com/doi/10.1097/00004647-200110000-00001},
doi = {10.1097/00004647-200110000-00001},
abstract = {Anatomic and physiologic data are used to analyze the energy expenditure on different components of excitatory signaling in the grey matter of rodent brain. Action potentials and postsynaptic effects of glutamate are predicted to consume much of the energy (47\% and 34\%, respectively), with the resting potential consuming a smaller amount (13\%), and glutamate recycling using only 3\%. Energy usage depends strongly on action potential rate—an increase in activity of 1 action potential/cortical neuron/s will raise oxygen consumption by 145 {mL}/100 g grey matter/h. The energy expended on signaling is a large fraction of the total energy used by the brain; this favors the use of energy efficient neural codes and wiring patterns. Our estimates of energy usage predict the use of distributed codes, with ≤15\% of neurons simultaneously active, to reduce energy consumption and allow greater computing power from a fixed number of neurons. Functional magnetic resonance imaging signals are likely to be dominated by changes in energy usage associated with synaptic currents and action potential propagation.},
pages = {1133--1145},
number = {10},
journaltitle = {Journal of Cerebral Blood Flow \& Metabolism},
shortjournal = {J Cereb Blood Flow Metab},
author = {Attwell, David and Laughlin, Simon B.},
urldate = {2022-11-24},
date = {2001-10},
langid = {english},
file = {Full Text:/home/max/Zotero/storage/7AL9LJ8F/Attwell and Laughlin - 2001 - An Energy Budget for Signaling in the Grey Matter .pdf:application/pdf},
}
@article{johnson_minimum-error_2016,
title = {A minimum-error, energy-constrained neural code is an instantaneous-rate code},
volume = {40},
issn = {0929-5313, 1573-6873},
url = {http://link.springer.com/10.1007/s10827-016-0592-x},
doi = {10.1007/s10827-016-0592-x},
pages = {193--206},
number = {2},
journaltitle = {Journal of Computational Neuroscience},
shortjournal = {J Comput Neurosci},
author = {Johnson, Erik C. and Jones, Douglas L. and Ratnam, Rama},
urldate = {2022-11-24},
date = {2016-04},
langid = {english},
}
@misc{vaswani_attention_2017,
title = {Attention Is All You Need},
url = {http://arxiv.org/abs/1706.03762},
abstract = {The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train. Our model achieves 28.4 {BLEU} on the {WMT} 2014 English-to-German translation task, improving over the existing best results, including ensembles by over 2 {BLEU}. On the {WMT} 2014 English-to-French translation task, our model establishes a new single-model state-of-the-art {BLEU} score of 41.8 after training for 3.5 days on eight {GPUs}, a small fraction of the training costs of the best models from the literature. We show that the Transformer generalizes well to other tasks by applying it successfully to English constituency parsing both with large and limited training data.},
number = {{arXiv}:1706.03762},
publisher = {{arXiv}},
author = {Vaswani, Ashish and Shazeer, Noam and Parmar, Niki and Uszkoreit, Jakob and Jones, Llion and Gomez, Aidan N. and Kaiser, Lukasz and Polosukhin, Illia},
urldate = {2022-12-02},
date = {2017-12-05},
eprinttype = {arxiv},
eprint = {1706.03762 [cs]},
keywords = {Computer Science - Computation and Language, Computer Science - Machine Learning},
file = {arXiv Fulltext PDF:/home/max/Zotero/storage/X8RY86JH/Vaswani et al. - 2017 - Attention Is All You Need.pdf:application/pdf;arXiv.org Snapshot:/home/max/Zotero/storage/U9IQ5M9Z/1706.html:text/html},
}
@article{patel_applications_2007,
title = {Applications of Artificial Neural Networks in Medical Science},
volume = {2},
issn = {15748847},
url = {http://www.eurekaselect.com/openurl/content.php?genre=article&issn=1574-8847&volume=2&issue=3&spage=217},
doi = {10.2174/157488407781668811},
pages = {217--226},
number = {3},
journaltitle = {Current Clinical Pharmacology},
shortjournal = {{CCP}},
author = {Patel, Jigneshkumar and Goyal, Ramesh},
urldate = {2022-12-02},
date = {2007-09-01},
langid = {english},
}
@article{maass_networks_1997,
title = {Networks of spiking neurons: The third generation of neural network models},
volume = {10},
issn = {08936080},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0893608097000117},
doi = {10.1016/S0893-6080(97)00011-7},
shorttitle = {Networks of spiking neurons},
pages = {1659--1671},
number = {9},
journaltitle = {Neural Networks},
shortjournal = {Neural Networks},
author = {Maass, Wolfgang},
urldate = {2022-12-09},
date = {1997-12},
langid = {english},
}
@article{wu_little_2022,
title = {A Little Energy Goes a Long Way: Build an Energy-Efficient, Accurate Spiking Neural Network From Convolutional Neural Network},
volume = {16},
issn = {1662-453X},
url = {https://www.frontiersin.org/articles/10.3389/fnins.2022.759900/full},
doi = {10.3389/fnins.2022.759900},
shorttitle = {A Little Energy Goes a Long Way},
abstract = {This article conforms to a recent trend of developing an energy-efficient Spiking Neural Network ({SNN}), which takes advantage of the sophisticated training regime of Convolutional Neural Network ({CNN}) and converts a well-trained {CNN} to an {SNN}. We observe that the existing {CNN}-to-{SNN} conversion algorithms may keep a certain amount of residual current in the spiking neurons in {SNN}, and the residual current may cause significant accuracy loss when inference time is short. To deal with this, we propose a unified framework to equalize the output of the convolutional or dense layer in {CNN} and the accumulated current in {SNN}, and maximally align the spiking rate of a neuron with its corresponding charge. This framework enables us to design a novel explicit current control ({ECC}) method for the {CNN}-to-{SNN} conversion which considers multiple objectives at the same time during the conversion, including accuracy, latency, and energy efficiency. We conduct an extensive set of experiments on different neural network architectures, e.g., {VGG}, {ResNet}, and {DenseNet}, to evaluate the resulting {SNNs}. The benchmark datasets include not only the image datasets such as {CIFAR}-10/100 and {ImageNet} but also the Dynamic Vision Sensor ({DVS}) image datasets such as {DVS}-{CIFAR}-10. The experimental results show the superior performance of our {ECC} method over the state-of-the-art.},
pages = {759900},
journaltitle = {Frontiers in Neuroscience},
shortjournal = {Front. Neurosci.},
author = {Wu, Dengyu and Yi, Xinping and Huang, Xiaowei},
urldate = {2022-12-09},
date = {2022-05-26},
file = {Full Text:/home/max/Zotero/storage/EM9NFTX6/Wu et al. - 2022 - A Little Energy Goes a Long Way Build an Energy-E.pdf:application/pdf},
}
@incollection{clarke_circulation_1999,
title = {Circulation and energy metabolism of the brain},
pages = {637--669},
booktitle = {Basic Neurochemistry: Molecular, Cellular, and Medical Aspects},
author = {Clarke, D.D. and Sokoloff, L.},
date = {1999},
}
@article{indiveri_importance_2019,
title = {The Importance of Space and Time for Signal Processing in Neuromorphic Agents: The Challenge of Developing Low-Power, Autonomous Agents That Interact With the Environment},
volume = {36},
issn = {1053-5888, 1558-0792},
url = {https://ieeexplore.ieee.org/document/8887553/},
doi = {10.1109/MSP.2019.2928376},
shorttitle = {The Importance of Space and Time for Signal Processing in Neuromorphic Agents},
pages = {16--28},
number = {6},
journaltitle = {{IEEE} Signal Processing Magazine},
shortjournal = {{IEEE} Signal Process. Mag.},
author = {Indiveri, Giacomo and Sandamirskaya, Yulia},
urldate = {2022-12-09},
date = {2019-11},
file = {Accepted Version:/home/max/Zotero/storage/HY4KK7VF/Indiveri and Sandamirskaya - 2019 - The Importance of Space and Time for Signal Proces.pdf:application/pdf},
}
@article{putney_precise_2019,
title = {Precise timing is ubiquitous, consistent, and coordinated across a comprehensive, spike-resolved flight motor program},
volume = {116},
url = {https://www.pnas.org/doi/10.1073/pnas.1907513116},
doi = {10.1073/pnas.1907513116},
abstract = {Sequences of action potentials, or spikes, carry information in the number of spikes and their timing. Spike timing codes are critical in many sensory systems, but there is now growing evidence that millisecond-scale changes in timing also carry information in motor brain regions, descending decision-making circuits, and individual motor units. Across all of the many signals that control a behavior, how ubiquitous, consistent, and coordinated are spike timing codes? Assessing these open questions ideally involves recording across the whole motor program with spike-level resolution. To do this, we took advantage of the relatively few motor units controlling the wings of a hawk moth, Manduca sexta. We simultaneously recorded nearly every action potential from all major wing muscles and the resulting forces in tethered flight. We found that timing encodes more information about turning behavior than spike count in every motor unit, even though there is sufficient variation in count alone. Flight muscles vary broadly in function as well as in the number and timing of spikes. Nonetheless, each muscle with multiple spikes consistently blends spike timing and count information in a 3:1 ratio. Coding strategies are consistent. Finally, we assess the coordination of muscles using pairwise redundancy measured through interaction information. Surprisingly, not only are all muscle pairs coordinated, but all coordination is accomplished almost exclusively through spike timing, not spike count. Spike timing codes are ubiquitous, consistent, and essential for coordination.},
pages = {26951--26960},
number = {52},
journaltitle = {Proceedings of the National Academy of Sciences},
author = {Putney, Joy and Conn, Rachel and Sponberg, Simon},
urldate = {2022-12-14},
date = {2019-12-26},
note = {Publisher: Proceedings of the National Academy of Sciences},
file = {Full Text PDF:/home/max/Zotero/storage/2PG6RIBS/Putney et al. - 2019 - Precise timing is ubiquitous, consistent, and coor.pdf:application/pdf},
}
@article{zhang_digital_2015,
title = {A Digital Liquid State Machine With Biologically Inspired Learning and Its Application to Speech Recognition},
volume = {26},
issn = {2162-2388},
doi = {10.1109/TNNLS.2015.2388544},
abstract = {This paper presents a bioinspired digital liquid-state machine ({LSM}) for low-power very-large-scale-integration ({VLSI})-based machine learning applications. To the best of the authors' knowledge, this is the first work that employs a bioinspired spike-based learning algorithm for the {LSM}. With the proposed online learning, the {LSM} extracts information from input patterns on the fly without needing intermediate data storage as required in offline learning methods such as ridge regression. The proposed learning rule is local such that each synaptic weight update is based only upon the firing activities of the corresponding presynaptic and postsynaptic neurons without incurring global communications across the neural network. Compared with the backpropagation-based learning, the locality of computation in the proposed approach lends itself to efficient parallel {VLSI} implementation. We use subsets of the {TI}46 speech corpus to benchmark the bioinspired digital {LSM}. To reduce the complexity of the spiking neural network model without performance degradation for speech recognition, we study the impacts of synaptic models on the fading memory of the reservoir and hence the network performance. Moreover, we examine the tradeoffs between synaptic weight resolution, reservoir size, and recognition performance and present techniques to further reduce the overhead of hardware implementation. Our simulation results show that in terms of isolated word recognition evaluated using the {TI}46 speech corpus, the proposed digital {LSM} rivals the state-of-the-art hidden Markov-model-based recognizer Sphinx-4 and outperforms all other reported recognizers including the ones that are based upon the {LSM} or neural networks.},
pages = {2635--2649},
number = {11},
journaltitle = {{IEEE} Transactions on Neural Networks and Learning Systems},
author = {Zhang, Yong and Li, Peng and Jin, Yingyezhe and Choe, Yoonsuck},
date = {2015-11},
note = {Conference Name: {IEEE} Transactions on Neural Networks and Learning Systems},
keywords = {Biological neural networks, Neurons, Hardware implementation, Hidden Markov models, liquid-state machine ({LSM}), Reservoirs, Speech recognition, spike-based learning},
file = {Full Text:/home/max/Zotero/storage/NTK9VLBF/Zhang et al. - 2015 - A Digital Liquid State Machine With Biologically I.pdf:application/pdf;IEEE Xplore Abstract Record:/home/max/Zotero/storage/MPWG6NDX/7024132.html:text/html},
}
@incollection{hutchison_biologically_2004,
location = {Berlin, Heidelberg},
title = {Biologically Plausible Speech Recognition with {LSTM} Neural Nets},
volume = {3141},
isbn = {978-3-540-27835-1},
url = {http://link.springer.com/10.1007/978-3-540-27835-1_10},
pages = {127--136},
booktitle = {Biologically Inspired Approaches to Advanced Information Technology},
publisher = {Springer Berlin Heidelberg},
author = {Graves, Alex and Eck, Douglas and Beringer, Nicole and Schmidhuber, Juergen},
editor = {Ijspeert, Auke Jan and Murata, Masayuki and Wakamiya, Naoki},
editorb = {Hutchison, David and Kanade, Takeo and Kittler, Josef and Kleinberg, Jon M. and Mattern, Friedemann and Mitchell, John C. and Naor, Moni and Nierstrasz, Oscar and Pandu Rangan, C. and Steffen, Bernhard and Sudan, Madhu and Terzopoulos, Demetri and Tygar, Doug and Vardi, Moshe Y. and Weikum, Gerhard},
editorbtype = {redactor},
urldate = {2022-12-14},
date = {2004},
doi = {10.1007/978-3-540-27835-1_10},
note = {Series Title: Lecture Notes in Computer Science},
}
@article{jin_performance_2017,
title = {Performance and robustness of bio-inspired digital liquid state machines: A case study of speech recognition},
volume = {226},
issn = {0925-2312},
url = {https://www.sciencedirect.com/science/article/pii/S0925231216314606},
doi = {10.1016/j.neucom.2016.11.045},
shorttitle = {Performance and robustness of bio-inspired digital liquid state machines},
abstract = {This paper presents a systematic performance and robustness study of bio-inspired digital liquid state machines ({LSMs}) for the purpose of future hardware implementation. Our work focuses not only on the study of the relation between a broad range of network parameters and performance, but also on the impact of process variability and environmental noise on the bio-inspired {LSMs} from a circuit implementation perspective. In order to shed light on the implementation of {LSMs} in digital {CMOS} technologies, we study the trade-offs between hardware overhead (i.e. precision of synaptic weights and membrane voltage and size of the reservoir) and performance. Assisted with theoretical analysis, we leverage the inherent redundancy of the targeted spiking neural networks to achieve both high performance and low hardware cost for the application of speech recognition. In addition, by modeling several types of catastrophic failure and random error, we show that the {LSMs} are generally robust. Using three subsets of the {TI}46 speech corpus to benchmark, we elucidate that in terms of isolated word recognition, the analyzed digital {LSMs} are very promising for future hardware implementation because of their low overhead, good robustness, and high recognition performance.},
pages = {145--160},
journaltitle = {Neurocomputing},
shortjournal = {Neurocomputing},
author = {Jin, Yingyezhe and Li, Peng},
urldate = {2022-12-14},
date = {2017-02-22},
langid = {english},
keywords = {Speech recognition, Liquid state machine, Performance, Robustness},
file = {ScienceDirect Full Text PDF:/home/max/Zotero/storage/4532KZ44/Jin and Li - 2017 - Performance and robustness of bio-inspired digital.pdf:application/pdf;ScienceDirect Snapshot:/home/max/Zotero/storage/W6BUXFTF/S0925231216314606.html:text/html},
}
@article{dewolf_spiking_2016,
title = {A spiking neural model of adaptive arm control},
volume = {283},
issn = {0962-8452, 1471-2954},
url = {https://royalsocietypublishing.org/doi/10.1098/rspb.2016.2134},
doi = {10.1098/rspb.2016.2134},
abstract = {We present a spiking neuron model of the motor cortices and cerebellum of the motor control system. The model consists of anatomically organized spiking neurons encompassing premotor, primary motor, and cerebellar cortices. The model proposes novel neural computations within these areas to control a nonlinear three-link arm model that can adapt to unknown changes in arm dynamics and kinematic structure. We demonstrate the mathematical stability of both forms of adaptation, suggesting that this is a robust approach for common biological problems of changing body size (e.g. during growth), and unexpected dynamic perturbations (e.g. when moving through different media, such as water or mud). To demonstrate the plausibility of the proposed neural mechanisms, we show that the model accounts for data across 19 studies of the motor control system. These data include a mix of behavioural and neural spiking activity, across subjects performing adaptive and static tasks. Given this proposed characterization of the biological processes involved in motor control of the arm, we provide several experimentally testable predictions that distinguish our model from previous work.},
pages = {20162134},
number = {1843},
journaltitle = {Proceedings of the Royal Society B: Biological Sciences},
shortjournal = {Proc. R. Soc. B.},
author = {{DeWolf}, Travis and Stewart, Terrence C. and Slotine, Jean-Jacques and Eliasmith, Chris},
urldate = {2022-12-15},
date = {2016-11-30},
langid = {english},
file = {Full Text:/home/max/Zotero/storage/ISH9VJP8/DeWolf et al. - 2016 - A spiking neural model of adaptive arm control.pdf:application/pdf},
}
@article{pfeiffer_deep_2018,
title = {Deep Learning With Spiking Neurons: Opportunities and Challenges},
volume = {12},
issn = {1662-453X},
url = {https://www.frontiersin.org/articles/10.3389/fnins.2018.00774},
shorttitle = {Deep Learning With Spiking Neurons},
abstract = {Spiking neural networks ({SNNs}) are inspired by information processing in biology, where sparse and asynchronous binary signals are communicated and processed in a massively parallel fashion. {SNNs} on neuromorphic hardware exhibit favorable properties such as low power consumption, fast inference, and event-driven information processing. This makes them interesting candidates for the efficient implementation of deep neural networks, the method of choice for many machine learning tasks. In this review, we address the opportunities that deep spiking networks offer and investigate in detail the challenges associated with training {SNNs} in a way that makes them competitive with conventional deep learning, but simultaneously allows for efficient mapping to hardware. A wide range of training methods for {SNNs} is presented, ranging from the conversion of conventional deep networks into {SNNs}, constrained training before conversion, spiking variants of backpropagation, and biologically motivated variants of {STDP}. The goal of our review is to define a categorization of {SNN} training methods, and summarize their advantages and drawbacks. We further discuss relationships between {SNNs} and binary networks, which are becoming popular for efficient digital hardware implementation. Neuromorphic hardware platforms have great potential to enable deep spiking networks in real-world applications. We compare the suitability of various neuromorphic systems that have been developed over the past years, and investigate potential use cases. Neuromorphic approaches and conventional machine learning should not be considered simply two solutions to the same classes of problems, instead it is possible to identify and exploit their task-specific advantages. Deep {SNNs} offer great opportunities to work with new types of event-based sensors, exploit temporal codes and local on-chip learning, and we have so far just scratched the surface of realizing these advantages in practical applications.},
journaltitle = {Frontiers in Neuroscience},
author = {Pfeiffer, Michael and Pfeil, Thomas},
urldate = {2022-12-15},
date = {2018},
file = {Full Text PDF:/home/max/Zotero/storage/5XMAVW44/Pfeiffer and Pfeil - 2018 - Deep Learning With Spiking Neurons Opportunities .pdf:application/pdf},
}
@article{tang_feedforward_1993,
title = {Feedforward Neural Nets as Models for Time Series Forecasting},
volume = {5},
issn = {0899-1499, 2326-3245},
url = {http://pubsonline.informs.org/doi/10.1287/ijoc.5.4.374},
doi = {10.1287/ijoc.5.4.374},
abstract = {We have studied neural networks as models for time series forecasting, and our research compares the Box-Jenkins method against the neural network method for long and short term memory series. Our work was inspired by previously published works that yielded inconsistent results about comparative performance. We have since experimented with 16 time series of differing complexity using neural networks. The performance of the neural networks is compared with that of the Box-Jenkins method. Our experiments indicate that for time series with long memory, both methods produced comparable results. However, for series with short memory, neural networks outperformed the Box-Jenkins model. Because neural networks can be easily built for multiple-step-ahead forecasting, they may present a better long term forecast model than the Box-Jenkins method. We discussed the representation ability, the model building process and the applicability of the neural net approach. Neural networks appear to provide a promising alternative for time series forecasting.
},
pages = {374--385},
number = {4},
journaltitle = {{ORSA} Journal on Computing},
shortjournal = {{ORSA} Journal on Computing},
author = {Tang, Zaiyong and Fishwick, Paul A.},
urldate = {2023-02-02},
date = {1993-11},
langid = {english},
}
@article{yang_cascade_2022,
title = {Cascade Forward Artificial Neural Network based Behavioral Predicting Approach for the Integrated Satellite-terrestrial Networks},
volume = {27},
issn = {1383-469X, 1572-8153},
url = {https://link.springer.com/10.1007/s11036-021-01875-6},
doi = {10.1007/s11036-021-01875-6},
abstract = {
In order to reduce the risk of authorized users being interrupted in the cognitive satellite wireless network, a multi-step prediction approach based on a cascaded forward artificial neural network is proposed to predict user behavior in the designed scenario. This approach uses the powerful learning ability of the cascaded forward network to analyze the historical spectrum occupancy records of licensed users, and then predict the user behavior in the next few time slots. The prediction result can help the base station in the cognitive network to schedule the dynamic access process of the cognitive users, and reduce the interference caused by the cognitive user to the authorized users. Finally, compared with traditional prediction algorithms, it is verified that the proposed multi-step prediction algorithm can effectively reduce the probability of spectrum conflicts.},
pages = {1569--1577},
number = {4},
journaltitle = {Mobile Networks and Applications},
shortjournal = {Mobile Netw Appl},
author = {Yang, Mingchuan and Xie, Bingyu and Dou, Yingzhe and Xue, Guanchang},
urldate = {2023-02-02},
date = {2022-08},
langid = {english},
file = {Full Text:/home/max/Zotero/storage/P7GGL3IW/Yang et al. - 2022 - Cascade Forward Artificial Neural Network based Be.pdf:application/pdf},
}
@article{uncini_audio_2003,
title = {Audio signal processing by neural networks},
volume = {55},
issn = {09252312},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0925231203003953},
doi = {10.1016/S0925-2312(03)00395-3},
pages = {593--625},
number = {3},
journaltitle = {Neurocomputing},
shortjournal = {Neurocomputing},
author = {Uncini, Aurelio},
urldate = {2023-02-02},
date = {2003-10},
langid = {english},
}
@article{jaeger_echo_2010,
title = {The “echo state” approach to analysing and training recurrent neural networks – with an Erratum note},
author = {Jaeger, Herbert},
date = {2010},
langid = {english},
file = {Jaeger - The “echo state” approach to analysing and trainin.pdf:/home/max/Zotero/storage/RE83F7JW/Jaeger - The “echo state” approach to analysing and trainin.pdf:application/pdf},
}
@article{bengio_learning_1994,
title = {Learning long-term dependencies with gradient descent is difficult},
volume = {5},
issn = {1045-9227, 1941-0093},
url = {https://ieeexplore.ieee.org/document/279181/},
doi = {10.1109/72.279181},
pages = {157--166},
number = {2},
journaltitle = {{IEEE} Transactions on Neural Networks},
shortjournal = {{IEEE} Trans. Neural Netw.},
author = {Bengio, Y. and Simard, P. and Frasconi, P.},
urldate = {2023-02-07},
date = {1994-03},
}
@misc{ioffe_batch_2015,
title = {Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift},
url = {http://arxiv.org/abs/1502.03167},
shorttitle = {Batch Normalization},
abstract = {Training Deep Neural Networks is complicated by the fact that the distribution of each layer's inputs changes during training, as the parameters of the previous layers change. This slows down the training by requiring lower learning rates and careful parameter initialization, and makes it notoriously hard to train models with saturating nonlinearities. We refer to this phenomenon as internal covariate shift, and address the problem by normalizing layer inputs. Our method draws its strength from making normalization a part of the model architecture and performing the normalization for each training mini-batch. Batch Normalization allows us to use much higher learning rates and be less careful about initialization. It also acts as a regularizer, in some cases eliminating the need for Dropout. Applied to a state-of-the-art image classification model, Batch Normalization achieves the same accuracy with 14 times fewer training steps, and beats the original model by a significant margin. Using an ensemble of batch-normalized networks, we improve upon the best published result on {ImageNet} classification: reaching 4.9\% top-5 validation error (and 4.8\% test error), exceeding the accuracy of human raters.},
number = {{arXiv}:1502.03167},
publisher = {{arXiv}},
author = {Ioffe, Sergey and Szegedy, Christian},
urldate = {2023-02-07},
date = {2015-03-02},
eprinttype = {arxiv},
eprint = {1502.03167 [cs]},
keywords = {Computer Science - Machine Learning},
file = {arXiv Fulltext PDF:/home/max/Zotero/storage/KXL5WXYN/Ioffe and Szegedy - 2015 - Batch Normalization Accelerating Deep Network Tra.pdf:application/pdf;arXiv.org Snapshot:/home/max/Zotero/storage/L7Q7LSX3/1502.html:text/html},
}
@article{nair_rectified_2010,
title = {Rectified Linear Units Improve Restricted Boltzmann Machines},
abstract = {Restricted Boltzmann machines were developed using binary stochastic hidden units. These can be generalized by replacing each binary unit by an infinite number of copies that all have the same weights but have progressively more negative biases. The learning and inference rules for these “Stepped Sigmoid Units” are unchanged. They can be approximated efficiently by noisy, rectified linear units. Compared with binary units, these units learn features that are better for object recognition on the {NORB} dataset and face verification on the Labeled Faces in the Wild dataset. Unlike binary units, rectified linear units preserve information about relative intensities as information travels through multiple layers of feature detectors.},
author = {Nair, Vinod and Hinton, Geoffrey E},
date = {2010},
langid = {english},
file = {Nair and Hinton - Rectified Linear Units Improve Restricted Boltzman.pdf:/home/max/Zotero/storage/UHD34AEC/Nair and Hinton - Rectified Linear Units Improve Restricted Boltzman.pdf:application/pdf},
}
@inproceedings{nair_rectified_2010-1,
location = {Madison, {WI}, {USA}},
title = {Rectified linear units improve restricted Boltzmann machines},
isbn = {978-1-60558-907-7},
series = {{ICML}'10},
abstract = {Restricted Boltzmann machines were developed using binary stochastic hidden units. These can be generalized by replacing each binary unit by an infinite number of copies that all have the same weights but have progressively more negative biases. The learning and inference rules for these "Stepped Sigmoid Units" are unchanged. They can be approximated efficiently by noisy, rectified linear units. Compared with binary units, these units learn features that are better for object recognition on the {NORB} dataset and face verification on the Labeled Faces in the Wild dataset. Unlike binary units, rectified linear units preserve information about relative intensities as information travels through multiple layers of feature detectors.},
pages = {807--814},
booktitle = {Proceedings of the 27th International Conference on International Conference on Machine Learning},
publisher = {Omnipress},
author = {Nair, Vinod and Hinton, Geoffrey E.},
urldate = {2023-02-07},
date = {2010-06-21},
}
@misc{pascanu_difficulty_2013,
title = {On the difficulty of training Recurrent Neural Networks},
url = {http://arxiv.org/abs/1211.5063},
abstract = {There are two widely known issues with properly training Recurrent Neural Networks, the vanishing and the exploding gradient problems detailed in Bengio et al. (1994). In this paper we attempt to improve the understanding of the underlying issues by exploring these problems from an analytical, a geometric and a dynamical systems perspective. Our analysis is used to justify a simple yet effective solution. We propose a gradient norm clipping strategy to deal with exploding gradients and a soft constraint for the vanishing gradients problem. We validate empirically our hypothesis and proposed solutions in the experimental section.},
number = {{arXiv}:1211.5063},
publisher = {{arXiv}},
author = {Pascanu, Razvan and Mikolov, Tomas and Bengio, Yoshua},
urldate = {2023-02-07},
date = {2013-02-15},
eprinttype = {arxiv},
eprint = {1211.5063 [cs]},
keywords = {Computer Science - Machine Learning},
file = {arXiv Fulltext PDF:/home/max/Zotero/storage/XYSL9QF2/Pascanu et al. - 2013 - On the difficulty of training Recurrent Neural Net.pdf:application/pdf;arXiv.org Snapshot:/home/max/Zotero/storage/94BRC7T2/1211.html:text/html},
}
@article{hochreiter_long_1997,
title = {Long Short-Term Memory},
volume = {9},
issn = {0899-7667, 1530-888X},
url = {https://direct.mit.edu/neco/article/9/8/1735-1780/6109},
doi = {10.1162/neco.1997.9.8.1735},
abstract = {Learning to store information over extended time intervals by recurrent backpropagation takes a very long time, mostly because of insufficient, decaying error backflow. We briefly review Hochreiter's (1991) analysis of this problem, then address it by introducing a novel, efficient, gradient based method called long short-term memory ({LSTM}). Truncating the gradient where this does not do harm, {LSTM} can learn to bridge minimal time lags in excess of 1000 discrete-time steps by enforcing constant error flow through constant error carousels within special units. Multiplicative gate units learn to open and close access to the constant error flow. {LSTM} is local in space and time; its computational complexity per time step and weight is O(1). Our experiments with artificial data involve local, distributed, real-valued, and noisy pattern representations. In comparisons with real-time recurrent learning, back propagation through time, recurrent cascade correlation, Elman nets, and neural sequence chunking, {LSTM} leads to many more successful runs, and learns much faster. {LSTM} also solves complex, artificial long-time-lag tasks that have never been solved by previous recurrent network algorithms.},
pages = {1735--1780},
number = {8},
journaltitle = {Neural Computation},
shortjournal = {Neural Computation},
author = {Hochreiter, Sepp and Schmidhuber, Jürgen},
urldate = {2023-02-07},
date = {1997-11-01},
langid = {english},
}
@inproceedings{mayer_system_2006,
location = {Beijing, China},
title = {A System for Robotic Heart Surgery that Learns to Tie Knots Using Recurrent Neural Networks},
isbn = {978-1-4244-0258-8},
url = {http://ieeexplore.ieee.org/document/4059310/},
doi = {10.1109/IROS.2006.282190},
eventtitle = {2006 {IEEE}/{RSJ} International Conference on Intelligent Robots and Systems},
pages = {543--548},
booktitle = {2006 {IEEE}/{RSJ} International Conference on Intelligent Robots and Systems},
publisher = {{IEEE}},
author = {Mayer, Hermann and Gomez, Faustino and Wierstra, Daan and Nagy, Istvan and Knoll, Alois and Schmidhuber, Jurgen},
urldate = {2023-02-07},
date = {2006-10},
file = {Full Text:/home/max/Zotero/storage/GPCYE6KH/Mayer et al. - 2006 - A System for Robotic Heart Surgery that Learns to .pdf:application/pdf},
}
@misc{sak_long_2014,
title = {Long Short-Term Memory Based Recurrent Neural Network Architectures for Large Vocabulary Speech Recognition},
url = {http://arxiv.org/abs/1402.1128},
abstract = {Long Short-Term Memory ({LSTM}) is a recurrent neural network ({RNN}) architecture that has been designed to address the vanishing and exploding gradient problems of conventional {RNNs}. Unlike feedforward neural networks, {RNNs} have cyclic connections making them powerful for modeling sequences. They have been successfully used for sequence labeling and sequence prediction tasks, such as handwriting recognition, language modeling, phonetic labeling of acoustic frames. However, in contrast to the deep neural networks, the use of {RNNs} in speech recognition has been limited to phone recognition in small scale tasks. In this paper, we present novel {LSTM} based {RNN} architectures which make more effective use of model parameters to train acoustic models for large vocabulary speech recognition. We train and compare {LSTM}, {RNN} and {DNN} models at various numbers of parameters and configurations. We show that {LSTM} models converge quickly and give state of the art speech recognition performance for relatively small sized models.},
number = {{arXiv}:1402.1128},
publisher = {{arXiv}},
author = {Sak, Haşim and Senior, Andrew and Beaufays, Françoise},
urldate = {2023-02-07},
date = {2014-02-05},
eprinttype = {arxiv},
eprint = {1402.1128 [cs, stat]},
keywords = {Computer Science - Neural and Evolutionary Computing, Computer Science - Computation and Language, Computer Science - Machine Learning, Statistics - Machine Learning},
file = {arXiv Fulltext PDF:/home/max/Zotero/storage/NP8NKTXA/Sak et al. - 2014 - Long Short-Term Memory Based Recurrent Neural Netw.pdf:application/pdf;arXiv.org Snapshot:/home/max/Zotero/storage/29VFVQCU/1402.html:text/html},
}
@misc{li_constructing_2015,
title = {Constructing Long Short-Term Memory based Deep Recurrent Neural Networks for Large Vocabulary Speech Recognition},
url = {http://arxiv.org/abs/1410.4281},
abstract = {Long short-term memory ({LSTM}) based acoustic modeling methods have recently been shown to give state-of-the-art performance on some speech recognition tasks. To achieve a further performance improvement, in this research, deep extensions on {LSTM} are investigated considering that deep hierarchical model has turned out to be more efficient than a shallow one. Motivated by previous research on constructing deep recurrent neural networks ({RNNs}), alternative deep {LSTM} architectures are proposed and empirically evaluated on a large vocabulary conversational telephone speech recognition task. Meanwhile, regarding to multi-{GPU} devices, the training process for {LSTM} networks is introduced and discussed. Experimental results demonstrate that the deep {LSTM} networks benefit from the depth and yield the state-of-the-art performance on this task.},
number = {{arXiv}:1410.4281},
publisher = {{arXiv}},
author = {Li, Xiangang and Wu, Xihong},
urldate = {2023-02-07},
date = {2015-05-10},
eprinttype = {arxiv},
eprint = {1410.4281 [cs]},
keywords = {Computer Science - Neural and Evolutionary Computing, Computer Science - Computation and Language},
file = {arXiv Fulltext PDF:/home/max/Zotero/storage/N9MA6I2G/Li and Wu - 2015 - Constructing Long Short-Term Memory based Deep Rec.pdf:application/pdf;arXiv.org Snapshot:/home/max/Zotero/storage/9999EC4L/1410.html:text/html},
}
@book{goodfellow_deep_2016,
location = {Cambridge, Massachusetts},
title = {Deep learning},
isbn = {978-0-262-03561-3},
series = {Adaptive computation and machine learning},
pagetotal = {775},
publisher = {The {MIT} Press},
author = {Goodfellow, Ian and Bengio, Yoshua and Courville, Aaron},
date = {2016},
keywords = {Machine learning},
}
@article{nielsen_neural_2015,
title = {Neural Networks and Deep Learning},
url = {http://neuralnetworksanddeeplearning.com},
author = {Nielsen, Michael A.},
urldate = {2023-02-10},
date = {2015},
langid = {english},
note = {Publisher: Determination Press},
file = {Snapshot:/home/max/Zotero/storage/AZGCN7FC/neuralnetworksanddeeplearning.com.html:text/html},
}
@article{abdolrasol_artificial_2021,
title = {Artificial Neural Networks Based Optimization Techniques: A Review},
volume = {10},
issn = {2079-9292},
url = {https://www.mdpi.com/2079-9292/10/21/2689},
doi = {10.3390/electronics10212689},
shorttitle = {Artificial Neural Networks Based Optimization Techniques},
abstract = {In the last few years, intensive research has been done to enhance artificial intelligence ({AI}) using optimization techniques. In this paper, we present an extensive review of artificial neural networks ({ANNs}) based optimization algorithm techniques with some of the famous optimization techniques, e.g., genetic algorithm ({GA}), particle swarm optimization ({PSO}), artificial bee colony ({ABC}), and backtracking search algorithm ({BSA}) and some modern developed techniques, e.g., the lightning search algorithm ({LSA}) and whale optimization algorithm ({WOA}), and many more. The entire set of such techniques is classified as algorithms based on a population where the initial population is randomly created. Input parameters are initialized within the specified range, and they can provide optimal solutions. This paper emphasizes enhancing the neural network via optimization algorithms by manipulating its tuned parameters or training parameters to obtain the best structure network pattern to dissolve the problems in the best way. This paper includes some results for improving the {ANN} performance by {PSO}, {GA}, {ABC}, and {BSA} optimization techniques, respectively, to search for optimal parameters, e.g., the number of neurons in the hidden layers and learning rate. The obtained neural net is used for solving energy management problems in the virtual power plant system.},
pages = {2689},
number = {21},
journaltitle = {Electronics},
shortjournal = {Electronics},
author = {Abdolrasol, Maher G. M. and Hussain, S. M. Suhail and Ustun, Taha Selim and Sarker, Mahidur R. and Hannan, Mahammad A. and Mohamed, Ramizi and Ali, Jamal Abd and Mekhilef, Saad and Milad, Abdalrhman},
urldate = {2023-02-10},
date = {2021-11-03},
langid = {english},
file = {Full Text:/home/max/Zotero/storage/NDZS3FC3/Abdolrasol et al. - 2021 - Artificial Neural Networks Based Optimization Tech.pdf:application/pdf},
}
@misc{sun_survey_2019,
title = {A Survey of Optimization Methods from a Machine Learning Perspective},
url = {http://arxiv.org/abs/1906.06821},
abstract = {Machine learning develops rapidly, which has made many theoretical breakthroughs and is widely applied in various fields. Optimization, as an important part of machine learning, has attracted much attention of researchers. With the exponential growth of data amount and the increase of model complexity, optimization methods in machine learning face more and more challenges. A lot of work on solving optimization problems or improving optimization methods in machine learning has been proposed successively. The systematic retrospect and summary of the optimization methods from the perspective of machine learning are of great significance, which can offer guidance for both developments of optimization and machine learning research. In this paper, we first describe the optimization problems in machine learning. Then, we introduce the principles and progresses of commonly used optimization methods. Next, we summarize the applications and developments of optimization methods in some popular machine learning fields. Finally, we explore and give some challenges and open problems for the optimization in machine learning.},
number = {{arXiv}:1906.06821},
publisher = {{arXiv}},
author = {Sun, Shiliang and Cao, Zehui and Zhu, Han and Zhao, Jing},
urldate = {2023-02-10},
date = {2019-10-23},
eprinttype = {arxiv},
eprint = {1906.06821 [cs, math, stat]},
keywords = {Computer Science - Machine Learning, Statistics - Machine Learning, Mathematics - Optimization and Control},
file = {arXiv Fulltext PDF:/home/max/Zotero/storage/4S8GNPFA/Sun et al. - 2019 - A Survey of Optimization Methods from a Machine Le.pdf:application/pdf;arXiv.org Snapshot:/home/max/Zotero/storage/X3GTA4UP/1906.html:text/html},
}
@article{lee_training_2016,
title = {Training Deep Spiking Neural Networks Using Backpropagation},
volume = {10},
issn = {1662-453X},
url = {http://journal.frontiersin.org/article/10.3389/fnins.2016.00508/full},
doi = {10.3389/fnins.2016.00508},
journaltitle = {Frontiers in Neuroscience},
shortjournal = {Front. Neurosci.},
author = {Lee, Jun Haeng and Delbruck, Tobi and Pfeiffer, Michael},
urldate = {2023-02-10},
date = {2016-11-08},
file = {Full Text:/home/max/Zotero/storage/GKTG26I9/Lee et al. - 2016 - Training Deep Spiking Neural Networks Using Backpr.pdf:application/pdf},
}
@inproceedings{li_iterative_2004,
title = {Iterative Linear Quadratic Regulator Design for Nonlinear Biological Movement Systems},
volume = {1},
pages = {222--229},
booktitle = {Proceedings of the 1st International Conference on Informatics in Control, Automation and Robotics, ({ICINCO} 2004)},
author = {Li, Weiwei and Todorov, Emanuel},
date = {2004-01},
}
@article{demin_recurrent_2018,
title = {Recurrent Spiking Neural Network Learning Based on a Competitive Maximization of Neuronal Activity},
volume = {12},
issn = {1662-5196},
url = {https://www.frontiersin.org/article/10.3389/fninf.2018.00079/full},
doi = {10.3389/fninf.2018.00079},
pages = {79},
journaltitle = {Frontiers in Neuroinformatics},
shortjournal = {Front. Neuroinform.},
author = {Demin, Vyacheslav and Nekhaev, Dmitry},
urldate = {2023-03-20},
date = {2018-11-15},
file = {Full Text:/home/max/Zotero/storage/TR9QZZ74/Demin and Nekhaev - 2018 - Recurrent Spiking Neural Network Learning Based on.pdf:application/pdf},
}
@article{yi_learning_2023,
title = {Learning rules in spiking neural networks: A survey},
volume = {531},
issn = {09252312},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0925231223001662},
doi = {10.1016/j.neucom.2023.02.026},
shorttitle = {Learning rules in spiking neural networks},
pages = {163--179},
journaltitle = {Neurocomputing},
shortjournal = {Neurocomputing},
author = {Yi, Zexiang and Lian, Jing and Liu, Qidong and Zhu, Hegui and Liang, Dong and Liu, Jizhao},
urldate = {2023-03-20},
date = {2023-04},
langid = {english},
}
@article{guo_neural_2021,
title = {Neural Coding in Spiking Neural Networks: A Comparative Study for Robust Neuromorphic Systems},
volume = {15},
issn = {1662-453X},
url = {https://www.frontiersin.org/articles/10.3389/fnins.2021.638474/full},
doi = {10.3389/fnins.2021.638474},
shorttitle = {Neural Coding in Spiking Neural Networks},
abstract = {Various hypotheses of information representation in brain, referred to as neural codes, have been proposed to explain the information transmission between neurons. Neural coding plays an essential role in enabling the brain-inspired spiking neural networks ({SNNs}) to perform different tasks. To search for the best coding scheme, we performed an extensive comparative study on the impact and performance of four important neural coding schemes, namely, rate coding, time-to-first spike ({TTFS}) coding, phase coding, and burst coding. The comparative study was carried out using a biological 2-layer {SNN} trained with an unsupervised spike-timing-dependent plasticity ({STDP}) algorithm. Various aspects of network performance were considered, including classification accuracy, processing latency, synaptic operations ({SOPs}), hardware implementation, network compression efficacy, input and synaptic noise resilience, and synaptic fault tolerance. The classification tasks on Modified National Institute of Standards and Technology ({MNIST}) and Fashion-{MNIST} datasets were applied in our study. For hardware implementation, area and power consumption were estimated for these coding schemes, and the network compression efficacy was analyzed using pruning and quantization techniques. Different types of input noise and noise variations in the datasets were considered and applied. Furthermore, the robustness of each coding scheme to the non-ideality-induced synaptic noise and fault in analog neuromorphic systems was studied and compared. Our results show that {TTFS} coding is the best choice in achieving the highest computational performance with very low hardware implementation overhead. {TTFS} coding requires 4x/7.5x lower processing latency and 3.5x/6.5x fewer {SOPs} than rate coding during the training/inference process. Phase coding is the most resilient scheme to input noise. Burst coding offers the highest network compression efficacy and the best overall robustness to hardware non-idealities for both training and inference processes. The study presented in this paper reveals the design space created by the choice of each coding scheme, allowing designers to frame each scheme in terms of its strength and weakness given a design's constraints and considerations in neuromorphic systems.},
pages = {638474},
journaltitle = {Frontiers in Neuroscience},
shortjournal = {Front. Neurosci.},
author = {Guo, Wenzhe and Fouda, Mohammed E. and Eltawil, Ahmed M. and Salama, Khaled Nabil},
urldate = {2023-03-20},
date = {2021-03-04},
file = {Full Text:/home/max/Zotero/storage/JTV2TJ7M/Guo et al. - 2021 - Neural Coding in Spiking Neural Networks A Compar.pdf:application/pdf},
}
@article{hodgkin_currents_1952,
title = {Currents carried by sodium and potassium ions through the membrane of the giant axon of \textit{Loligo}},
volume = {116},
issn = {0022-3751, 1469-7793},
url = {https://onlinelibrary.wiley.com/doi/10.1113/jphysiol.1952.sp004717},
doi = {10.1113/jphysiol.1952.sp004717},
pages = {449--472},
number = {4},
journaltitle = {The Journal of Physiology},
shortjournal = {The Journal of Physiology},
author = {Hodgkin, A. L. and Huxley, A. F.},
urldate = {2023-03-21},
date = {1952-04-28},
langid = {english},
file = {Full Text:/home/max/Zotero/storage/XBAWFI9K/Hodgkin and Huxley - 1952 - Currents carried by sodium and potassium ions thro.pdf:application/pdf},
}
@article{feldman_spike-timing_2012,
title = {The Spike-Timing Dependence of Plasticity},
volume = {75},
issn = {08966273},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0896627312007039},
doi = {10.1016/j.neuron.2012.08.001},
pages = {556--571},
number = {4},
journaltitle = {Neuron},
shortjournal = {Neuron},
author = {Feldman, Daniel E.},
urldate = {2023-03-21},
date = {2012-08},