% NOTE(review): removed web-scrape artifacts (GitHub page chrome and a 1-1000 line-number
% gutter) that were accidentally captured at the top of this .bib file. BibTeX ignores
% text outside entries, but the residue was pure noise and is gone.
%% This BibTeX bibliography file was created using BibDesk.
%% https://bibdesk.sourceforge.io/
%% Created for Laurent Perrinet at 2023-09-25 23:15:05 +0200
%% Saved with string encoding Unicode (UTF-8)
@article{abeles_role_1982,
author = {Abeles, Moshe},
journal = {Israel Journal of Medical Sciences},
keywords = {\#nosource},
number = {1},
pages = {83--92},
title = {Role of the cortical neuron: integrator or coincidence detector?},
volume = {18},
year = {1982}}
@article{barlow_unsupervised_1989,
abstract = {What use can the brain make of the massive flow of sensory information that occurs without any associated rewards or punishments? This question is reviewed in the light of connectionist models of unsupervised learning and some older ideas, namely the cognitive maps and working models of Tolman and Craik, and the idea that redundancy is important for understanding perception (Attneave 1954), the physiology of sensory pathways (Barlow 1959), and pattern recognition (Watanabe 1960). It is argued that (1) The redundancy of sensory messages provides the knowledge incorporated in the maps or models. (2) Some of this knowledge can be obtained by observations of mean, variance, and covariance of sensory messages, and perhaps also by a method called ``minimum entropy coding.'' (3) Such knowledge may be incorporated in a model of ``what usually happens'' with which incoming messages are automatically compared, enabling unexpected discrepancies to be immediately identified. (4) Knowledge of the sort incorporated into such a filter is a necessary prerequisite of ordinary learning, and a representation whose elements are independent makes it possible to form associations with logical functions of the elements, not just with the elements themselves.},
author = {Barlow, Horace B.},
doi = {10.1162/neco.1989.1.3.295},
issn = {0899-7667},
journal = {Neural Computation},
keywords = {\#nosource},
month = sep,
number = {3},
pages = {295--311},
title = {Unsupervised {Learning}},
url = {https://doi.org/10.1162/neco.1989.1.3.295},
urldate = {2022-09-15},
volume = {1},
year = {1989},
bdsk-url-1 = {https://doi.org/10.1162/neco.1989.1.3.295}}
@article{baudot_animation_2013,
abstract = {Synaptic noise is thought to be a limiting factor for computational efficiency in the brain. In visual cortex (V1), ongoing activity is present in vivo, and spiking responses to simple stimuli are highly unreliable across trials. Stimulus statistics used to plot receptive fields, however, are quite different from those experienced during natural visuomotor exploration. We recorded V1 neurons intracellularly in the anaesthetized and paralyzed cat and compared their spiking and synaptic responses to full field natural images animated by simulated eye-movements to those evoked by simpler (grating) or higher dimensionality statistics (dense noise). In most cells, natural scene animation was the only condition where high temporal precision (in the 10--20 ms range) was maintained during sparse and reliable activity. At the subthreshold level, irregular but highly reproducible membrane potential dynamics were observed, even during long (several 100 ms) ``spike-less'' periods. We showed that both the spatial structure of natural scenes and the temporal dynamics of eye-movements increase the signal-to-noise ratio by a non-linear amplification of the signal combined with a reduction of the subthreshold contextual noise. 
These data support the view that the sparsening and the time precision of the neural code in V1 may depend primarily on three factors: (1) broadband input spectrum: the bandwidth must be rich enough for recruiting optimally the diversity of spatial and time constants during recurrent processing; (2) tight temporal interplay of excitation and inhibition: conductance measurements demonstrate that natural scene statistics narrow selectively the duration of the spiking opportunity window during which the balance between excitation and inhibition changes transiently and reversibly; (3) signal energy in the lower frequency band: a minimal level of power is needed below 10 Hz to reach consistently the spiking threshold, a situation rarely reached with visual dense noise.},
author = {Baudot, Pierre and Levy, Manuel and Marre, Olivier and Monier, Cyril and Pananceau, Marc and Fr{\'e}gnac, Yves},
doi = {10.3389/fncir.2013.00206},
issn = {1662-5110},
journal = {Frontiers in Neural Circuits},
keywords = {\#nosource},
pages = {206},
title = {Animation of natural scene by virtual eye-movements evokes high precision and low noise in {V1} neurons},
url = {https://www.frontiersin.org/articles/10.3389/fncir.2013.00206},
urldate = {2022-09-29},
volume = {7},
year = {2013},
bdsk-url-1 = {https://www.frontiersin.org/articles/10.3389/fncir.2013.00206},
bdsk-url-2 = {https://doi.org/10.3389/fncir.2013.00206}}
@article{benosman_asynchronous_2012,
abstract = {This paper introduces a process to compute optical flow using an asynchronous event-based retina at high speed and low computational load. A new generation of artificial vision sensors has now started to rely on biologically inspired designs for light acquisition. Biological retinas, and their artificial counterparts, are totally asynchronous and data driven and rely on a paradigm of light acquisition radically different from most of the currently used frame-grabber technologies. This paper introduces a framework for processing visual data using asynchronous event-based acquisition, providing a method for the evaluation of optical flow. The paper shows that current limitations of optical flow computation can be overcome by using event-based visual acquisition, where high data sparseness and high temporal resolution permit the computation of optical flow with micro-second accuracy and at very low computational cost.},
author = {Benosman, Ryad and Ieng, Sio-Hoi and Clercq, Charles and Bartolozzi, Chiara and Srinivasan, Mandyam},
doi = {10.1016/j.neunet.2011.11.001},
journal = {Neural Networks},
keywords = {\#nosource, Asynchronous acquisition, Event-based vision, Frameless vision, Optical flow, Spikes, Temporal dynamics},
language = {en},
pages = {32--44},
title = {Asynchronous frameless event-based optical flow},
url = {https://doi.org/10.1016/j.neunet.2011.11.001},
volume = {27},
year = {2012},
bdsk-url-1 = {https://doi.org/10/b55t75},
bdsk-url-2 = {https://doi.org/10.1016/j.neunet.2011.11.001}}
@article{benosman_event-based_2014,
abstract = {This paper introduces a new methodology to compute dense visual flow using the precise timings of spikes from an asynchronous event-based retina. Biological retinas, and their artificial counterparts, are totally asynchronous and data-driven and rely on a paradigm of light acquisition radically different from most of the currently used frame-grabber technologies. This paper introduces a framework to estimate visual flow from the local properties of events' spatiotemporal space. We will show that precise visual flow orientation and amplitude can be estimated using a local differential approach on the surface defined by coactive events. Experimental results are presented; they show the method adequacy with high data sparseness and temporal resolution of event-based acquisition that allows the computation of motion flow with microsecond accuracy and at very low computational cost.},
author = {Benosman, Ryad and Clercq, Charles and Lagorce, Xavier and Ieng, Sio-Hoi and Bartolozzi, Chiara},
doi = {10.1109/tnnls.2013.2273537},
issn = {2162-237X, 2162-2388},
journal = {IEEE Transactions on Neural Networks and Learning Systems},
keywords = {\#nosource},
language = {en},
month = feb,
number = {2},
pages = {407--417},
title = {Event-{Based} {Visual} {Flow}},
url = {https://www.neuromorphic-vision.com/public/publications/3/publication.pdf},
urldate = {2022-02-01},
volume = {25},
year = {2014},
bdsk-url-1 = {https://www.neuromorphic-vision.com/public/publications/3/publication.pdf},
bdsk-url-2 = {https://doi.org/10.1109/tnnls.2013.2273537}}
@article{Benvenuti2020,
abstract = {What are the neural mechanisms underlying motion integration of translating objects? Visual motion integration is generally conceived of as a feedforward, hierarchical, information processing. However, feedforward models fail to account for many contextual effects revealed using natural moving stimuli. In particular, a translating object evokes a sequence of transient feedforward responses in the primary visual cortex but also propagations of activity through horizontal and feedback pathways. We investigated how these pathways shape the representation of a translating bar in monkey V1. We show that, for long trajectories, spiking activity builds-up hundreds of milliseconds before the bar enters the neurons receptive fields. Using VSDI and LFP recordings guided by a phenomenological model of propagation dynamics, we demonstrate that this anticipatory response arises from the interplay between horizontal and feedback networks driving V1 neurons well ahead of their feedforward inputs. This mechanism could subtend several perceptual contextual effects observed with translating objects.},
author = {Benvenuti, Giacomo and Chemla, Sandrine and Boonman, Arjan and Perrinet, Laurent U and Masson, Guillaume S and Chavane, Frederic},
copyright = {All rights reserved},
doi = {10.1101/2020.03.26.010017},
journal = {bioRxiv},
language = {en},
month = mar,
title = {Anticipatory responses along motion trajectories in awake monkey area {V1}},
url = {https://www.biorxiv.org/content/10.1101/2020.03.26.010017v1},
urldate = {2020-03-31},
year = {2020},
bdsk-url-1 = {https://www.biorxiv.org/content/10.1101/2020.03.26.010017v1},
bdsk-url-2 = {https://doi.org/10.1101/2020.03.26.010017}}
@article{berens_fast_2012,
abstract = {Orientation tuning has been a classic model for understanding single-neuron computation in the neocortex. However, little is known about how orientation can be read out from the activity of neural populations, in particular in alert animals. Our study is a first step toward that goal. We recorded from up to 20 well isolated single neurons in the primary visual cortex of alert macaques simultaneously and applied a simple, neurally plausible decoder to read out the population code. We focus on two questions: First, what are the time course and the timescale at which orientation can be read out from the population response? Second, how complex does the decoding mechanism in a downstream neuron have to be to reliably discriminate between visual stimuli with different orientations? We show that the neural ensembles in primary visual cortex of awake macaques represent orientation in a way that facilitates a fast and simple readout mechanism: With an average latency of 30--80 ms, the population code can be read out instantaneously with a short integration time of only tens of milliseconds, and neither stimulus contrast nor correlations need to be taken into account to compute the optimal synaptic weight pattern. Our study shows that---similar to the case of single-neuron computation---the representation of orientation in the spike patterns of neural populations can serve as an exemplary case for understanding the computations performed by neural ensembles underlying visual processing during behavior.},
author = {Berens, Philipp and Ecker, Alexander S. and Cotton, R. James and Ma, Wei Ji and Bethge, Matthias and Tolias, Andreas S.},
copyright = {Copyright {\copyright} 2012 the authors 0270-6474/12/3210618-09\$15.00/0. This article is freely available online through the J Neurosci Open Choice option.},
doi = {10.1523/jneurosci.1335-12.2012},
issn = {0270-6474, 1529-2401},
journal = {Journal of Neuroscience},
keywords = {\#nosource},
language = {en},
month = aug,
number = {31},
pages = {10618--10626},
pmid = {22855811},
title = {A {Fast} and {Simple} {Population} {Code} for {Orientation} in {Primate} {V1}},
url = {https://www.jneurosci.org/content/32/31/10618},
urldate = {2020-11-09},
volume = {32},
year = {2012},
bdsk-url-1 = {https://www.jneurosci.org/content/32/31/10618},
bdsk-url-2 = {https://doi.org/10.1523/jneurosci.1335-12.2012}}
@article{bernert_attention-based_2018,
abstract = {Bio-inspired computing using artificial spiking neural networks promises performances outperforming currently available computational approaches. Yet, the number of applications of such networks remains limited due to the absence of generic training procedures for complex pattern recognition, which require the design of dedicated architectures for each situation. We developed a spike-timing-dependent plasticity (STDP) spiking neural network (SSN) to address spike-sorting, a central pattern recognition problem in neuroscience. This network is designed to process an extracellular neural signal in an online and unsupervised fashion. The signal stream is continuously fed to the network and processed through several layers to output spike trains matching the truth after a short learning period requiring only few data. The network features an attention mechanism to handle the scarcity of action potential occurrences in the signal, and a threshold adaptation mechanism to handle patterns with different sizes. This method outperforms two existing spike-sorting algorithms at low signal-to-noise ratio (SNR) and can be adapted to process several channels simultaneously in the case of tetrode recordings. Such attention-based STDP network applied to spike-sorting opens perspectives to embed neuromorphic processing of neural data in future brain implants.},
author = {Bernert, Marie and Yvert, Blaise},
doi = {10.1142/s0129065718500594},
issn = {0129-0657},
journal = {International Journal of Neural Systems},
month = dec,
number = {8},
pages = {1850059},
title = {An {Attention}-{Based} {Spiking} {Neural} {Network} for {Unsupervised} {Spike}-{Sorting}},
url = {https://www.worldscientific.com/doi/10.1142/S0129065718500594},
urldate = {2021-01-26},
volume = {29},
year = {2018},
bdsk-url-1 = {https://www.worldscientific.com/doi/10.1142/S0129065718500594},
bdsk-url-2 = {https://doi.org/10.1142/s0129065718500594}}
@techreport{bernert_fully_2017,
abstract = {Spike sorting is a crucial step of neural data processing widely used in neuroscience and neuroprosthetics. However, current methods remain not fully automatic and require heavy computations making them not embeddable in implantable devices. To overcome these limitations, we propose a novel method based on an artificial spiking neural network designed to process neural data online and completely automatically. An input layer continuously encodes the data stream into artificial spike trains, which are then processed by two further layers to output artificial trains of spikes reproducing the real spiking activity present in the input signal. The proposed method can be adapted to process several channels simultaneously in the case of tetrode recordings. It outperforms two existing algorithms at low SNR and has the advantage to be compatible with neuromorphic computing and the perspective of being embedded in very low-power analog systems for future implantable devices serving neurorehabilitation applications.},
author = {Bernert, Marie and Yvert, Blaise},
copyright = {{\copyright} 2017, Posted by Cold Spring Harbor Laboratory. This pre-print is available under a Creative Commons License (Attribution 4.0 International), CC BY 4.0, as described at http://creativecommons.org/licenses/by/4.0/},
doi = {10.1101/236224},
institution = {bioRxiv},
language = {en},
month = dec,
title = {Fully unsupervised online spike sorting based on an artificial spiking neural network},
type = {Preprint},
url = {https://www.biorxiv.org/content/10.1101/236224v1},
urldate = {2022-04-08},
year = {2017},
bdsk-url-1 = {https://www.biorxiv.org/content/10.1101/236224v1},
bdsk-url-2 = {https://doi.org/10.1101/236224}}
@article{bohte_error-backpropagation_2002,
author = {Bohte, Sander M. and Kok, Joost N. and La Poutr{\'e}, Han},
title = {Error-backpropagation in temporally encoded networks of spiking neurons},
journal = {Neurocomputing},
volume = {48},
number = {1},
pages = {17--37},
month = oct,
year = {2002},
doi = {10.1016/S0925-2312(01)00658-0},
issn = {0925-2312},
language = {en},
keywords = {\#nosource, Error-backpropagation, Spiking neurons, Temporal coding, ⛔ No INSPIRE recid found},
url = {https://www.sciencedirect.com/science/article/pii/S0925231201006580},
urldate = {2022-09-28},
abstract = {For a network of spiking neurons that encodes information in the timing of individual spike times, we derive a supervised learning rule, SpikeProp, akin to traditional error-backpropagation. With this algorithm, we demonstrate how networks of spiking neurons with biologically reasonable action potentials can perform complex non-linear classification in fast temporal coding just as well as rate-coded networks. We perform experiments for the classical XOR problem, when posed in a temporal setting, as well as for a number of other benchmark datasets. Comparing the (implicit) number of spiking neurons required for the encoding of the interpolated XOR problem, the trained networks demonstrate that temporal coding is a viable code for fast neural information processing, and as such requires less neurons than instantaneous rate-coding. Furthermore, we find that reliable temporal computation in the spiking networks was only accomplished when using spike response functions with a time constant longer than the coding interval, as has been predicted by theoretical considerations.},
bdsk-url-1 = {https://www.sciencedirect.com/science/article/pii/S0925231201006580},
bdsk-url-2 = {https://doi.org/10.1016/S0925-2312(01)00658-0}}
@article{bohte_evidence_2004,
author = {Bohte, Sander M.},
doi = {10.1023/B:NACO.0000027755.02868.60},
journal = {Natural Computing},
keywords = {\#nosource},
number = {2},
pages = {195--206},
title = {The evidence for neural information processing with precise spike-times: {A} survey},
volume = {3},
year = {2004},
bdsk-url-1 = {https://doi.org/10.1023/B:NACO.0000027755.02868.60}}
@article{boutin_effect_2020,
abstract = {Hierarchical Sparse Coding (HSC) is a powerful model to efficiently represent multi-dimensional, structured data such as images. The simplest solution to solve this computationally hard problem is to decompose it into independent layer-wise subproblems. However, neuroscientific evidence would suggest inter-connecting these subproblems as in the Predictive Coding (PC) theory, which adds top-down connections between consecutive layers. In this study, a new model called 2-Layers Sparse Predictive Coding (2L-SPC) is introduced to assess the impact of this inter-layer feedback connection. In particular, the 2L-SPC is compared with a Hierarchical Lasso (Hi-La) network made out of a sequence of independent Lasso layers. The 2L-SPC and the 2-layers Hi-La networks are trained on 4 different databases and with different sparsity parameters on each layer. First, we show that the overall prediction error generated by 2L-SPC is lower thanks to the feedback mechanism as it transfers prediction error between layers. Second, we demonstrate that the inference stage of the 2L-SPC is faster to converge than for the Hi-La model. Third, we show that the 2L-SPC also accelerates the learning process. Finally, the qualitative analysis of both models dictionaries, supported by their activation probability, show that the 2L-SPC features are more generic and informative.},
author = {Boutin, Victor and Franciosini, Angelo and Ruffier, Franck and Perrinet, Laurent U},
copyright = {All rights reserved},
doi = {10.1162/neco_a_01325},
journal = {Neural Computation},
keywords = {\#nosource, deep-learning, sparse coding},
month = nov,
number = {11},
pages = {2279--2309},
title = {Effect of top-down connections in {Hierarchical} {Sparse} {Coding}},
url = {https://laurentperrinet.github.io/publication/boutin-franciosini-ruffier-perrinet-20-feedback/},
volume = {32},
year = {2020},
bdsk-url-1 = {https://laurentperrinet.github.io/publication/boutin-franciosini-ruffier-perrinet-20-feedback/},
bdsk-url-2 = {https://doi.org/10.1162/neco_a_01325}}
@article{boutin_pooling_2022,
author = {Boutin, Victor and Franciosini, Angelo and Chavane, Fr{\'e}d{\'e}ric and Perrinet, Laurent U.},
title = {Pooling strategies in {V1} can account for the functional and structural diversity across species},
journal = {PLOS Computational Biology},
volume = {18},
number = {7},
pages = {e1010270},
year = {2022},
doi = {10.1371/journal.pcbi.1010270},
issn = {1553-7358},
language = {en},
keywords = {\#nosource, Coding mechanisms, Convolution, Neural networks, Neuronal tuning, Neurons, Neurophysiology, Visual cortex, Visual system, ⛔ No INSPIRE recid found},
url = {https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1010270},
urldate = {2022-09-14},
abstract = {Neurons in the primary visual cortex are selective to orientation with various degrees of selectivity to the spatial phase, from high selectivity in simple cells to low selectivity in complex cells. Various computational models have suggested a possible link between the presence of phase invariant cells and the existence of orientation maps in higher mammals' V1. These models, however, do not explain the emergence of complex cells in animals that do not show orientation maps. In this study, we build a theoretical model based on a convolutional network called Sparse Deep Predictive Coding (SDPC) and show that a single computational mechanism, pooling, allows the SDPC model to account for the emergence in V1 of complex cells with or without that of orientation maps, as observed in distinct species of mammals. In particular, we observed that pooling in the feature space is directly related to the orientation map formation while pooling in the retinotopic space is responsible for the emergence of a complex cells population. Introducing different forms of pooling in a predictive model of early visual processing as implemented in SDPC can therefore be viewed as a theoretical framework that explains the diversity of structural and functional phenomena observed in V1.},
bdsk-url-1 = {https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1010270},
bdsk-url-2 = {https://doi.org/10.1371/journal.pcbi.1010270}}
@article{boutin_sparse_2020,
abstract = {Both neurophysiological and psychophysical experiments have pointed out the crucial role of recurrent and feedback connections to process context-dependent information in the early visual cortex. While numerous models have accounted for feedback effects at either neural or representational level, none of them were able to bind those two levels of analysis. Is it possible to describe feedback effects at both levels using the same model? We answer this question by combining Predictive Coding (PC) and Sparse Coding (SC) into a hierarchical and convolutional framework. In this Sparse Deep Predictive Coding (SDPC) model, the SC component models the internal recurrent processing within each layer, and the PC component describes the interactions between layers using feedforward and feedback connections. Here, we train a 2-layered SDPC on two different databases of images, and we interpret it as a model of the early visual system (V1 \& V2). We first demonstrate that once the training has converged, SDPC exhibits oriented and localized receptive fields in V1 and more complex features in V2. Second, we analyze the effects of feedback on the neural organization beyond the classical receptive field of V1 neurons using interaction maps. These maps are similar to association fields and reflect the Gestalt principle of good continuation. We demonstrate that feedback signals reorganize interaction maps and modulate neural activity to promote contour integration. Third, we demonstrate at the representational level that the SDPC feedback connections are able to overcome noise in input images. Therefore, the SDPC captures the association field principle at the neural level which results in better disambiguation of blurred images at the representational level.},
author = {Boutin, Victor and Franciosini, Angelo and Chavane, Fr{\'e}d{\'e}ric Y and Ruffier, Franck and Perrinet, Laurent U},
copyright = {All rights reserved},
doi = {10.1371/journal.pcbi.1008629},
journal = {PLoS Computational Biology},
keywords = {\#nosource, deep-learning, sparse coding},
month = jan,
number = {1},
pages = {e1008629},
title = {Sparse {Deep} {Predictive} {Coding} captures contour integration capabilities of the early visual system},
url = {https://doi.org/10.1371/journal.pcbi.1008629},
volume = {17},
year = {2021},
bdsk-url-1 = {https://doi.org/10.1371/journal.pcbi.1008629}}
@article{brette_exact_2007,
author = {Brette, Romain},
title = {Exact {Simulation} of {Integrate}-and-{Fire} {Models} with {Exponential} {Currents}},
journal = {Neural Computation},
volume = {19},
number = {10},
pages = {2604--2609},
month = oct,
year = {2007},
doi = {10.1162/neco.2007.19.10.2604},
issn = {0899-7667, 1530-888X},
language = {en},
keywords = {\#nosource, ⛔ No INSPIRE recid found},
url = {https://direct.mit.edu/neco/article/19/10/2604-2609/7220},
urldate = {2022-09-15},
abstract = {Neural networks can be simulated exactly using event-driven strategies, in which the algorithm advances directly from one spike to the next spike. It applies to neuron models for which we have (1) an explicit expression for the evolution of the state variables between spikes and (2) an explicit test on the state variables that predicts whether and when a spike will be emitted. In a previous work, we proposed a method that allows exact simulation of an integrate-and-fire model with exponential conductances, with the constraint of a single synaptic time constant. In this note, we propose a method, based on polynomial root finding, that applies to integrate-and-fire models with exponential currents, with possibly many different synaptic time constants. Models can include biexponential synaptic currents and spike-triggered adaptation currents.},
bdsk-url-1 = {https://direct.mit.edu/neco/article/19/10/2604-2609/7220},
bdsk-url-2 = {https://doi.org/10.1162/neco.2007.19.10.2604}}
@article{brunel_phase_2000,
author = {Brunel, Nicolas},
doi = {10.1016/s0925-2312(00)00179-x},
issn = {09252312},
journal = {Neurocomputing},
keywords = {\#nosource, Integrate-and-fire neuron, Neural network, Oscillations, Synchrony},
language = {en},
month = jun,
pages = {307--312},
title = {Phase diagrams of sparsely connected networks of excitatory and inhibitory spiking neurons},
url = {http://linkinghub.elsevier.com/retrieve/pii/S092523120000179X},
urldate = {2019-01-14},
volume = {32-33},
year = {2000},
bdsk-url-1 = {http://linkinghub.elsevier.com/retrieve/pii/S092523120000179X},
bdsk-url-2 = {https://doi.org/10.1016/s0925-2312(00)00179-x}}
@article{carr_circuit_1990,
author = {Carr, Catherine E. and Konishi, Masakazu},
doi = {10.1523/JNEUROSCI.10-10-03227.1990},
journal = {Journal of Neuroscience},
keywords = {\#nosource},
number = {10},
pages = {3227--3246},
title = {A circuit for detection of interaural time differences in the brain stem of the barn owl},
volume = {10},
year = {1990},
bdsk-url-1 = {https://doi.org/10.1523/JNEUROSCI.10-10-03227.1990}}
@article{chavane_revisiting_2022,
abstract = {Horizontal connections in the primary visual cortex of carnivores, ungulates and primates organize on a near-regular lattice. Given the similar length scale for the regularity found in cortical orientation maps, the currently accepted theoretical standpoint is that these maps are underpinned by a like-to-like connectivity rule: horizontal axons connect preferentially to neurons with similar preferred orientation. However, there is reason to doubt the rule's explanatory power, since a growing number of quantitative studies show that the like-to-like connectivity preference and bias mostly observed at short-range scale, are highly variable on a neuron-to-neuron level and depend on the origin of the presynaptic neuron. Despite the wide availability of published data, the accepted model of visual processing has never been revised. Here, we review three lines of independent evidence supporting a much-needed revision of the like-to-like connectivity rule, ranging from anatomy to population functional measures, computational models and to theoretical approaches. We advocate an alternative, distance-dependent connectivity rule that is consistent with new structural and functional evidence: from like-to-like bias at short horizontal distance to like-to-all at long horizontal distance. This generic rule accounts for the observed high heterogeneity in interactions between the orientation and retinotopic domains, that we argue is necessary to process non-trivial stimuli in a task-dependent manner.},
author = {Chavane, Fr{\'e}d{\'e}ric and Perrinet, Laurent Udo and Rankin, James},
copyright = {All rights reserved},
doi = {10.1007/s00429-022-02455-4},
issn = {1863-2661},
journal = {Brain Structure and Function},
keywords = {\#nosource, ⛔ No INSPIRE recid found},
language = {en},
month = feb,
shorttitle = {Revisiting horizontal connectivity rules in {V1}},
title = {Revisiting horizontal connectivity rules in {V1}: from like-to-like towards like-to-all},
url = {https://doi.org/10.1007/s00429-022-02455-4},
urldate = {2022-02-06},
year = {2022},
bdsk-url-1 = {https://doi.org/10.1007/s00429-022-02455-4}}
@article{dan_efficient_1996,
author = {Dan, Yang and Atick, Joseph J. and Reid, R. C.},
doi = {10.1523/jneurosci.16-10-03351.1996},
journal = {Journal of Neuroscience},
keywords = {\#nosource, ⛔ No INSPIRE recid found},
month = may,
number = {10},
pages = {3351--3362},
title = {Efficient coding of natural scenes in the lateral geniculate nucleus: experimental test of a computational theory},
volume = {16},
year = {1996},
bdsk-url-1 = {https://doi.org/10.1523/jneurosci.16-10-03351.1996}}
@article{dandekar_neural_2012,
abstract = {Studying neural activity during natural viewing conditions is not often attempted. Isolating the neural response of a single saccade is necessary to study neural activity during natural viewing; however, the close temporal spacing of saccades that occurs during natural viewing makes it difficult to determine the response to a single saccade. Herein, a general linear model (GLM) approach is applied to estimate the EEG neural saccadic response for different segments of the saccadic main sequence separately. It is determined that, in visual search conditions, neural responses estimated by conventional event-related averaging are significantly and systematically distorted relative to GLM estimates due to the close temporal spacing of saccades during visual search. Before the GLM is applied, analyses are applied that demonstrate that saccades during visual search with intersaccadic spacings as low as 100--150 ms do not exhibit significant refractory effects. Therefore, saccades displaying different intersaccadic spacings during visual search can be modeled using the same regressor in a GLM. With the use of the GLM approach, neural responses were separately estimated for five different ranges of saccade amplitudes during visual search. Occipital responses time locked to the onsets of saccades during visual search were found to account for, on average, 79 percent of the variance of EEG activity in a window 90--200 ms after the onsets of saccades for all five saccade amplitude ranges that spanned a range of 0.2--6.0 degrees. A GLM approach was also used to examine the lateralized ocular artifacts associated with saccades. Possible extensions of the methods presented here to account for the superposition of microsaccades in event-related EEG studies conducted in nominal fixation conditions are discussed.},
author = {Dandekar, Sangita and Privitera, Claudio and Carney, Thom and Klein, Stanley A.},
doi = {10.1152/jn.00237.2011},
issn = {0022-3077},
journal = {Journal of Neurophysiology},
keywords = {\#nosource, ⛔ No INSPIRE recid found},
month = mar,
number = {6},
pages = {1776--1790},
title = {Neural saccadic response estimation during natural viewing},
url = {https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3311669/},
urldate = {2022-09-14},
volume = {107},
year = {2012},
bdsk-url-1 = {https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3311669/},
bdsk-url-2 = {https://doi.org/10.1152/jn.00237.2011}}
@article{dardelet_event-by-event_2021,
abstract = {Contour velocity estimation and tracking from a fully event-based perspective.},
author = {Dardelet, Laurent and Benosman, Ryad and Ieng, Sio-Hoi},
doi = {10.36227/techrxiv.17013824.v1},
keywords = {\#nosource, ⛔ No INSPIRE recid found},
language = {en},
month = nov,
title = {An {Event}-by-{Event} {Feature} {Detection} and {Tracking} {Invariant} to {Motion} {Direction} and {Velocity}},
urldate = {2022-09-28},
year = {2021},
bdsk-url-1 = {https://doi.org/10.36227/techrxiv.17013824.v1}}
@article{davis_spontaneous_2021,
author = {Davis, Zachary W. and Benigno, Gabriel B. and Fletterman, Charlee and Desbordes, Theo and Steward, Christopher and Sejnowski, Terrence J. and Reynolds, John H. and Muller, Lyle},
doi = {10.1038/s41467-021-26175-1},
journal = {Nature Communications},
keywords = {\#nosource, ⛔ No INSPIRE recid found},
number = {1},
pages = {1--16},
title = {Spontaneous traveling waves naturally emerge from horizontal fiber time delays and travel through locally asynchronous-irregular states},
volume = {12},
year = {2021},
bdsk-url-1 = {https://doi.org/10.1038/s41467-021-26175-1}}
@article{deangelis_functional_1999,
author = {DeAngelis, Gregory C and Ghose, Geoffrey M and Ohzawa, Izumi and Freeman, Ralph D},
doi = {10.1523/JNEUROSCI.19-10-04046.1999},
journal = {Journal of Neuroscience},
keywords = {\#nosource, ⛔ No INSPIRE recid found},
number = {10},
pages = {4046--4064},
title = {Functional micro-organization of primary visual cortex: receptive field analysis of nearby neurons},
volume = {19},
year = {1999},
bdsk-url-1 = {https://doi.org/10.1523/JNEUROSCI.19-10-04046.1999}}
@article{delorme_spikenet_1999,
abstract = {SpikeNET is a simulator for modeling large networks of asynchronously spiking neurons. It uses simple integrate-and-fire neurons which undergo step-like changes in membrane potential when synaptic inputs arrive. If a threshold is exceeded, the potential is reset and the neuron added to a list to be propagated on the next time step. Using such spike lists greatly reduces the computations associated with large networks, and simplifies implementations using parallel hardware since inter-processor communication can be limited to sending lists of the neurons which just fired. We have used it to model complex multi-layer architectures based on the primate visual system that involve millions of neurons and billions of synaptic connections. Such models are not only biological but also efficient, robust and very fast, qualities which they share with the human visual system.},
author = {Delorme, Arnaud and Gautrais, Jacques and van Rullen, Rufin and Thorpe, Simon},
doi = {10.1016/S0925-2312(99)00095-8},
issn = {0925-2312},
journal = {Neurocomputing},
keywords = {\#nosource, Biological visual systems, Categorization, Modeling software, Natural scenes, ⛔ No INSPIRE recid found},
language = {en},
month = jun,
pages = {989--996},
shorttitle = {{SpikeNET}},
title = {{SpikeNET}: {A} simulator for modeling large networks of integrate and fire neurons},
url = {https://www.sciencedirect.com/science/article/pii/S0925231299000958},
urldate = {2022-09-28},
volume = {26-27},
year = {1999},
bdsk-url-1 = {https://www.sciencedirect.com/science/article/pii/S0925231299000958},
bdsk-url-2 = {https://doi.org/10.1016/S0925-2312(99)00095-8}}
@article{deweese_binary_2002,
author = {DeWeese, Michael and Zador, Anthony},
journal = {Advances in neural information processing systems},
keywords = {\#nosource, ⛔ No DOI found, ⛔ No INSPIRE recid found},
title = {Binary coding in auditory cortex},
volume = {15},
year = {2002}}
@article{engbert_integrated_2011,
abstract = {When we fixate a stationary target, our eyes generate miniature (or fixational) eye movements involuntarily. These fixational eye movements are classified as slow components (physiological drift, tremor) and microsaccades, which represent rapid, small-amplitude movements. Here we propose an integrated mathematical model for the generation of slow fixational eye movements and microsaccades. The model is based on the concept of self-avoiding random walks in a potential, a process driven by a self-generated activation field. The self-avoiding walk generates persistent movements on a short timescale, whereas, on a longer timescale, the potential produces antipersistent motions that keep the eye close to an intended fixation position. We introduce microsaccades as fast movements triggered by critical activation values. As a consequence, both slow movements and microsaccades follow the same law of motion; i.e., movements are driven by the self-generated activation field. Thus, the model contributes a unified explanation of why it has been a long-standing problem to separate slow movements and microsaccades with respect to their motion-generating principles. We conclude that the concept of a self-avoiding random walk captures fundamental properties of fixational eye movements and provides a coherent theoretical framework for two physiologically distinct movement types.},
author = {Engbert, Ralf and Mergenthaler, Konstantin and Sinn, Petra and Pikovsky, Arkady},
doi = {10.1073/pnas.1102730108},
issn = {0027-8424, 1091-6490},
journal = {Proceedings of the National Academy of Sciences},
keywords = {\#nosource, ⛔ No INSPIRE recid found},
language = {en},
month = sep,
number = {39},
pages = {E765--E770},
title = {An integrated model of fixational eye movements and microsaccades},
url = {https://www.pnas.org/content/108/39/E765},
urldate = {2021-02-18},
volume = {108},
year = {2011},
bdsk-url-1 = {https://www.pnas.org/content/108/39/E765},
bdsk-url-2 = {https://doi.org/10.1073/pnas.1102730108}}
@article{frye_elementary_2015,
author = {Frye, Mark},
doi = {10.1016/j.cub.2015.01.013},
issn = {09609822},
journal = {Current Biology},
keywords = {\#nosource, ⛔ No INSPIRE recid found},
language = {en},
month = mar,
number = {6},
pages = {R215--R217},
title = {Elementary motion detectors},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0960982215000159},
urldate = {2022-03-21},
volume = {25},
year = {2015},
bdsk-url-1 = {https://linkinghub.elsevier.com/retrieve/pii/S0960982215000159},
bdsk-url-2 = {https://doi.org/10.1016/j.cub.2015.01.013}}
@article{Gallego2022,
abstract = {Event cameras are bio-inspired sensors that differ from conventional frame cameras: Instead of capturing images at a fixed rate, they asynchronously measure per-pixel brightness changes, and output a stream of events that encode the time, location and sign of the brightness changes. Event cameras offer attractive properties compared to traditional cameras: high temporal resolution (in the order of ms), very high dynamic range (140 dB versus 60 dB), low power consumption, and high pixel bandwidth (on the order of kHz) resulting in reduced motion blur. Hence, event cameras have a large potential for robotics and computer vision in challenging scenarios for traditional cameras, such as low-latency, high speed, and high dynamic range. However, novel methods are required to process the unconventional output of these sensors in order to unlock their potential. This paper provides a comprehensive overview of the emerging field of event-based vision, with a focus on the applications and the algorithms developed to unlock the outstanding properties of event cameras. We present event cameras from their working principle, the actual sensors that are available and the tasks that they have been used for, from low-level vision (feature detection and tracking, optic flow, etc.) to high-level vision (reconstruction, segmentation, recognition). We also discuss the techniques developed to process events, including learning-based techniques, as well as specialized processors for these novel sensors, such as spiking neural networks. Additionally, we highlight the challenges that remain to be tackled and the opportunities that lie ahead in the search for a more efficient, bio-inspired way for machines to perceive and interact with the world.},
author = {Gallego, Guillermo and Delbruck, Tobi and Orchard, Garrick and Bartolozzi, Chiara and Taba, Brian and Censi, Andrea and Leutenegger, Stefan and Davison, Andrew J. and Conradt, Jorg and Daniilidis, Kostas and Scaramuzza, Davide},
doi = {10.1109/TPAMI.2020.3008413},
issn = {0162-8828, 2160-9292, 1939-3539},
journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence},
keywords = {⛔ No INSPIRE recid found},
language = {en},
month = jan,
number = {1},
pages = {154--180},
shorttitle = {Event-{Based} {Vision}},
title = {Event-{Based} {Vision}: {A} {Survey}},
url = {https://ieeexplore.ieee.org/document/9138762/},
urldate = {2022-07-19},
volume = {44},
year = {2022},
bdsk-url-1 = {https://ieeexplore.ieee.org/document/9138762/},
bdsk-url-2 = {https://doi.org/10.1109/TPAMI.2020.3008413}}
@article{ghosh_spatiotemporal_2019,
abstract = {In this paper, we address the challenging problem of action recognition, using event-based cameras. To recognise most gestural actions, often higher temporal precision is required for sampling visual information. Actions are defined by motion, and therefore, when using event-based cameras it is often unnecessary to re-sample the entire scene. Neuromorphic, event-based cameras have presented an alternative to visual information acquisition by asynchronously time-encoding pixel intensity changes, through temporally precise spikes (≈ 10 µs resolution), making them well equipped for action recognition. However, other challenges exist, which are intrinsic to event-based imagers, such as higher signal-to-noise ratio, and a spatiotemporally sparse information. One option is to convert event-data into frames, but this could result in significant temporal precision loss. In this work we introduce spatiotemporal filtering in the spike-event domain, as an alternative way of channeling spatiotemporal information through to a convolutional neural network. The filters are local spatiotemporal weight matrices, learned from the spike-event data, in an unsupervised manner. We find that appropriate spatiotemporal filtering significantly improves CNN performance beyond state-of-the-art on the event-based DVS Gesture dataset. On our newly recorded action recognition dataset, our method shows significant improvement when compared with other, standard ways of generating the spatiotemporal filters.},
author = {Ghosh, Rohan and Gupta, Anupam and Nakagawa, Andrei and Soares, Alcimar and Thakor, Nitish},
keywords = {Computer Science - Computer Vision and Pattern Recognition, ⛔ No INSPIRE recid found},
language = {en},
month = mar,
note = {arXiv:1903.07067 [cs]},
title = {Spatiotemporal {Filtering} for {Event}-{Based} {Action} {Recognition}},
url = {http://arxiv.org/abs/1903.07067},
urldate = {2023-01-20},
year = {2019},
bdsk-url-1 = {http://arxiv.org/abs/1903.07067}}
@article{gollisch_rapid_2008,
author = {Gollisch, Tim and Meister, Markus},
doi = {10.1126/science.1149639},
journal = {Science},
keywords = {\#nosource, ⛔ No INSPIRE recid found},
number = {5866},
pages = {1108--1111},
title = {Rapid neural coding in the retina with relative spike latencies},
volume = {319},
year = {2008},
bdsk-url-1 = {https://doi.org/10.1126/science.1149639}}
@article{goodman_spike-timing-based_2010,
abstract = {Spike timing is precise in the auditory system and it has been argued that it conveys information about auditory stimuli, in particular about the location of a sound source. However, beyond simple time differences, the way in which neurons might extract this information is unclear and the potential computational advantages are unknown. The computational difficulty of this task for an animal is to locate the source of an unexpected sound from two monaural signals that are highly dependent on the unknown source signal. In neuron models consisting of spectro-temporal filtering and spiking nonlinearity, we found that the binaural structure induced by spatialized sounds is mapped to synchrony patterns that depend on source location rather than on source signal. Location-specific synchrony patterns would then result in the activation of location-specific assemblies of postsynaptic neurons. We designed a spiking neuron model which exploited this principle to locate a variety of sound sources in a virtual acoustic environment using measured human head-related transfer functions. The model was able to accurately estimate the location of previously unknown sounds in both azimuth and elevation (including front/back discrimination) in a known acoustic environment. We found that multiple representations of different acoustic environments could coexist as sets of overlapping neural assemblies which could be associated with spatial locations by Hebbian learning. The model demonstrates the computational relevance of relative spike timing to extract spatial information about sources independently of the source signal.},
author = {Goodman, Dan F. M. and Brette, Romain},
doi = {10.1371/journal.pcbi.1000993},
issn = {1553-7358},
journal = {PLoS Comput Biol},
keywords = {\#nosource, spike, spikes, synchrony, ⛔ No INSPIRE recid found},
month = nov,
number = {11},
pmid = {21085681},
title = {Spike-timing-based computation in sound localization.},
url = {http://www.ncbi.nlm.nih.gov/pmc/articles/PMC2978676/},
volume = {6},
year = {2010},
bdsk-url-1 = {http://www.ncbi.nlm.nih.gov/pmc/articles/PMC2978676/},
bdsk-url-2 = {https://doi.org/10.1371/journal.pcbi.1000993}}
@inproceedings{grimaldi_learning_2022,
abstract = {The response of a biological neuron depends on the precise timing of afferent spikes. This temporal aspect of the neuronal code is essential in understanding information processing in neurobiology and applies particularly well to the output of neuromorphic hardware such as event-based cameras. However, most artificial neuronal models do not take advantage of this minute temporal dimension and here, we develop a model for the efficient detection of temporal spiking motifs based on a layer of neurons with hetero-synaptic delays. Indeed, the variety of synaptic delays on the dendritic tree allows to synchronize synaptic inputs as they reach the basal dendritic tree. We show this can be formalized as a time-invariant logistic regression which can be trained using labelled data. We apply this model to solve the specific computer vision problem of motion detection, and demonstrate its application to synthetic naturalistic videos transformed into event streams similar to the output of event-based cameras. In particular, we quantify how its accuracy can vary with the total computational load. This end-to-end event-driven computational brick could help improve the performance of future Spiking Neural Network (SNN) algorithms and their prospective use in neuromorphic chips.},
author = {Grimaldi, Antoine and Perrinet, Laurent U},
booktitle = {2022 {IEEE} {International} {Conference} on {Image} {Processing} ({ICIP})},
doi = {10.1109/ICIP46576.2022.9897394},
keywords = {Biological neural networks, Cameras, Delays, Motion detection, Neuromorphics, Neurons, Synchronization, efficient coding, event-based computations, logistic regression, motion detection, spiking neural networks, time code, ⛔ No INSPIRE recid found},
month = oct,
note = {ISSN: 2381-8549},
pages = {3591--3595},
title = {Learning hetero-synaptic delays for motion detection in a single layer of spiking neurons},
year = {2022},
bdsk-url-1 = {https://doi.org/10.1109/ICIP46576.2022.9897394}}
@article{grimaldi_precise_2023,
abstract = {Why do neurons communicate through spikes? By definition, spikes are all-or-none neural events which occur at continuous times. In other words, spikes are on one side binary, existing or not without further details, and on the other, can occur at any asynchronous time, without the need for a centralized clock. This stands in stark contrast to the analog representation of values and the discretized timing classically used in digital processing and at the base of modern-day neural networks. As neural systems almost systematically use this so-called event-based representation in the living world, a better understanding of this phenomenon remains a fundamental challenge in neurobiology in order to better interpret the profusion of recorded data. With the growing need for intelligent embedded systems, it also emerges as a new computing paradigm to enable the efficient operation of a new class of sensors and event-based computers, called neuromorphic, which could enable significant gains in computation time and energy consumption---a major societal issue in the era of the digital economy and global warming. In this review paper, we provide evidence from biology, theory and engineering that the precise timing of spikes plays a crucial role in our understanding of the efficiency of neural networks.},
author = {Grimaldi, Antoine and Gruel, Am{\'e}lie and Besnainou, Camille and J{\'e}r{\'e}mie, Jean-Nicolas and Martinet, Jean and Perrinet, Laurent U.},
copyright = {http://creativecommons.org/licenses/by/3.0/},
doi = {10.3390/brainsci13010068},
issn = {2076-3425},
journal = {Brain Sciences},
keywords = {asynchronous computing, computational neuroscience, heterogeneous delays, neurobiology, neuromorphic engineering, polychronization, spikes, spiking motifs, ⛔ No INSPIRE recid found},
language = {en},
month = jan,
note = {Number: 1 Publisher: Multidisciplinary Digital Publishing Institute},
number = {1},
pages = {68},
title = {Precise {Spiking} {Motifs} in {Neurobiological} and {Neuromorphic} {Data}},
url = {https://www.mdpi.com/2076-3425/13/1/68},
urldate = {2023-01-31},
volume = {13},
year = {2023},
bdsk-url-1 = {https://www.mdpi.com/2076-3425/13/1/68},
bdsk-url-2 = {https://doi.org/10.3390/brainsci13010068}}
@article{grimaldi_robust_2022,
abstract = {We propose a neuromimetic architecture able to perform always-on pattern recognition. To achieve this, we extended an existing event-based algorithm [1], which introduced novel spatio-temporal features as a Hierarchy Of Time-Surfaces (HOTS). Built from asynchronous events acquired by a neuromorphic camera, these time surfaces allow to code the local dynamics of a visual scene and to create an efficient event-based pattern recognition architecture. Inspired by neuroscience, we extended this method to increase its performance. Our first contribution was to add a homeostatic gain control on the activity of neurons to improve the learning of spatio-temporal patterns [2]. A second contribution is to draw an analogy between the HOTS algorithm and Spiking Neural Networks (SNN). Following that analogy, our last contribution is to modify the classification layer and remodel the offline pattern categorization method previously used into an online and event-driven one. This classifier uses the spiking output of the network to define novel time surfaces and we then perform online classification with a neuromimetic implementation of a multinomial logistic regression. Not only do these improvements increase consistently the performances of the network, they also make this event-driven pattern recognition algorithm online and bio-realistic. Results were validated on different datasets: DVS barrel [3], Poker-DVS [4] and N-MNIST [5]. We foresee to develop the SNN version of the method and to extend this fully event-driven approach to more naturalistic tasks, notably for always-on, ultra-fast object categorization.},
author = {Grimaldi, Antoine and Boutin, Victor and Ieng, Sio-Hoi and Benosman, Ryad and Perrinet, Laurent},
doi = {10.36227/techrxiv.18003077.v1},
keywords = {efficient coding, event-based vision, homeostasis, neuromorphic hardware, online classification, ⛔ No INSPIRE recid found},
language = {en},
month = jan,
title = {A robust event-driven approach to always-on object recognition},
url = {https://www.techrxiv.org/articles/preprint/A_robust_event-driven_approach_to_always-on_object_recognition/18003077/1},
urldate = {2022-01-13},
year = {2022},
bdsk-url-1 = {https://www.techrxiv.org/articles/preprint/A_robust_event-driven_approach_to_always-on_object_recognition/18003077/1},
bdsk-url-2 = {https://doi.org/10.36227/techrxiv.18003077.v1}}
@article{guise_bayesian_2014,
author = {Guise, Mira and Knott, Alistair and Benuskova, Lubica},
doi = {10/f6chbq},
journal = {Neural Computation},
keywords = {\#nosource, ⛔ No INSPIRE recid found},
number = {9},
pages = {2052--2073},
title = {A {Bayesian} model of polychronicity},
volume = {26},
year = {2014},
bdsk-url-1 = {https://doi.org/10/f6chbq}}
@article{gutig_tempotron_2006,
abstract = {The timing of action potentials in sensory neurons contains substantial information about the eliciting stimuli. Although the computational advantages of spike timing--based neuronal codes have long been recognized, it is unclear whether, and if so how, neurons can learn to read out such representations. We propose a new, biologically plausible supervised synaptic learning rule that enables neurons to efficiently learn a broad range of decision rules, even when information is embedded in the spatiotemporal structure of spike patterns rather than in mean firing rates. The number of categorizations of random spatiotemporal patterns that a neuron can implement is several times larger than the number of its synapses. The underlying nonlinear temporal computation allows neurons to access information beyond single-neuron statistics and to discriminate between inputs on the basis of multineuronal spike statistics. Our work demonstrates the high capacity of neural systems to learn to decode information embedded in distributed patterns of spike synchrony.},
author = {G{\"u}tig, Robert and Sompolinsky, Haim},
doi = {10.1038/nn1643},
issn = {1546-1726},
journal = {Nature Neuroscience},
keywords = {\#nosource, ⛔ No INSPIRE recid found},
language = {en},
month = mar,
number = {3},
pages = {420--428},
shorttitle = {The tempotron},
title = {The tempotron: {A} neuron that learns spike timing--based decisions},
url = {http://www.nature.com/articles/nn1643/},
urldate = {2022-01-31},
volume = {9},
year = {2006},
bdsk-url-1 = {http://www.nature.com/articles/nn1643/},
bdsk-url-2 = {https://doi.org/10.1038/nn1643}}
@article{haag_fly_2004,
abstract = {The computational structure of an optimal motion detector was proposed to depend on the signal-to-noise ratio (SNR) of the stimulus: At low SNR, the optimal motion detector should be a correlation or "Reichardt" type, whereas at high SNR, the detector would employ a gradient scheme [Potters, M. \& Bialek, W. (1994) J. Physiol. (Paris) 4, 1755-1775]. Although a large body of experiments supports the Reichardt detector as the processing scheme leading to direction selectivity in fly motion vision, in most of these studies the SNR was rather low. We therefore reinvestigated the question over a much larger SNR range. Using 2-photon microscopy, we found that local dendritic [Ca(2+)] modulations, which are characteristic of Reichardt detectors, occur in response to drifting gratings over a wide range of luminance levels and contrasts. We also explored, as another fingerprint of Reichardt detectors, the dependence of the velocity optimum on the pattern wavelength. Again, we found Reichardt-typical behavior throughout the whole luminance and contrast range tested. Our results, therefore, provide strong evidence that only a single elementary processing scheme is used in fly motion vision.},
author = {Haag, J. and Denk, W. and Borst, A.},
doi = {10.1073/pnas.0407368101},
issn = {0027-8424},
journal = {Proceedings of the National Academy of Sciences of the United States of America},
keywords = {\#nosource, Algorithms, Animals, Calcium Signaling, Diptera, Electrophysiology, Female, Models, Models, Neurological, Motion, Motion Perception, Neurological, Ocular, Optics and Photonics, Photic Stimulation, Vision, Vision, Ocular, biology, delay-learning, insects, ⛔ No INSPIRE recid found},
language = {en},
month = nov,
number = {46},
pages = {16333--16338},
pmcid = {PMC526200},
pmid = {15534201},
title = {Fly motion vision is based on {Reichardt} detectors regardless of the signal-to-noise ratio},
volume = {101},
year = {2004},
bdsk-url-1 = {https://doi.org/10.1073/pnas.0407368101}}
@article{haimerl_internal_2019,
abstract = {The hippocampus plays a critical role in episodic memory: the sequential representation of visited places and experienced events. This function is mirrored by hippocampal activity that self organizes into sequences of neuronal activation that integrate spatiotemporal information. What are the underlying mechanisms of such integration is still unknown. Single cell activity was recently shown to combine time and distance information; however, it remains unknown whether a degree of tuning between space and time can be defined at the network level. Here, combining daily calcium imaging of CA1 sequence dynamics in running head-fixed mice and network modeling, we show that CA1 network activity tends to represent a specific combination of space and time at any given moment, and that the degree of tuning can shift within a continuum from 1 day to the next. Our computational model shows that this shift in tuning can happen under the control of the external drive power. We propose that extrinsic global inputs shape the nature of spatiotemporal integration in the hippocampus at the population level depending on the task at hand, a hypothesis which may guide future experimental studies.},
author = {Haimerl, Caroline and Angulo-Garcia, David and Villette, Vincent and Reichinnek, Susanne and Torcini, Alessandro and Cossart, Rosa and Malvache, Arnaud},
doi = {10.1073/pnas.1718518116},
issn = {0027-8424, 1091-6490},
journal = {Proceedings of the National Academy of Sciences},
keywords = {\#nosource, attractor network, hippocampus, neural model, space representation, time representation, ⛔ No INSPIRE recid found},
language = {en},
month = apr,
number = {15},
pages = {7477--7482},
title = {Internal representation of hippocampal neuronal population spans a time-distance continuum},
url = {https://www.pnas.org/content/116/15/7477},
urldate = {2022-01-17},
volume = {116},
year = {2019},
bdsk-url-1 = {https://www.pnas.org/content/116/15/7477},
bdsk-url-2 = {https://doi.org/10.1073/pnas.1718518116}}
@article{hanuschkin_general_2010,
abstract = {Traditionally, event-driven simulations have been limited to the very restricted class of neuronal models for which the timing of future spikes can be expressed in closed form. Recently, the class of models that is amenable to event-driven simulation has been extended by the development of techniques to accurately calculate firing times for some integrate-and-fire neuron models that do not enable the prediction of future spikes in closed form. The motivation of this development is the general perception that time-driven simulations are imprecise. Here, we demonstrate that a globally time-driven scheme can calculate firing times that cannot be discriminated from those calculated by an event-driven implementation of the same model; moreover, the time-driven scheme incurs lower computational costs. The key insight is that time-driven methods are based on identifying a threshold crossing in the recent past, which can be implemented by a much simpler algorithm than the techniques for predicting future threshold crossings that are necessary for event-driven approaches. As run time is dominated by the cost of the operations performed at each incoming spike, which includes spike prediction in the case of event-driven simulation and retrospective detection in the case of time-driven simulation, the simple time-driven algorithm outperforms the event-driven approaches. Additionally, our method is generally applicable to all commonly used integrate-and-fire neuronal models; we show that a non-linear model employing a standard adaptive solver can reproduce a reference spike train with a high degree of precision.},
author = {Hanuschkin, Alexander and Kunkel, Susanne and Helias, Moritz and Morrison, Abigail and Diesmann, Markus},
doi = {10.3389/fninf.2010.00113},
issn = {1662-5196},
journal = {Frontiers in Neuroinformatics},
keywords = {\#nosource, ⛔ No INSPIRE recid found},
month = oct,
pages = {113},
title = {A {General} and {Efficient} {Method} for {Incorporating} {Precise} {Spike} {Times} in {Globally} {Time}-{Driven} {Simulations}},
url = {https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2965048/},
urldate = {2022-09-14},
volume = {4},
year = {2010},
bdsk-url-1 = {https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2965048/},
bdsk-url-2 = {https://doi.org/10.3389/fninf.2010.00113}}
@article{hogendoorn_predictive_2019,
author = {Hogendoorn, Hinze and Burkitt, Anthony N.},
doi = {10.1523/eneuro.0412-18.2019},
issn = {2373-2822},
journal = {eNeuro},
keywords = {\#nosource, ⛔ No INSPIRE recid found},
language = {en},
month = mar,
number = {2},
pages = {ENEURO.0412-18.2019},
shorttitle = {Predictive {Coding} with {Neural} {Transmission} {Delays}},
title = {Predictive {Coding} with {Neural} {Transmission} {Delays}: {A} {Real}-{Time} {Temporal} {Alignment} {Hypothesis}},
url = {http://eneuro.org/lookup/doi/10.1523/ENEURO.0412-18.2019},
urldate = {2019-11-12},
volume = {6},
year = {2019},
bdsk-url-1 = {http://eneuro.org/lookup/doi/10.1523/ENEURO.0412-18.2019},
bdsk-url-2 = {https://doi.org/10.1523/eneuro.0412-18.2019}}
@inproceedings{howard_searching_2019,
author = {Howard, Andrew and Sandler, Mark and Chu, Grace and Chen, Liang-Chieh and Chen, Bo and Tan, Mingxing and Wang, Weijun and Zhu, Yukun and Pang, Ruoming and Vasudevan, Vijay and Le, Quoc V. and Adam, Hartwig},
booktitle = {Proceedings of the {IEEE}/{CVF} international conference on computer vision ({ICCV})},
keywords = {Computer Science - Computer Vision and Pattern Recognition, ⛔ No INSPIRE recid found},
month = oct,
title = {Searching for {MobileNetV3}},
year = {2019}}
@article{ikegaya_synfire_2004,
author = {Ikegaya, Yuji and Aaron, Gloster and Cossart, Rosa and Aronov, Dmitriy and Lampl, Ilan and Ferster, David and Yuste, Rafael},
doi = {10.1126/science.1093173},
journal = {Science},
keywords = {\#nosource, ⛔ No INSPIRE recid found},
month = apr,
number = {5670},
pages = {559--564},
shorttitle = {Synfire {Chains} and {Cortical} {Songs}},
title = {Synfire {Chains} and {Cortical} {Songs}: {Temporal} {Modules} of {Cortical} {Activity}},
url = {http://www.science.org/doi/10.1126/science.1093173},
urldate = {2021-11-29},
volume = {304},
year = {2004},
bdsk-url-1 = {http://www.science.org/doi/10.1126/science.1093173},
bdsk-url-2 = {https://doi.org/10.1126/science.1093173}}
@article{izhikevich_polychronization_2006,
author = {Izhikevich, Eugene M},
doi = {10.1162/089976606775093882},
journal = {Neural Computation},
keywords = {\#nosource, ⛔ No INSPIRE recid found},
number = {2},
pages = {245--282},
title = {Polychronization: computation with spikes},
volume = {18},
year = {2006},
bdsk-url-1 = {https://doi.org/10.1162/089976606775093882}}
@article{kaplan_anisotropic_2013,
abstract = {Predictive coding hypothesizes that the brain explicitly infers upcoming sensory input to establish a coherent representation of the world. Although it is becoming generally accepted, it is not clear on which level spiking neural networks may implement predictive coding and what function their connectivity may have. We present a network model of conductance-based integrate-and-fire neurons inspired by the architecture of retinotopic cortical areas that assumes predictive coding is implemented through network connectivity, namely in the connection delays and in selectiveness for the tuning properties of source and target cells. We show that the applied connection pattern leads to motion-based prediction in an experiment tracking a moving dot. In contrast to our proposed model, a network with random or isotropic connectivity fails to predict the path when the moving dot disappears. Furthermore, we show that a simple linear decoding approach is sufficient to transform neuronal spiking activity into a probabilistic estimate for reading out the target trajectory.},
author = {Kaplan, Bernhard and Lansner, Anders and Masson, Guillaume S. and Perrinet, Laurent U.},
doi = {10.3389/fncom.2013.00112},
issn = {1662-5188},
journal = {Frontiers in Computational Neuroscience},
keywords = {⛔ No INSPIRE recid found},
title = {Anisotropic connectivity implements motion-based prediction in a spiking neural network},
url = {https://www.frontiersin.org/articles/10.3389/fncom.2013.00112},
urldate = {2023-04-06},
volume = {7},
year = {2013},
bdsk-url-1 = {https://www.frontiersin.org/articles/10.3389/fncom.2013.00112},
bdsk-url-2 = {https://doi.org/10.3389/fncom.2013.00112}}
@article{khoei_flash-lag_2017,
abstract = {Due to its inherent neural delays, the visual system has an outdated access to sensory information about the current position of moving objects. In contrast, living organisms are remarkably able to track and intercept moving objects under a large range of challenging environmental conditions. Physiological, behavioral and psychophysical evidences strongly suggest that position coding is extrapolated using an explicit and reliable representation of object's motion but it is still unclear how these two representations interact. For instance, the so-called flash-lag effect supports the idea of a differential processing of position between moving and static objects. Although elucidating such mechanisms is crucial in our understanding of the dynamics of visual processing, a theory is still missing to explain the different facets of this visual illusion. Here, we reconsider several of the key aspects of the flash-lag effect in order to explore the role of motion upon neural coding of objects' position. First, we formalize the problem using a Bayesian modeling framework which includes a graded representation of the degree of belief about visual motion. We introduce a motion-based prediction model as a candidate explanation for the perception of coherent motion. By including the knowledge of a fixed delay, we can model the dynamics of sensory information integration by extrapolating the information acquired at previous instants in time. Next, we simulate the optimal estimation of object position with and without delay compensation and compared it with human perception under a broad range of different psychophysical conditions. Our computational study suggests that the explicit, probabilistic representation of velocity information is crucial in explaining position coding, and therefore the flash-lag effect. 
We discuss these theoretical results in light of the putative corrective mechanisms that can be used to cancel out the detrimental effects of neural delays and illuminate the more general question of the dynamical representation at the present time of spatial information in the visual pathways.},
author = {Khoei, Mina A. and Masson, Guillaume S. and Perrinet, Laurent U.},
copyright = {Licence Creative Commons Attribution - Pas d'utilisation commerciale - Partage dans les m{\^e}mes conditions 4.0 International (CC-BY-NC-SA)},
doi = {10.1371/journal.pcbi.1005068},
issn = {1553-7358},
journal = {PLOS Computational Biology},
keywords = {\#nosource, Coding mechanisms, Extrapolation, Motion, Psychophysics, Sensory perception, Velocity, Vision, Visual system, ⛔ No INSPIRE recid found},
language = {en},
month = jan,
number = {1},
pages = {e1005068},
title = {The {Flash}-{Lag} {Effect} as a {Motion}-{Based} {Predictive} {Shift}},
url = {https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1005068},
urldate = {2022-08-31},
volume = {13},
year = {2017},
bdsk-url-1 = {https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1005068},
bdsk-url-2 = {https://doi.org/10.1371/journal.pcbi.1005068}}
@article{koenderink_representation_1987,
abstract = {It is shown that a convolution with certain reasonable receptive field (RF) profiles yields the exact partial derivatives of the retinal illuminance blurred to a specified degree. Arbitrary concatenations of such RF profiles yield again similar ones of higher order and for a greater degree of blurring.},
author = {Koenderink, J. J. and van Doorn, A. J.},
doi = {10.1007/BF00318371},
issn = {1432-0770},
journal = {Biological Cybernetics},
keywords = {\#nosource, ⛔ No INSPIRE recid found},
language = {en},
month = mar,
number = {6},
pages = {367--375},
title = {Representation of local geometry in the visual system},
url = {https://doi.org/10.1007/BF00318371},
urldate = {2022-08-31},
volume = {55},
year = {1987},
bdsk-url-1 = {https://doi.org/10.1007/BF00318371}}
@article{kremkow_push-pull_2016,
abstract = {Neurons in the primary visual cortex are known for responding vigorously but with high variability to classical stimuli such as drifting bars or gratings. By contrast, natural scenes are encoded more efficiently by sparse and temporal precise spiking responses. We used a conductance-based model of the visual system in higher mammals to investigate how two specific features of the thalamo-cortical pathway, namely push-pull receptive field organization and synaptic depression, can contribute to this contextual reshaping of V1 responses. By comparing cortical dynamics evoked respectively by natural vs. artificial stimuli in a comprehensive parametric space analysis, we demonstrate that the reliability and sparseness of the spiking responses during natural vision is not a mere consequence of the increased bandwidth in the sensory input spectrum. Rather, it results from the combined impacts of synaptic depression and push-pull inhibition, the later acting for natural scenes as a form of ``effective'' feed-forward inhibition as demonstrated in other sensory systems. Thus, the combination of feedforward-like inhibition with fast thalamo-cortical synaptic depression by simple cells receiving a direct structured input from thalamus composes a generic computational mechanism for generating a sparse and reliable encoding of natural sensory events.},
author = {Kremkow, Jens and Perrinet, Laurent U. and Monier, Cyril and Alonso, Jose-Manuel and Aertsen, Ad and Fr{\'e}gnac, Yves and Masson, Guillaume S.},
copyright = {All rights reserved},
doi = {10.3389/fncir.2016.00037},
issn = {1662-5110},
journal = {Frontiers in Neural Circuits},
keywords = {\#nosource, Excitation/inhibition, RetinaClouds, Sensory coding, Visual Cortex, area V1, area-v1, natural visual stimuli, push-pull receptive field, statistics of natural images, ⛔ No INSPIRE recid found},
language = {English},
month = may,
title = {Push-{Pull} {Receptive} {Field} {Organization} and {Synaptic} {Depression}: {Mechanisms} for {Reliably} {Encoding} {Naturalistic} {Stimuli} in {V1}},
url = {https://doi.org/10.3389/fncir.2016.00037},
volume = {10},
year = {2016},
bdsk-url-1 = {https://doi.org/10.3389/fncir.2016.00037}}
@article{lagorce_hots_2017,
abstract = {This paper describes novel event-based spatiotemporal features called time-surfaces and how they can be used to create a hierarchical event-based pattern recognition architecture. Unlike existing hierarchical architectures for pattern recognition, the presented model relies on a time oriented approach to extract spatio-temporal features from the asynchronously acquired dynamics of a visual scene. These dynamics are acquired using biologically inspired frameless asynchronous event-driven vision sensors. Similarly to cortical structures, subsequent layers in our hierarchy extract increasingly abstract features using increasingly large spatio-temporal windows. The central concept is to use the rich temporal information provided by events to create contexts in the form of time-surfaces which represent the recent temporal activity within a local spatial neighborhood. We demonstrate that this concept can robustly be used at all stages of an event-based hierarchical model. First layer feature units operate on groups of pixels, while subsequent layer feature units operate on the output of lower level feature units. We report results on a previously published 36 class character recognition task and a 4 class canonical dynamic card pip task, achieving near 100\% accuracy on each. We introduce a new 7 class moving face recognition task, achieving 79\% accuracy.},
author = {Lagorce, Xavier and Orchard, Garrick and Galluppi, Francesco and Shi, Bertram E. and Benosman, Ryad B.},
doi = {10.1109/TPAMI.2016.2574707},
issn = {0162-8828},
journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence},
keywords = {\#nosource, Neuromorphic sensing, event-based vision, feature extraction, ⛔ No INSPIRE recid found},
number = {7},
pages = {1346--1359},
title = {{HOTS}: {A} {Hierarchy} of {Event}-{Based} {Time}-{Surfaces} for {Pattern} {Recognition}},
url = {http://ieeexplore.ieee.org/document/7508476/},
volume = {39},
year = {2017},
bdsk-url-1 = {http://ieeexplore.ieee.org/document/7508476/},
bdsk-url-2 = {https://doi.org/10.1109/TPAMI.2016.2574707}}
@article{le_bec_horizontal_2022,
abstract = {This study demonstrates the functional importance of the Surround context relayed laterally in V1 by the horizontal connectivity, in controlling the latency and the gain of the cortical response to the feedforward visual drive. We report here four main findings: 1) a centripetal apparent motion sequence results in a shortening of the spiking latency of V1 cells, when the orientation of the local inducer and the global motion axis are both co-aligned with the RF orientation preference; 2) this contextual effects grows with visual flow speed, peaking at 150--250$\,^{\circ}$/s when it matches the propagation speed of horizontal connectivity (0.15--0.25 mm/ms); 3) For this speed range, the axial sensitivity of V1 cells is tilted by 90$\,^{\circ}$ to become co-aligned with the orientation preference axis; 4) the strength of modulation by the surround context correlates with the spatiotemporal coherence of the apparent motion flow. Our results suggest an internally-generated binding process, linking local (orientation /position) and global (motion/direction) features as early as V1. This long-range diffusion process constitutes a plausible substrate in V1 of the human psychophysical bias in speed estimation for collinear motion. Since it is demonstrated in the anesthetized cat, this novel form of contextual control of the cortical gain and phase is a built-in property in V1, whose expression does not require behavioral attention and top-down control from higher cortical areas. We propose that horizontal connectivity participates in the propagation of an internal ``prediction'' wave, shaped by visual experience, which links contour co-alignment and global axial motion at an apparent speed in the range of saccade-like eye movements.},
author = {Le Bec, Benoit and Troncoso, Xoana G. and Desbois, Christophe and Passarelli, Yannick and Baudot, Pierre and Monier, Cyril and Pananceau, Marc and Fr{\'e}gnac, Yves},
doi = {10.1371/journal.pone.0268351},
editor = {Charpier, St{\'e}phane},
issn = {1932-6203},
journal = {PLOS ONE},
keywords = {⛔ No INSPIRE recid found},
language = {en},
month = jul,
number = {7},
pages = {e0268351},
shorttitle = {Horizontal connectivity in {V1}},
title = {Horizontal connectivity in {V1}: {Prediction} of coherence in contour and motion integration},
url = {https://dx.plos.org/10.1371/journal.pone.0268351},
urldate = {2022-09-26},
volume = {17},
year = {2022},
bdsk-url-1 = {https://dx.plos.org/10.1371/journal.pone.0268351},
bdsk-url-2 = {https://doi.org/10.1371/journal.pone.0268351}}
@article{lecun_gradient-based_1998,
author = {LeCun, Yann and Bottou, L{\'e}on and Bengio, Yoshua and Haffner, Patrick},
doi = {10.1109/5.726791},
issn = {00189219},
journal = {Proceedings of the IEEE},
keywords = {⛔ No INSPIRE recid found},
month = nov,
note = {tex.bdsk-url-2: https://doi.org/10.1109/5.726791 tex.date-added: 2022-05-05 19:08:15 +0200 tex.date-modified: 2022-05-05 19:08:15 +0200},
number = {11},
pages = {2278--2324},
title = {Gradient-based learning applied to document recognition},
url = {http://ieeexplore.ieee.org/document/726791/},
urldate = {2021-05-18},
volume = {86},
year = {1998},
bdsk-url-1 = {http://ieeexplore.ieee.org/document/726791/},
bdsk-url-2 = {https://doi.org/10.1109/5.726791}}
@inproceedings{lee_real-time_2014,
abstract = {Fast and efficient motion estimation is essential for a number of applications including the gesture-based user interface (UI) for portable devices like smart phones. In this paper, we propose a highly efficient method that can estimate four degree of freedom (DOF) motional components of a moving object based on an event-based vision sensor, the dynamic vision sensor (DVS). The proposed method finds informative events occurred at edges and estimates their velocities for global motion analysis. We will also describe a novel method to correct the aperture problem in the motion estimation.},
address = {Paris, France},
author = {Lee, Jun Haeng and Lee, Kyoobin and Ryu, Hyunsurk and Park, Paul K. J. and Shin, Chang-Woo and Woo, Jooyeon and Kim, Jun-Seok},
booktitle = {2014 {IEEE} {International} {Conference} on {Image} {Processing} ({ICIP})},
doi = {10.1109/ICIP.2014.7025040},
isbn = {978-1-4799-5751-4},
keywords = {\#nosource, ⛔ No INSPIRE recid found},
language = {en},
month = oct,
pages = {204--208},
publisher = {IEEE},
title = {Real-time motion estimation based on event-based vision sensor},
url = {http://ieeexplore.ieee.org/document/7025040/},
urldate = {2022-07-19},
year = {2014},
bdsk-url-1 = {http://ieeexplore.ieee.org/document/7025040/},
bdsk-url-2 = {https://doi.org/10.1109/ICIP.2014.7025040}}
@article{leon_motion_2012,
author = {Sanz Leon, Paula and Vanzetta, Ivo and Masson, Guillaume S. and Perrinet, Laurent U.},
doi = {10.1152/jn.00737.2011},
journal = {Journal of Neurophysiology},
keywords = {\#nosource, Eye movements, Low-level sensory systems, Motion detection, Natural scenes, Optimal stimulation, Python, anr-trax, bicv-sparse, kaplan13, log-gabor, motion-clouds, perrinetadamsfriston14, sanz12jnp, vacher14, ⛔ No INSPIRE recid found},
number = {11},
pages = {3217--3226},
title = {Motion {Clouds}: {Model}-based stimulus synthesis of natural-like random textures for the study of motion perception},
volume = {107},
year = {2012},
bdsk-url-1 = {https://doi.org/10.1152/jn.00737.2011}}
@article{Levy2021,
author = {Levy, William B. and Calvert, Victoria G.},
doi = {10.1073/pnas.2008173118},
issn = {0027-8424, 1091-6490},
journal = {Proceedings of the National Academy of Sciences},
keywords = {⛔ No INSPIRE recid found},
language = {en},
month = may,
number = {18},
pages = {e2008173118},
title = {Communication consumes 35 times more energy than computation in the human cortex, but both costs are needed to predict synapse number},
url = {https://pnas.org/doi/full/10.1073/pnas.2008173118},
urldate = {2023-06-27},
volume = {118},
year = {2021},
bdsk-url-1 = {https://pnas.org/doi/full/10.1073/pnas.2008173118},
bdsk-url-2 = {https://doi.org/10.1073/pnas.2008173118}}
@article{lin_supervised_2021,
abstract = {As a new brain-inspired computational model of artificial neural networks, spiking neural networks transmit and process information via precisely timed spike trains. Constructing efficient learning methods is a significant research field in spiking neural networks. In this paper, we present a supervised learning algorithm for multilayer feedforward spiking neural networks; all neurons can fire multiple spikes in all layers. The feedforward network consists of spiking neurons governed by biologically plausible long-term memory spike response model, in which the effect of earlier spikes on the refractoriness is not neglected to incorporate adaptation effects. The gradient descent method is employed to derive synaptic weight updating rule for learning spike trains. The proposed algorithm is tested and verified on spatiotemporal pattern learning problems, including a set of spike train learning tasks and nonlinear pattern classification problems on four UCI datasets. Simulation results indicate that the proposed algorithm can improve learning accuracy in comparison with other supervised learning algorithms.},
author = {Lin, Xianghong and Zhang, Mengwei and Wang, Xiangwen},
doi = {10.1155/2021/8592824},
issn = {1687-5265},
journal = {Computational Intelligence and Neuroscience},
keywords = {\#nosource, ⛔ No INSPIRE recid found},
month = nov,
pages = {8592824},
title = {Supervised {Learning} {Algorithm} for {Multilayer} {Spiking} {Neural} {Networks} with {Long}-{Term} {Memory} {Spike} {Response} {Model}},
url = {https://www.ncbi.nlm.nih.gov/pmc/articles/PMC8635912/},
urldate = {2022-09-14},
volume = {2021},
year = {2021},
bdsk-url-1 = {https://www.ncbi.nlm.nih.gov/pmc/articles/PMC8635912/},
bdsk-url-2 = {https://doi.org/10.1155/2021/8592824}}
@article{luczak_sequential_2007,
abstract = {Even in the absence of sensory stimulation, the neocortex shows complex spontaneous activity patterns, often consisting of alternating ``DOWN'' states of generalized neural silence and ``UP'' states of massive, persistent network activity. To investigate how this spontaneous activity propagates through neuronal assemblies in vivo, we simultaneously recorded populations of 50--200 cortical neurons in layer V of anesthetized and awake rats. Each neuron displayed a virtually unique spike pattern during UP states, with diversity seen amongst both putative pyramidal cells and interneurons, reflecting a complex but stereotypically organized sequential spread of activation through local cortical networks. Spike timing was most precise during the first ≈100 ms after UP state onset, and decayed as UP states progressed. A subset of UP states propagated as traveling waves, but waves passing a given point in either direction initiated similar local sequences, suggesting local networks as the substrate of sequential firing patterns. A search for repeating motifs indicated that their occurrence and structure was predictable from neurons' individual latencies to UP state onset. We suggest that these stereotyped patterns arise from the interplay of intrinsic cellular conductances and local circuit properties.},
author = {Luczak, Artur and Barth{\'o}, Peter and Marguet, Stephan L. and Buzs{\'a}ki, Gy{\"o}rgy and Harris, Kenneth D.},
doi = {10.1073/pnas.0605643104},
issn = {0027-8424, 1091-6490},
journal = {Proceedings of the National Academy of Sciences},
keywords = {\#nosource, microcircuits, neuronal assembly, repeating sequences, slow oscillations, syntire chains, ⛔ No INSPIRE recid found},
language = {en},
month = jan,
number = {1},
pages = {347--352},
title = {Sequential structure of neocortical spontaneous activity in vivo},
url = {https://www.pnas.org/content/104/1/347},
urldate = {2022-02-23},
volume = {104},
year = {2007},
bdsk-url-1 = {https://www.pnas.org/content/104/1/347},
bdsk-url-2 = {https://doi.org/10.1073/pnas.0605643104}}
@article{luo_supervised_2022,
abstract = {The brain-inspired spiking neural networks (SNNs) hold the advantages of lower power consumption and powerful computing capability. However, the lack of effective learning algorithms has obstructed the theoretical advance and applications of SNNs. The majority of the existing learning algorithms for SNNs are based on the synaptic weight adjustment. However, neuroscience findings confirm that synaptic delays can also be modulated to play an important role in the learning process. Here, we propose a gradient descent-based learning algorithm for synaptic delays to enhance the sequential learning performance of single spiking neuron. Moreover, we extend the proposed method to multilayer SNNs with spike temporal-based error backpropagation. In the proposed multilayer learning algorithm, information is encoded in the relative timing of individual neuronal spikes, and learning is performed based on the exact derivatives of the postsynaptic spike times with respect to presynaptic spike times. Experimental results on both synthetic and realistic datasets show significant improvements in learning efficiency and accuracy over the existing spike temporal-based learning algorithms. We also evaluate the proposed learning method in an SNN-based multimodal computational model for audiovisual pattern recognition, and it achieves better performance compared with its counterparts.},
author = {Luo, Xiaoling and Qu, Hong and Wang, Yuchen and Yi, Zhang and Zhang, Jilun and Zhang, Malu},
doi = {10.1109/TNNLS.2022.3164930},
issn = {2162-2388},
journal = {IEEE Transactions on Neural Networks and Learning Systems},
keywords = {\#nosource, Backpropagation, Biological system modeling, Delays, Heuristic algorithms, Membrane potentials, Neurons, Nonhomogeneous media, spike neural networks, spike neurons, supervised learning, synaptic delay plasticity., ⛔ No INSPIRE recid found},
pages = {1--13},
title = {Supervised {Learning} in {Multilayer} {Spiking} {Neural} {Networks} {With} {Spike} {Temporal} {Error} {Backpropagation}},
year = {2022},
bdsk-url-1 = {https://doi.org/10.1109/TNNLS.2022.3164930}}
@article{macdonald_neuromorphic_2022,
abstract = {Dexterous manipulation in robotic hands relies on an accurate sense of artificial touch. Here we investigate neuromorphic tactile sensation with an event-based optical tactile sensor combined with spiking neural networks for edge orientation detection. The sensor incorporates an event-based vision system (mini-eDVS) into a low-form factor artificial fingertip (the NeuroTac). The processing of tactile information is performed through a Spiking Neural Network with unsupervised Spike-Timing-Dependent Plasticity (STDP) learning, and the resultant output is classified with a 3-nearest neighbours classifier. Edge orientations were classified in 10-degree increments while tapping vertically downward and sliding horizontally across the edge. In both cases, we demonstrate that the sensor is able to reliably detect edge orientation, and could lead to accurate, bio-inspired, tactile processing in robotics and prosthetics applications.},
author = {Macdonald, Fraser L. A. and Lepora, Nathan F. and Conradt, J{\"o}rg and Ward-Cherrier, Benjamin},
copyright = {http://creativecommons.org/licenses/by/3.0/},
doi = {10.3390/s22186998},
issn = {1424-8220},
journal = {Sensors},
keywords = {\#nosource, ⛔ No INSPIRE recid found},
language = {en},
month = jan,
note = {Number: 18 Publisher: Multidisciplinary Digital Publishing Institute},
number = {18},
pages = {6998},
title = {Neuromorphic {Tactile} {Edge} {Orientation} {Classification} in an {Unsupervised} {Spiking} {Neural} {Network}},
url = {https://www.mdpi.com/1424-8220/22/18/6998},
urldate = {2022-09-26},
volume = {22},
year = {2022},
bdsk-url-1 = {https://www.mdpi.com/1424-8220/22/18/6998},
bdsk-url-2 = {https://doi.org/10.3390/s22186998}}
@article{malvache_awake_2016,
abstract = {The chained activation of neuronal assemblies is thought to support major cognitive processes, including memory. In the hippocampus, this is observed during population bursts often associated with sharp-wave ripples, in the form of an ordered reactivation of neurons. However, the organization and lifetime of these assemblies remain unknown. We used calcium imaging to map patterns of synchronous neuronal activation in the CA1 region of awake mice during runs on a treadmill. The patterns were composed of the recurring activation of anatomically intermingled, but functionally orthogonal, assemblies. These assemblies reactivated discrete temporal segments of neuronal sequences observed during runs and could be stable across consecutive days. A binding of these assemblies into longer chains revealed temporally ordered replay. These modules may represent the default building blocks for encoding or retrieving experience.},
author = {Malvache, Arnaud and Reichinnek, Susanne and Villette, Vincent and Haimerl, Caroline and Cossart, Rosa},
doi = {10.1126/science.aaf3319},
issn = {1095-9203},
journal = {Science},
keywords = {\#nosource, Animals, Brain Mapping, CA1 Region, CA1 Region, Hippocampal, Calcium Signaling, Exercise Test, Hippocampal, Male, Mice, Nerve Net, Neurons, Running, Wakefulness, ⛔ No INSPIRE recid found},
language = {eng},
month = sep,
number = {6305},
pages = {1280--1283},
title = {Awake hippocampal reactivations project onto orthogonal neuronal assemblies},
volume = {353},
year = {2016},
bdsk-url-1 = {https://doi.org/10.1126/science.aaf3319}}
@book{mandelbrot_fractal_1982,
abstract = {Rev. ed. of: Fractals. c1977; Includes indexes; Bibliography: p. [425]-443},
author = {Mandelbrot, Benoit B.},
collaborator = {{Internet Archive}},
isbn = {978-0-7167-1186-5},
keywords = {\#nosource, ⛔ No INSPIRE recid found},
language = {eng},
address = {San Francisco},
publisher = {W.H. Freeman},
title = {The fractal geometry of nature},
url = {http://archive.org/details/fractalgeometryo00beno},
urldate = {2022-09-27},
year = {1982},
bdsk-url-1 = {http://archive.org/details/fractalgeometryo00beno}}
@inproceedings{mansour_pour_speed_2018,
abstract = {It is still not fully understood how visual system integrates motion energy across different spatial and temporal frequencies to build a coherent percept of the global motion under the complex, noisy naturalistic conditions. We addressed this question by manipulating local speed variability distribution (i. e. speed bandwidth) using a well-controlled class of broadband random-texture stimuli called Motion Clouds (MCs) with continuous naturalistic spatiotemporal frequency spectra (Sanz-Leon et al., 2012, ; Simoncini et al., 2012). In a first 2AFC experiment on speed discrimination, participants had to compare the speed of a broad speed bandwidth MC (range: 0.05-8 $^{\textrm{∘}}$/s) moving at 1 of 5 possible mean speeds (ranging from 5 to 13 $^{\textrm{∘}}$/s) to that of another MC with a small speed bandwidth (SD: 0.05 $^{\textrm{∘}}$/s), always moving at a mean speed of 10 $^{\textrm{∘}}$/s . We found that MCs with larger speed bandwidth (between 0.05-0.5 $^{\textrm{∘}}$/s) were perceived moving faster. Within this range, speed uncertainty results in over-estimating stimulus velocity. However, beyond a critical bandwidth (SD: 0.5 $^{\textrm{∘}}$/s), perception of a coherent speed was lost. In a second 2AFC experiment on direction discrimination, participants had to estimate the motion direction of moving MCs with different speed bandwidths. We found that for large band MCs participant could no longer discriminate motion direction. These results suggest that when increasing speed bandwidth from small to large range, the observer experiences different perceptual regimes. We then decided to run a Maximum Likelihood Difference Scaling (Knoblauch \& Maloney, 2008) experiment with our speed bandwidth stimuli to investigate these different possible perceptual regimes. We identified three regimes within this space that correspond to motion coherency, motion transparency and motion incoherency. 
These results allow to further characterize the shape of the interactions kernel observed between different speed tuned channels and different spatiotemporal scales (Gekas et al ., 2017) that underlies global velocity estimation.},
author = {Mansour Pour, Kiana and Gekas, Nikos and Mamassian, Pascal and Perrinet, Laurent U. and Montagnini, Anna and Masson, Guillaume S.},
booktitle = {Journal of {Vision}, {Vol}.18, 345, proceedings of {VSS}},
copyright = {All rights reserved},
doi = {10.1167/18.10.345},
keywords = {\#nosource, motion detection, ⛔ No INSPIRE recid found},
title = {Speed uncertainty and motion perception with naturalistic random textures},
url = {https://laurentperrinet.github.io/publication/mansour-18-vss},
year = {2018},
bdsk-url-1 = {https://laurentperrinet.github.io/publication/mansour-18-vss},
bdsk-url-2 = {https://doi.org/10.1167/18.10.345}}
@article{maro_event-based_2020,
abstract = {In this paper, we introduce a framework for dynamic gesture recognition with background suppression operating on the output of a moving event-based camera. The system is developed to operate in real-time using only the computational capabilities of a mobile phone. It introduces a new development around the concept of time-surfaces. It also presents a novel event-based methodology to dynamically remove backgrounds that uses the high temporal resolution properties of event-based cameras. To our knowledge, this is the first Android event-based framework for vision-based recognition of dynamic gestures running on a smartphone without off-board processing. We assess the performances by considering several scenarios in both indoors and outdoors, for static and dynamic conditions, in uncontrolled lighting conditions. We also introduce a new event-based dataset for gesture recognition with static and dynamic backgrounds (made publicly available). The set of gestures has been selected following a clinical trial to allow human-machine interaction for the visually impaired and older adults. We finally report comparisons with prior work that addressed event-based gesture recognition reporting comparable results, without the use of advanced classification techniques nor power greedy hardware.},
author = {Maro, Jean-Matthieu and Ieng, Sio-Hoi and Benosman, Ryad},
doi = {10.3389/fnins.2020.00275},
issn = {1662-453X},
journal = {Frontiers in Neuroscience},
keywords = {\#nosource, Background Suppression, Dynamic Gesture Recognition, Dynamic Vision Sensor (Dvs), Event-based, Gesture Recognition, Mobile Device, Neuromorphic, Smartphone, ⛔ No INSPIRE recid found},
language = {eng},
month = jan,
pages = {275},
title = {Event-Based Gesture Recognition with Dynamic Background Suppression Using Smartphone Computational Capabilities},
url = {https://europepmc.org/articles/PMC7160298},
urldate = {2022-09-28},
volume = {14},
year = {2020},
bdsk-url-1 = {https://europepmc.org/articles/PMC7160298},
bdsk-url-2 = {https://doi.org/10.3389/fnins.2020.00275}}
@article{masquelier_competitive_2009,
author = {Masquelier, Timoth{\'e}e and Guyonneau, Rudy and Thorpe, Simon J.},
doi = {10.1162/neco.2008.06-08-804},
issn = {0899-7667, 1530-888X},
journal = {Neural Computation},
keywords = {\#nosource, ⛔ No INSPIRE recid found},
language = {en},
month = may,