@article{schindelin_fiji_2012,
    title = {Fiji: {An} {Open}-{Source} {Platform} for {Biological}-{Image} {Analysis}},
    volume = {9},
    issn = {1548-7105},
    shorttitle = {Fiji},
    doi = {10.1038/nmeth.2019},
    abstract = {Fiji is a distribution of the popular open-source software ImageJ focused on biological-image analysis. Fiji uses modern software engineering practices to combine powerful software libraries with a broad range of scripting languages to enable rapid prototyping of image-processing algorithms. Fiji facilitates the transformation of new algorithms into ImageJ plugins that can be shared with end users through an integrated update system. We propose Fiji as a platform for productive collaboration between computer science and biology research communities.},
    language = {eng},
    number = {7},
    journal = {Nature Methods},
    author = {Schindelin, Johannes and Arganda-Carreras, Ignacio and Frise, Erwin and Kaynig, Verena and Longair, Mark and Pietzsch, Tobias and Preibisch, Stephan and Rueden, Curtis and Saalfeld, Stephan and Schmid, Benjamin and Tinevez, Jean-Yves and White, Daniel James and Hartenstein, Volker and Eliceiri, Kevin and Tomancak, Pavel and Cardona, Albert},
    month = jun,
    year = {2012},
    pmid = {22743772},
    pmcid = {PMC3855844},
    keywords = {Algorithms, Animals, Brain, Computational Biology, Drosophila melanogaster, Image Enhancement, Image Processing, Computer-Assisted, Imaging, Three-Dimensional, Information Dissemination, Software, Software Design},
    pages = {676--682},
}

@article{rueden_imagej2_2017,
    title = {{ImageJ2}: {ImageJ} for the next generation of scientific image data},
    volume = {18},
    issn = {1471-2105},
    shorttitle = {{ImageJ2}},
    url = {https://doi.org/10.1186/s12859-017-1934-z},
    doi = {10.1186/s12859-017-1934-z},
    abstract = {ImageJ is an image analysis program extensively used in the biological sciences and beyond. Due to its ease of use, recordable macro language, and extensible plug-in architecture, ImageJ enjoys contributions from non-programmers, amateur programmers, and professional developers alike. Enabling such a diversity of contributors has resulted in a large community that spans the biological and physical sciences. However, a rapidly growing user base, diverging plugin suites, and technical limitations have revealed a clear need for a concerted software engineering effort to support emerging imaging paradigms, to ensure the software’s ability to handle the requirements of modern science.},
    number = {1},
    urldate = {2021-02-28},
    journal = {BMC Bioinformatics},
    author = {Rueden, Curtis T. and Schindelin, Johannes and Hiner, Mark C. and DeZonia, Barry E. and Walter, Alison E. and Arena, Ellen T. and Eliceiri, Kevin W.},
    month = nov,
    year = {2017},
    keywords = {Extensibility, Image processing, ImageJ, ImageJ2, Interoperability, N-dimensional, Open development, Open source, Reproducibility},
    pages = {529},
}

@incollection{paszke_pytorch_2019,
    title = {{PyTorch}: {An} {Imperative} {Style}, {High}-{Performance} {Deep} {Learning} {Library}},
    url = {http://papers.neurips.cc/paper/9015-pytorch-an-imperative-style-high-performance-deep-learning-library.pdf},
    booktitle = {Advances in {Neural} {Information} {Processing} {Systems} 32},
    publisher = {Curran Associates, Inc.},
    author = {Paszke, Adam and Gross, Sam and Massa, Francisco and Lerer, Adam and Bradbury, James and Chanan, Gregory and Killeen, Trevor and Lin, Zeming and Gimelshein, Natalia and Antiga, Luca and Desmaison, Alban and Kopf, Andreas and Yang, Edward and DeVito, Zachary and Raison, Martin and Tejani, Alykhan and Chilamkurthy, Sasank and Steiner, Benoit and Fang, Lu and Bai, Junjie and Chintala, Soumith},
    editor = {Wallach, H. and Larochelle, H. and Beygelzimer, A. and Alché-Buc, F. and Fox, E. and Garnett, R.},
    year = {2019},
    pages = {8024--8035},
}

@misc{kokhlikyan_pytorch_2019,
    title = {{PyTorch} {Captum}},
    url = {https://github.com/pytorch/captum},
    publisher = {GitHub},
    author = {Kokhlikyan, Narine and Miglani, Vivek and Martin, Miguel and Wang, Edward and Reynolds, Jonathan and Melnikov, Alexander and Lunova, Natalia and Reblitz-Richardson, Orion},
    year = {2019},
}

@article{kingma_adam_2017,
    title = {Adam: {A} {Method} for {Stochastic} {Optimization}},
    shorttitle = {Adam},
    url = {http://arxiv.org/abs/1412.6980},
    abstract = {We introduce Adam, an algorithm for first-order gradient-based optimization of stochastic objective functions, based on adaptive estimates of lower-order moments. The method is straightforward to implement, is computationally efficient, has little memory requirements, is invariant to diagonal rescaling of the gradients, and is well suited for problems that are large in terms of data and/or parameters. The method is also appropriate for non-stationary objectives and problems with very noisy and/or sparse gradients. The hyper-parameters have intuitive interpretations and typically require little tuning. Some connections to related algorithms, on which Adam was inspired, are discussed. We also analyze the theoretical convergence properties of the algorithm and provide a regret bound on the convergence rate that is comparable to the best known results under the online convex optimization framework. Empirical results demonstrate that Adam works well in practice and compares favorably to other stochastic optimization methods. Finally, we discuss AdaMax, a variant of Adam based on the infinity norm.},
    urldate = {2021-03-03},
    journal = {arXiv:1412.6980 [cs]},
    author = {Kingma, Diederik P. and Ba, Jimmy},
    month = jan,
    year = {2017},
    note = {arXiv: 1412.6980},
    keywords = {Computer Science - Machine Learning},
    annote = {Comment: Published as a conference paper at the 3rd International Conference for Learning Representations, San Diego, 2015},
}

@article{akiba_optuna_2019,
    title = {Optuna: {A} {Next}-generation {Hyperparameter} {Optimization} {Framework}},
    shorttitle = {Optuna},
    url = {http://arxiv.org/abs/1907.10902},
    abstract = {The purpose of this study is to introduce new design-criteria for next-generation hyperparameter optimization software. The criteria we propose include (1) define-by-run API that allows users to construct the parameter search space dynamically, (2) efficient implementation of both searching and pruning strategies, and (3) easy-to-setup, versatile architecture that can be deployed for various purposes, ranging from scalable distributed computing to light-weight experiment conducted via interactive interface. In order to prove our point, we will introduce Optuna, an optimization software which is a culmination of our effort in the development of a next generation optimization software. As an optimization software designed with define-by-run principle, Optuna is particularly the first of its kind. We will present the design-techniques that became necessary in the development of the software that meets the above criteria, and demonstrate the power of our new design through experimental results and real world applications. Our software is available under the MIT license (https://github.com/pfnet/optuna/).},
    urldate = {2021-02-28},
    journal = {arXiv:1907.10902 [cs, stat]},
    author = {Akiba, Takuya and Sano, Shotaro and Yanase, Toshihiko and Ohta, Takeru and Koyama, Masanori},
    month = jul,
    year = {2019},
    note = {arXiv: 1907.10902},
    keywords = {Computer Science - Machine Learning, Statistics - Machine Learning},
    annote = {Comment: 10 pages, Accepted at KDD 2019 Applied Data Science track},
}

@article{bergstra_algorithms_2011,
    title = {Algorithms for {Hyper}-{Parameter} {Optimization}},
    volume = {24},
    url = {https://proceedings.neurips.cc/paper/2011/hash/86e8f7ab32cfd12577bc2619bc635690-Abstract.html},
    language = {en},
    urldate = {2021-02-28},
    journal = {Advances in Neural Information Processing Systems},
    author = {Bergstra, James and Bardenet, Rémi and Bengio, Yoshua and Kégl, Balázs},
    year = {2011},
}

@article{pedregosa_scikit_2011,
    title = {Scikit-learn: {Machine} {Learning} in {Python}},
    volume = {12},
    shorttitle = {Scikit-learn},
    url = {http://jmlr.org/papers/v12/pedregosa11a.html},
    abstract = {Scikit-learn is a Python module integrating a wide range of state-of-the-art machine learning algorithms for medium-scale supervised and unsupervised problems. This package focuses on bringing machine learning to non-specialists using a general-purpose high-level language. Emphasis is put on ease of use, performance, documentation, and API consistency. It has minimal dependencies and is distributed under the simplified BSD license, encouraging its use in both academic and commercial settings. Source code, binaries, and documentation can be downloaded from http://scikit-learn.sourceforge.net.},
    number = {85},
    urldate = {2021-02-28},
    journal = {Journal of Machine Learning Research},
    author = {Pedregosa, Fabian and Varoquaux, Gaël and Gramfort, Alexandre and Michel, Vincent and Thirion, Bertrand and Grisel, Olivier and Blondel, Mathieu and Prettenhofer, Peter and Weiss, Ron and Dubourg, Vincent and Vanderplas, Jake and Passos, Alexandre and Cournapeau, David and Brucher, Matthieu and Perrot, Matthieu and Duchesnay, Édouard},
    year = {2011},
    pages = {2825--2830},
}

@article{sundararajan_axiomatic_2017,
    title = {Axiomatic {Attribution} for {Deep} {Networks}},
    url = {http://arxiv.org/abs/1703.01365},
    abstract = {We study the problem of attributing the prediction of a deep network to its input features, a problem previously studied by several other works. We identify two fundamental axioms---Sensitivity and Implementation Invariance that attribution methods ought to satisfy. We show that they are not satisfied by most known attribution methods, which we consider to be a fundamental weakness of those methods. We use the axioms to guide the design of a new attribution method called Integrated Gradients. Our method requires no modification to the original network and is extremely simple to implement; it just needs a few calls to the standard gradient operator. We apply this method to a couple of image models, a couple of text models and a chemistry model, demonstrating its ability to debug networks, to extract rules from a network, and to enable users to engage with models better.},
    urldate = {2021-02-28},
    journal = {arXiv:1703.01365 [cs]},
    author = {Sundararajan, Mukund and Taly, Ankur and Yan, Qiqi},
    month = jun,
    year = {2017},
    note = {arXiv: 1703.01365},
    keywords = {Computer Science - Machine Learning},
}