diff --git a/.gitattributes b/.gitattributes
deleted file mode 100644
index 4ce3503..0000000
--- a/.gitattributes
+++ /dev/null
@@ -1 +0,0 @@
-*.rst whitespace=tab-in-indent conflict-marker-size=79
diff --git a/.github/workflows/add-to-dashboard.yml b/.github/workflows/add-to-dashboard.yml
deleted file mode 100644
index e72d989..0000000
--- a/.github/workflows/add-to-dashboard.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-name: Add Issue or Pull Request to Dashboard
-
-on:
- issues:
- types:
- - opened
- pull_request:
- types:
- - opened
-
-jobs:
- add-to-project:
- name: Add issue or pull request to project
- runs-on: ubuntu-latest
- steps:
- - uses: actions/add-to-project@v0.5.0
- with:
- project-url: https://github.com/orgs/catalystneuro/projects/3
- github-token: ${{ secrets.PROJECT_TOKEN }}
diff --git a/.github/workflows/check_external_links.yml b/.github/workflows/check_external_links.yml
new file mode 100644
index 0000000..9dd1a84
--- /dev/null
+++ b/.github/workflows/check_external_links.yml
@@ -0,0 +1,32 @@
+name: Check Sphinx external links
+on:
+ push:
+ schedule:
+ - cron: '0 5 * * 0' # once every Sunday at midnight ET
+ workflow_dispatch:
+
+jobs:
+ check-external-links:
+ name: Check for broken Sphinx external links
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout repo
+ uses: actions/checkout@v4
+ with:
+ fetch-depth: 0 # tags are required to determine the version
+
+ - name: Set up Python
+ uses: actions/setup-python@v5
+ with:
+ python-version: "3.12"
+
+ - name: Install Sphinx dependencies and package
+ run: |
+ python -m pip install --upgrade pip
+ python -m pip install -r requirements-dev.txt
+ python -m pip install .
+
+ - name: Check Sphinx external links
+ run: |
+ cd docs # run_doc_autogen assumes spec is found in ../spec/
+ sphinx-build -b linkcheck ./source ./test_build
diff --git a/.github/workflows/codespell.yml b/.github/workflows/codespell.yml
index 6575870..314b085 100644
--- a/.github/workflows/codespell.yml
+++ b/.github/workflows/codespell.yml
@@ -1,6 +1,6 @@
name: Codespell
on:
- pull_request:
+ push:
workflow_dispatch:
jobs:
@@ -9,6 +9,6 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
- name: Codespell
uses: codespell-project/actions-codespell@v2
diff --git a/.github/workflows/ruff.yml b/.github/workflows/ruff.yml
new file mode 100644
index 0000000..9b4f05d
--- /dev/null
+++ b/.github/workflows/ruff.yml
@@ -0,0 +1,14 @@
+name: Ruff
+on:
+ push:
+ workflow_dispatch:
+
+jobs:
+ ruff:
+ name: Check for style errors and common problems
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+ - name: Ruff
+ uses: chartboost/ruff-action@v1
diff --git a/.github/workflows/run_all_tests.yml b/.github/workflows/run_all_tests.yml
index 8f16226..6292cad 100644
--- a/.github/workflows/run_all_tests.yml
+++ b/.github/workflows/run_all_tests.yml
@@ -1,6 +1,6 @@
name: Run all tests
on:
- pull_request:
+ push:
schedule:
- cron: '0 5 * * 0' # once every Sunday at midnight ET
workflow_dispatch:
@@ -12,6 +12,9 @@ jobs:
defaults:
run:
shell: bash
+ concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}-${{ matrix.name }}
+ cancel-in-progress: true
strategy:
fail-fast: false
matrix:
@@ -38,19 +41,13 @@ jobs:
- { name: macos-python3.12 , requirements: pinned , python-ver: "3.12", os: macos-latest }
- { name: macos-python3.12-upgraded , requirements: upgraded , python-ver: "3.12", os: macos-latest }
steps:
- - name: Cancel non-latest runs
- uses: styfle/cancel-workflow-action@0.11.0
+ - name: Checkout repo
+ uses: actions/checkout@v4
with:
- all_but_latest: true
- access_token: ${{ github.token }}
-
- - uses: actions/checkout@v3
- with:
- submodules: 'recursive'
- fetch-depth: 0 # fetch tags
+ fetch-depth: 0 # tags are required to determine the version
- name: Set up Python
- uses: actions/setup-python@v4
+ uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-ver }}
@@ -64,19 +61,20 @@ jobs:
if: ${{ matrix.requirements == 'minimum' }}
run: |
python -m pip install -r requirements-min.txt -r requirements-dev.txt
- python -m pip install -e .
+ python -m pip install .
- name: Install run requirements (pinned)
if: ${{ matrix.requirements == 'pinned' }}
run: |
python -m pip install -r requirements-dev.txt
- python -m pip install -e .
+ python -m pip install .
- name: Install run requirements (upgraded)
if: ${{ matrix.requirements == 'upgraded' }}
run: |
python -m pip install -r requirements-dev.txt
- python -m pip install -U -e .
+ # force upgrade of all dependencies to latest versions within allowed range
+ python -m pip install -U --upgrade-strategy eager .
- name: Run tests
run: |
@@ -110,6 +108,9 @@ jobs:
defaults:
run:
shell: bash -l {0} # needed for conda environment to work
+ concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}-${{ matrix.name }}
+ cancel-in-progress: true
strategy:
fail-fast: false
matrix:
@@ -122,18 +123,13 @@ jobs:
- { name: conda-linux-python3.12 , requirements: pinned , python-ver: "3.12", os: ubuntu-latest }
- { name: conda-linux-python3.12-upgraded , requirements: upgraded , python-ver: "3.12", os: ubuntu-latest }
steps:
- - name: Cancel any previous incomplete runs
- uses: styfle/cancel-workflow-action@0.11.0
- with:
- access_token: ${{ github.token }}
-
- - uses: actions/checkout@v3
+ - name: Checkout repo
+ uses: actions/checkout@v4
with:
- submodules: 'recursive'
- fetch-depth: 0 # fetch tags
+ fetch-depth: 0 # tags are required to determine the version
- name: Set up Conda
- uses: conda-incubator/setup-miniconda@v2
+ uses: conda-incubator/setup-miniconda@v3
with:
auto-update-conda: true
auto-activate-base: true
@@ -151,19 +147,20 @@ jobs:
if: ${{ matrix.requirements == 'minimum' }}
run: |
python -m pip install -r requirements-min.txt -r requirements-dev.txt
- python -m pip install -e .
+ python -m pip install .
- name: Install run requirements (pinned)
if: ${{ matrix.requirements == 'pinned' }}
run: |
python -m pip install -r requirements-dev.txt
- python -m pip install -e .
+ python -m pip install .
- name: Install run requirements (upgraded)
if: ${{ matrix.requirements == 'upgraded' }}
run: |
python -m pip install -r requirements-dev.txt
- python -m pip install -U -e .
+ # force upgrade of all dependencies to latest versions within allowed range
+ python -m pip install -U --upgrade-strategy eager .
- name: Run tests
run: |
diff --git a/.github/workflows/run_coverage.yml b/.github/workflows/run_coverage.yml
index 4883bf4..5aa6f9e 100644
--- a/.github/workflows/run_coverage.yml
+++ b/.github/workflows/run_coverage.yml
@@ -1,6 +1,6 @@
name: Run code coverage
on:
- pull_request:
+ push:
workflow_dispatch:
jobs:
@@ -13,26 +13,23 @@ jobs:
defaults:
run:
shell: bash
+ concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}-${{ matrix.os }}
+ cancel-in-progress: true
strategy:
matrix:
os: [ubuntu-latest, windows-latest, macos-latest]
env: # used by codecov-action
OS: ${{ matrix.os }}
- PYTHON: '3.11'
+ PYTHON: '3.12'
steps:
- - name: Cancel any previous incomplete runs
- uses: styfle/cancel-workflow-action@0.11.0
+ - name: Checkout repo
+ uses: actions/checkout@v4
with:
- all_but_latest: true
- access_token: ${{ github.token }}
-
- - uses: actions/checkout@v3
- with:
- submodules: 'recursive'
- fetch-depth: 0 # fetch tags
+ fetch-depth: 0 # tags are required to determine the version
- name: Set up Python
- uses: actions/setup-python@v4
+ uses: actions/setup-python@v5
with:
python-version: ${{ env.PYTHON }}
@@ -43,17 +40,18 @@ jobs:
- name: Install package
run: |
- python -m pip install -e . # must install in editable mode for coverage to find sources
+ python -m pip install .
python -m pip list
- name: Run tests and generate coverage report
run: |
- pytest --cov
- python -m coverage xml # codecov uploader requires xml format
- python -m coverage report -m
+ pytest --cov --cov-report=xml --cov-report=term # codecov uploader requires xml format
- # TODO uncomment after setting up repo on codecov.io
+ # TODO uncomment after setting up repo on codecov.io and adding token
# - name: Upload coverage to Codecov
- # uses: codecov/codecov-action@v3
+ # uses: codecov/codecov-action@v4
# with:
# fail_ci_if_error: true
+ # file: ./coverage.xml
+ # env:
+ # CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
diff --git a/.github/workflows/validate_schema.yml b/.github/workflows/validate_schema.yml
new file mode 100644
index 0000000..d050de1
--- /dev/null
+++ b/.github/workflows/validate_schema.yml
@@ -0,0 +1,22 @@
+name: Validate schema
+
+on: [push, pull_request, workflow_dispatch]
+
+jobs:
+ validate:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - name: Set up Python 3.12
+      uses: actions/setup-python@v5
+ with:
+ python-version: "3.12"
+ - name: Install HDMF
+ run: |
+ pip install hdmf
+ - name: Download latest nwb schema language specification
+ run: |
+ curl -L https://raw.githubusercontent.com/NeurodataWithoutBorders/nwb-schema/dev/nwb.schema.json -o nwb.schema.json
+ - name: Validate schema specification
+ run: |
+        validate_hdmf_spec spec -m nwb.schema.json
diff --git a/.gitignore b/.gitignore
index a40ad0b..fac0f30 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,9 +4,6 @@
# generated docs
docs/source/_format_auto_docs
-# copied spec files
-src/pynwb/ndx_microscopy/spec/*.yaml
-
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
@@ -29,6 +26,7 @@ parts/
sdist/
var/
wheels/
+share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
@@ -47,14 +45,18 @@ pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
+.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
+*.py,cover
.hypothesis/
.pytest_cache/
+cover/
+.ruff_cache/
# Translations
*.mo
@@ -64,6 +66,7 @@ coverage.xml
*.log
local_settings.py
db.sqlite3
+db.sqlite3-journal
# Flask stuff:
instance/
@@ -76,16 +79,49 @@ instance/
docs/_build/
# PyBuilder
+.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
-# pyenv
-.python-version
+# IPython
+profile_default/
+ipython_config.py
-# celery beat schedule file
+# pyenv
+# For a library or package, you might want to ignore these files since the code is
+# intended to run in multiple environments; otherwise, check them in:
+# .python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# poetry
+# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+#poetry.lock
+
+# pdm
+# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+#pdm.lock
+# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+# in version control.
+# https://pdm.fming.dev/#use-with-ide
+.pdm.toml
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+__pypackages__/
+
+# Celery stuff
celerybeat-schedule
+celerybeat.pid
# SageMath parsed files
*.sage.py
@@ -111,9 +147,24 @@ venv.bak/
# mypy
.mypy_cache/
+.dmypy.json
+dmypy.json
-# Mac finder
-.DS_Store
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
# PyCharm
-.idea/
+# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+# and can be added to the global gitignore or merged into this file. For a more nuclear
+# option (not recommended) you can uncomment the following to ignore the entire idea folder.
+#.idea/
+
+# Mac finder
+.DS_Store
diff --git a/.idea/.gitignore b/.idea/.gitignore
new file mode 100644
index 0000000..13566b8
--- /dev/null
+++ b/.idea/.gitignore
@@ -0,0 +1,8 @@
+# Default ignored files
+/shelf/
+/workspace.xml
+# Editor-based HTTP Client requests
+/httpRequests/
+# Datasource local storage ignored files
+/dataSources/
+/dataSources.local.xml
diff --git a/.idea/vcs.xml b/.idea/vcs.xml
new file mode 100644
index 0000000..d843f34
--- /dev/null
+++ b/.idea/vcs.xml
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="VcsDirectoryMappings">
+    <mapping directory="" vcs="Git" />
\ No newline at end of file
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
deleted file mode 100644
index e549191..0000000
--- a/.pre-commit-config.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
-repos:
-- repo: https://github.com/pre-commit/pre-commit-hooks
- rev: v4.6.0
- hooks:
- - id: check-yaml
- - id: check-toml
- - id: end-of-file-fixer
- - id: trailing-whitespace
- - id: check-added-large-files
-- repo: https://github.com/psf/black
- rev: 24.8.0
- hooks:
- - id: black
-- repo: https://github.com/PyCQA/isort
- rev: 5.13.2
- hooks:
- - id: isort
diff --git a/CHANGELOG.md b/CHANGELOG.md
new file mode 100644
index 0000000..db3ef69
--- /dev/null
+++ b/CHANGELOG.md
@@ -0,0 +1 @@
+# Changelog for ndx-microscopy
diff --git a/LICENSE.txt b/LICENSE.txt
index 636d369..fc051b1 100644
--- a/LICENSE.txt
+++ b/LICENSE.txt
@@ -1,21 +1,29 @@
-MIT License
+BSD 3-Clause License
-Copyright (c) 2023 Cody Baker and Alessandra Trapani
+Copyright (c) 2024, Alessandra Trapani, Cody Baker
+All rights reserved.
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
+1. Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
+2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/MANIFEST.in b/MANIFEST.in
deleted file mode 100644
index 079351b..0000000
--- a/MANIFEST.in
+++ /dev/null
@@ -1,11 +0,0 @@
-include LICENSE.txt
-include README.md
-include requirements.txt
-
-include spec/*.yaml
-
-recursive-include tests *
-recursive-exclude * __pycache__
-recursive-exclude * *.py[co]
-
-recursive-include docs *.css *.rst conf.py conf_doc_autogen.py Makefile make.bat README.md
diff --git a/NEXTSTEPS.md b/NEXTSTEPS.md
index 0faf651..e67a62a 100644
--- a/NEXTSTEPS.md
+++ b/NEXTSTEPS.md
@@ -4,12 +4,12 @@
## Creating Your Extension
-1. In a terminal, change directory into the new ndx-microscopy directory.
+1. In a terminal, change directory into the new ndx-microscopy directory: `cd ndx-microscopy`
-2. Add any packages required by your extension to `requirements.txt` and `setup.py`.
+2. Add any packages required by your extension to the `dependencies` key in `pyproject.toml`.
-3. Run `python -m pip install -r requirements.txt -r requirements-dev.txt` to install the `pynwb` package
-and any other packages required to install, develop, and document your extension.
+3. Run `python -m pip install -e .` to install your new extension Python package
+and any other packages required to develop, document, and run your extension.
4. Modify `src/spec/create_extension_spec.py` to define your extension.
@@ -19,16 +19,18 @@ and any other packages required to install, develop, and document your extension
6. Define API classes for your new extension data types.
- - As a starting point, `src/pynwb/__init__.py` includes an example for how to use
- the `pynwb.get_class` to get a basic Python class for your new extension data
+ - As a starting point, `src/pynwb/ndx_microscopy/__init__.py` includes an
+ example for how to use
+ the `pynwb.get_class` to generate a basic Python class for your new extension data
type. This class contains a constructor and properties for the new data type.
- Instead of using `pynwb.get_class`, you can define your own custom class for the
new type, which will allow you to customize the class methods, customize the
- object mapping, and create convenience functions. See
- [https://pynwb.readthedocs.io/en/stable/tutorials/general/extensions.html](https://pynwb.readthedocs.io/en/stable/tutorials/general/extensions.html)
+ object mapping, and create convenience functions. See the
+ [Extending NWB tutorial](https://pynwb.readthedocs.io/en/stable/tutorials/general/extensions.html)
for more details.
-7. Define tests for your new extension data types in `src/pynwb/tests` or `src/matnwb/tests`.
+7. Define tests for your new extension data types in
+`src/pynwb/ndx_microscopy/tests` or `src/matnwb/tests`.
A test for the example `TetrodeSeries` data type is provided as a reference and should be
replaced or removed.
@@ -40,10 +42,26 @@ replaced or removed.
new functions) and **integration tests** (e.g., write the new data types to file, read
the file, and confirm the read data types are equal to the written data types) is
highly encouraged.
-
-8. You may need to modify `setup.py` and re-run `python setup.py install` if you
+ - By default, to aid with debugging, the project is configured NOT to run code coverage as
+ part of the tests.
+ Code coverage reporting is useful to help with creation of tests and report test coverage.
+ However, with this option enabled, breakpoints for debugging with pdb are being ignored.
+ To enable this option for code coverage reporting, uncomment out the following line in
+ your `pyproject.toml`: [line](https://github.com/nwb-extensions/ndx-template/blob/11ae225b3fd3934fa3c56e6e7b563081793b3b43/%7B%7B%20cookiecutter.namespace%20%7D%7D/pyproject.toml#L82-L83
+)
+
+7. (Optional) Define custom visualization widgets for your new extension data types in
+`src/pynwb/ndx_microscopy/widgets` so that the visualizations can be displayed with
+[nwbwidgets](https://github.com/NeurodataWithoutBorders/nwbwidgets).
+You will also need to update the `vis_spec` dictionary in
+`src/pynwb/ndx_microscopy/widgets/__init__.py` so that
+nwbwidgets can find your custom visualizations.
+
+8. You may need to modify `pyproject.toml` and re-run `python -m pip install -e .` if you
use any dependencies.
+9. Update the `CHANGELOG.md` regularly to document changes to your extension.
+
## Documenting and Publishing Your Extension to the Community
@@ -65,16 +83,26 @@ your extension.
7. Add a license file. Permissive licenses should be used if possible. **A [BSD license](https://opensource.org/licenses/BSD-3-Clause) is recommended.**
-8. Make a release for the extension on GitHub with the version number specified. e.g. if version is 0.1.0, then this page should exist: https://github.com/CodyCBakerPhD/ndx-microscopy/releases/tag/0.1.0 . For instructions on how to make a release on GitHub see [here](https://help.github.com/en/github/administering-a-repository/creating-releases).
+8. Update the `CHANGELOG.md` to document changes to your extension.
+
+8. Push your repository to GitHub. A default set of GitHub Actions workflows is set up to
+test your code on Linux, Windows, Mac OS, and Linux using conda; upload code coverage
+stats to codecov.io; check for spelling errors; check for style errors; and check for broken
+links in the documentation. For the code coverage workflow to work, you will need to
+set up the repo on codecov.io and uncomment the "Upload coverage to Codecov" step
+in `.github/workflows/run_coverage.yml`.
+
+8. Make a release for the extension on GitHub with the version number specified. e.g. if version is 0.1.0, then this page should exist: https://github.com/alessandratrapani/ndx-microscopy/releases/tag/0.1.0 . For instructions on how to make a release on GitHub see [here](https://help.github.com/en/github/administering-a-repository/creating-releases).
9. Publish your updated extension on [PyPI](https://pypi.org/).
- - Follow these directions: https://packaging.python.org/tutorials/packaging-projects/
- - You may need to modify `setup.py`
+ - Follow these directions: https://packaging.python.org/en/latest/tutorials/packaging-projects/
+ - You may need to modify `pyproject.toml`
- If your extension version is 0.1.0, then this page should exist: https://pypi.org/project/ndx-microscopy/0.1.0
- Once your GitHub release and ``setup.py`` are ready, publishing on PyPI:
+ Once your GitHub release and `pyproject.toml` are ready, publishing on PyPI:
```bash
- python setup.py sdist bdist_wheel
+ python -m pip install --upgrade build twine
+ python -m build
twine upload dist/*
```
@@ -104,11 +132,12 @@ with information on where to find your NWB extension.
```yaml
name: ndx-microscopy
version: 0.1.0
- src: https://github.com/CodyCBakerPhD/ndx-microscopy
+ src: https://github.com/alessandratrapani/ndx-microscopy
pip: https://pypi.org/project/ndx-microscopy/
- license: MIT
- maintainers:
- - CodyCBakerPhD
+ license: BSD-3
+ maintainers:
+ - alessandratrapani
+ - codycbakerphd
```
14. Edit `staged-extensions/ndx-microscopy/README.md`
diff --git a/README.md b/README.md
index 148426c..b04fce4 100644
--- a/README.md
+++ b/README.md
@@ -1,189 +1,14 @@
# ndx-microscopy Extension for NWB
-An enhancement to core NWB schema types related to microscopy data.
-
-Planned for an eventual NWBEP with the TAB.
-
+An NWB extension to demonstrate the TAB proposal for enhancements to optical physiology neurodata types.
## Installation
-```
-git clone https://github.com/catalystneuro/ndx-microscopy
-pip install ndx-microscopy
-```
-
## Usage
```python
-# TODO
-```
-
-
-## Entity relationship diagram
-
-```mermaid
-%%{init: {'theme': 'base', 'themeVariables': {'primaryColor': '#ffffff', "primaryBorderColor': '#144E73', 'lineColor': '#D96F32'}}}%%
-
-
-classDiagram
- direction BT
-
- class MicroscopySeries {
- <>
-
- --------------------------------------
- links
- --------------------------------------
- microscope : Microscope
- light_source : MicroscopyLightSource
- optical_channel : MicroscopyOpticalChannel
- }
-
- class PlanarMicroscopySeries {
- <>
-
- --------------------------------------
- datasets
- --------------------------------------
- data : numeric, frame x height x width
- --> unit : text
-
- --------------------------------------
- links
- --------------------------------------
- imaging_space : PlanarImagingSpace
- }
-
- class VariableDepthMicroscopySeries {
- <>
-
- --------------------------------------
- datasets
- --------------------------------------
- data : numeric, frame x height x width
- --> unit : text
- depth_per_frame_in_um : numeric, length of frames
-
- --------------------------------------
- links
- --------------------------------------
- imaging_space : PlanarImagingSpace
- }
-
- class VolumetricMicroscopySeries {
- <>
-
- --------------------------------------
- datasets
- --------------------------------------
- data : numeric, frame x height x width x depth
- --> unit : text
-
- --------------------------------------
- links
- --------------------------------------
- imaging_space : VolumetricImageSpace
- }
-
- class ImagingSpace{
- <>
-
- --------------------------------------
- datasets
- --------------------------------------
- description : text
- origin_coordinates : numeric, length 3, optional
- --> unit : text, default="micrometers"
-
- --------------------------------------
- attributes
- --------------------------------------
- location : text, optional
- }
-
- class PlanarImagingSpace{
- <>
-
- --------------------------------------
- datasets
- --------------------------------------
- grid_spacing : numeric, length 2, optional
- --> unit : text, default="micrometers"
-
- --------------------------------------
- attributes
- --------------------------------------
- reference_frame : text, optional
- }
-
- class VolumetricImagingSpace{
- <>
-
- --------------------------------------
- datasets
- --------------------------------------
- grid_spacing : numeric, length 2, optional
- --> unit : text, default="micrometers"
-
- --------------------------------------
- attributes
- --------------------------------------
- reference_frame : text, optional
- }
-
- class MicroscopyOpticalChannel{
- <>
-
- --------------------------------------
- datasets
- --------------------------------------
- description : text
-
- --------------------------------------
- attributes
- --------------------------------------
- indicator : text
- filter_description : text, optional
- emission_wavelength_in_nm : numeric, optional
- }
-
- class MicroscopyLightSource{
- <>
-
- --------------------------------------
- attributes
- --------------------------------------
- model : text, optional
- filter_description : text, optional
- excitation_wavelength_in_nm : numeric, optional
- peak_power_in_W : numeric, optional
- peak_pulse_energy_in_J : numeric, optional
- intensity_in_W_per_m2 : numeric, optional
- exposure_time_in_s : numeric, optional
- pulse_rate_in_Hz : numeric, optional
- }
-
- class Microscope{
- <>
-
- --------------------------------------
- attributes
- --------------------------------------
- model : text, optional
- }
- PlanarMicroscopySeries *-- MicroscopySeries : extends
- PlanarMicroscopySeries -- PlanarImagingSpace : links
- VariableDepthMicroscopySeries *-- MicroscopySeries : extends
- VariableDepthMicroscopySeries -- PlanarImagingSpace : links
- VolumetricMicroscopySeries *-- MicroscopySeries : extends
- VolumetricMicroscopySeries -- VolumetricImagingSpace : links
- PlanarImagingSpace *-- ImagingSpace : extends
- VolumetricImagingSpace *-- ImagingSpace : extends
- MicroscopySeries ..> Microscope : links
- MicroscopySeries ..> MicroscopyLightSource : links
- MicroscopySeries ..> MicroscopyOpticalChannel : links
```
---
diff --git a/docs/Makefile b/docs/Makefile
new file mode 100644
index 0000000..54e6545
--- /dev/null
+++ b/docs/Makefile
@@ -0,0 +1,179 @@
+
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS =
+SPHINXBUILD = sphinx-build
+SPHINXAPIDOC = sphinx-apidoc
+PAPER =
+BUILDDIR = build
+SRCDIR = ../src
+RSTDIR = source
+CONFDIR = $(PWD)/source
+
+
+# Internal variables.
+PAPEROPT_a4 = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
+# the i18n builder cannot share the environment and doctrees with the others
+I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+
+.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext fulldoc allclean
+
+help:
+ @echo "To update documentation sources from the format specification please use \`make apidoc'"
+ @echo ""
+	@echo "To build the documentation please use \`make <target>' where <target> is one of"
+ @echo " fulldoc to rebuild the apidoc, html, and latexpdf all at once"
+ @echo " html to make standalone HTML files"
+ @echo " dirhtml to make HTML files named index.html in directories"
+ @echo " singlehtml to make a single large HTML file"
+ @echo " pickle to make pickle files"
+ @echo " json to make JSON files"
+ @echo " htmlhelp to make HTML files and a HTML help project"
+ @echo " qthelp to make HTML files and a qthelp project"
+ @echo " devhelp to make HTML files and a Devhelp project"
+ @echo " epub to make an epub"
+ @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+ @echo " latexpdf to make LaTeX files and run them through pdflatex"
+ @echo " text to make text files"
+ @echo " man to make manual pages"
+ @echo " texinfo to make Texinfo files"
+ @echo " info to make Texinfo files and run them through makeinfo"
+ @echo " gettext to make PO message catalogs"
+ @echo " changes to make an overview of all changed/added/deprecated items"
+ @echo " linkcheck to check all external links for integrity"
+ @echo " doctest to run all doctests embedded in the documentation (if enabled)"
+ @echo " apidoc to to build RST from source code"
+ @echo " clean to clean all documents built by Sphinx in _build"
+ @echo " allclean to clean all autogenerated documents both from Sphinx and apidoc"
+
+allclean:
+ -rm -rf $(BUILDDIR)/* $(RSTDIR)/modules.rst
+ -rm $(RSTDIR)/_format_auto_docs/*.png
+ -rm $(RSTDIR)/_format_auto_docs/*.pdf
+ -rm $(RSTDIR)/_format_auto_docs/*.rst
+ -rm $(RSTDIR)/_format_auto_docs/*.inc
+
+clean:
+ -rm -rf $(BUILDDIR)/* $(RSTDIR)/modules.rst
+
+html:
+ $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
+
+dirhtml:
+ $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
+
+singlehtml:
+ $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
+ @echo
+ @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
+
+pickle:
+ $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
+ @echo
+ @echo "Build finished; now you can process the pickle files."
+
+json:
+ $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
+ @echo
+ @echo "Build finished; now you can process the JSON files."
+
+htmlhelp:
+ $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
+ @echo
+ @echo "Build finished; now you can run HTML Help Workshop with the" ".hhp project file in $(BUILDDIR)/htmlhelp."
+
+qthelp:
+ $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
+ @echo
+ @echo "Build finished; now you can run "qcollectiongenerator" with the" ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
+ @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/sample.qhcp"
+ @echo "To view the help file:"
+ @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/sample.qhc"
+
+devhelp:
+ $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
+ @echo
+ @echo "Build finished."
+ @echo "To view the help file:"
+ @echo "# mkdir -p $$HOME/.local/share/devhelp/sample"
+ @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/sample"
+ @echo "# devhelp"
+
+epub:
+ $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
+ @echo
+ @echo "Build finished. The epub file is in $(BUILDDIR)/epub."
+
+latex:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo
+ @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
+ @echo "Run \`make' in that directory to run these through (pdf)latex" "(use \`make latexpdf' here to do that automatically)."
+
+latexpdf:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo "Running LaTeX files through pdflatex..."
+ $(MAKE) -C $(BUILDDIR)/latex all-pdf
+ @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+text:
+ $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
+ @echo
+ @echo "Build finished. The text files are in $(BUILDDIR)/text."
+
+man:
+ $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
+ @echo
+ @echo "Build finished. The manual pages are in $(BUILDDIR)/man."
+
+texinfo:
+ $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+ @echo
+ @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
+ @echo "Run \`make' in that directory to run these through makeinfo" "(use \`make info' here to do that automatically)."
+
+info:
+ $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+ @echo "Running Texinfo files through makeinfo..."
+ make -C $(BUILDDIR)/texinfo info
+ @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
+
+gettext:
+ $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
+ @echo
+ @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
+
+changes:
+ $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
+ @echo
+ @echo "The overview file is in $(BUILDDIR)/changes."
+
+linkcheck:
+ $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
+ @echo
+ @echo "Link check complete; look for any errors in the above output " "or in $(BUILDDIR)/linkcheck/output.txt."
+
+doctest:
+ $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
+ @echo "Testing of doctests in the sources finished, look at the " "results in $(BUILDDIR)/doctest/output.txt."
+
+apidoc:
+ PYTHONPATH=$(CONFDIR):$(PYTHONPATH) nwb_generate_format_docs
+ @echo
+ @echo "Generate rst source files from NWB spec."
+
+fulldoc:
+ $(MAKE) allclean
+ @echo
+ @echo "Rebuilding apidoc, html, latexpdf"
+ $(MAKE) apidoc
+ $(MAKE) html
+ $(MAKE) latexpdf
diff --git a/docs/README.md b/docs/README.md
new file mode 100644
index 0000000..9a3a30d
--- /dev/null
+++ b/docs/README.md
@@ -0,0 +1,121 @@
+
+# Getting started
+
+## Generate Documentation
+
+* To generate the HTML version of your documentation run ``make html``.
+* The [hdmf-docutils](https://pypi.org/project/hdmf-docutils/) package must be installed.
+
+## Customize Your Extension Documentation
+
+* **extension description**
+ * Edit ``source/description.rst`` to describe your extension.
+
+* **release notes**
+ * Edit ``source/release_notes.rst`` to document improvements and fixes of your extension.
+
+* **documentation build settings**
+ * Edit ``source/conf.py`` to customize your extension documentation configuration.
+ * Edit ``source/conf_doc_autogen.py`` to customize the format documentation auto-generation based on
+ the YAML specification files.
+
+
+# Overview
+
+The specification documentation uses Sphinx [http://www.sphinx-doc.org/en/stable/index.html](http://www.sphinx-doc.org/en/stable/index.html)
+
+## Rebuilding All
+
+To rebuild the full documentation in html, latex, and PDF simply run:
+
+```
+make fulldoc
+```
+
+This is a convenience function that is equivalent to:
+
+```
+make allclean
+make apidoc
+make html
+make latexpdf
+```
+
+## Generating the format documentation from the format spec
+
+The format documentation is auto-generated from the format specification (YAML) sources via:
+
+```
+make apidoc
+```
+
+This will invoke the executable:
+
+```
+nwb_generate_format_docs
+```
+
+The script automatically generates a series of .rst, .png, and .pdf files that are stored in the folder `source/format_auto_docs`. The generated .rst files are included in `source/format.rst` and the png and pdf files are used as figures in the autogenerated docs.
+
+The folder `source/format_auto_docs` is reserved for autogenerated files, i.e., files in the folder should not be added or edited by hand as they will be deleted and rebuilt during the full build of the documentation.
+
+By default the Sphinx configuration is set up to always regenerate the sources whenever the docs are being built (see next section). This behavior can be customized via the `spec_doc_rebuild_always` parameter in `source/conf.py`.
+
+## Building a specific document type
+
+To build the documentation, run:
+
+```
+make <doctype>
+```
+
+where `<doctype>` is, e.g., `latexpdf`, `html`, `singlehtml`, or `man`. For a complete list of supported doc-types, see:
+
+```
+make help
+```
+
+## Cleaning up
+
+`make clean` cleans up all builds of the documentation located in `_build`.
+
+`make allclean` cleans up all builds of the documentation located in `_build` as well as all autogenerated sources stored in `source/format_auto_docs`.
+
+## Configuration
+
+The build of the documentation can be customized via a broad range of Sphinx options in:
+
+`source/conf_doc_autogen.py`
+
+In addition to standard Sphinx options, there are a number of additional options used to customize the content and structure of the autogenerated documents, e.g.:
+
+* `spec_show_yaml_src` - Boolean indicating whether the YAML sources should be included for the different Neurodata types
+* `spec_generate_src_file` - Boolean indicating whether the YAML sources of the neurodata_types should be rendered in a separate section (True) or in the same location as the main documentation
+* `spec_show_hierarchy_plots` - Boolean indicating whether we should generate and show figures of the hierarchy defined by the specifications as part of the documentation
+* `spec_file_per_type` - Boolean indicating whether we should generate separate .inc reStructuredText for each neurodata_type (True)
+or should all text be added to the main file (False)
+* `spec_show_subgroups_in_tables` - Should subgroups of the main groups be rendered in the table as well. Usually this is disabled since groups are rendered as separate sections in the text
+* `spec_appreviate_main_object_doc_in_tables` - Abbreviate the documentation of the main object for which a table is rendered in the table. This is commonly set to True as doc of the main object is already rendered as the main intro for the section describing the object
+* `spec_show_title_for_tables` - Add a title for the table showing the specifications.
+* `spec_show_subgroups_in_seperate_table` - Should top-level subgroups be listed in a separate table or as part of the main dataset and attributes table
+* `spec_table_depth_char` - Char to be used as prefix to indicate the depth of an object in the specification hierarchy. NOTE: The char used should be supported by LaTeX.
+* `spec_add_latex_clearpage_after_ndt_sections` - Add a LaTeX clearpage after each main section describing a neurodata_type. This helps in LaTeX to keep the ordering of figures, tables, and code blocks consistent in particular when the hierarchy_plots are included.
+* `spec_resolve_type_inc` - Resolve includes to always show the full list of objects that are part of a type (True) or to show only the parts that are actually new to a current type while only linking to base types (False)
+
+In addition, the location of the input format specification can be customized as follows:
+
+* `spec_input_spec_dir` - Directory where the YAML files for the namespace to be documented are located
+* `spec_input_namespace_filename` - Name of the YAML file with the specification of the Namespace to be documented
+* `spec_input_default_namespace` - Name of the default namespace in the file
+
+Finally, the name and location of output files can be customized as follows:
+
+* `spec_output_dir` - Directory where the autogenerated files should be stored
+* `spec_output_master_filename` - Name of the master .rst file that includes all the autogenerated docs
+* `spec_output_doc_filename` - Name of the file where the main documentation goes
+* `spec_output_src_filename` - Name of the file where the sources of the format spec go. NOTE: This file is only generated if `spec_generate_src_file` is enabled
+* `spec_output_doc_type_hierarchy_filename` - Name of the file containing the type hierarchy. (Included in `spec_output_doc_filename`)
+
+In the regular Sphinx `source/conf.py` file, we can then also set:
+
+* `spec_doc_rebuild_always` - Boolean to define whether to always rebuild the source docs from YAML when doing a regular build of the sources (e.g., via `make html`) even if the folder with the source files already exists
diff --git a/docs/make.bat b/docs/make.bat
new file mode 100644
index 0000000..dc1312a
--- /dev/null
+++ b/docs/make.bat
@@ -0,0 +1,35 @@
+@ECHO OFF
+
+pushd %~dp0
+
+REM Command file for Sphinx documentation
+
+if "%SPHINXBUILD%" == "" (
+ set SPHINXBUILD=sphinx-build
+)
+set SOURCEDIR=source
+set BUILDDIR=build
+
+%SPHINXBUILD% >NUL 2>NUL
+if errorlevel 9009 (
+ echo.
+ echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
+ echo.installed, then set the SPHINXBUILD environment variable to point
+ echo.to the full path of the 'sphinx-build' executable. Alternatively you
+ echo.may add the Sphinx directory to PATH.
+ echo.
+ echo.If you don't have Sphinx installed, grab it from
+ echo.https://www.sphinx-doc.org/
+ exit /b 1
+)
+
+if "%1" == "" goto help
+
+%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
+goto end
+
+:help
+%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
+
+:end
+popd
diff --git a/docs/source/_static/theme_overrides.css b/docs/source/_static/theme_overrides.css
new file mode 100644
index 0000000..63ee6cc
--- /dev/null
+++ b/docs/source/_static/theme_overrides.css
@@ -0,0 +1,13 @@
+/* override table width restrictions */
+@media screen and (min-width: 767px) {
+
+ .wy-table-responsive table td {
+ /* !important prevents the common CSS stylesheets from overriding
+ this as on RTD they are loaded after this stylesheet */
+ white-space: normal !important;
+ }
+
+ .wy-table-responsive {
+ overflow: visible !important;
+ }
+}
diff --git a/docs/source/conf.py b/docs/source/conf.py
new file mode 100644
index 0000000..002c717
--- /dev/null
+++ b/docs/source/conf.py
@@ -0,0 +1,112 @@
+# Configuration file for the Sphinx documentation builder.
+#
+# For the full list of built-in configuration values, see the documentation:
+# https://www.sphinx-doc.org/en/master/usage/configuration.html
+
+# -- Project information -----------------------------------------------------
+# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
+
+project = 'ndx-microscopy'
+copyright = '2024, Alessandra Trapani, Cody Baker'
+author = 'Alessandra Trapani, Cody Baker'
+
+version = '0.1.0'
+release = 'alpha'
+
+# -- General configuration ---------------------------------------------------
+# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
+
+extensions = [
+ 'sphinx.ext.ifconfig',
+ 'sphinx.ext.autodoc',
+ 'sphinx.ext.intersphinx',
+]
+
+templates_path = ['_templates']
+exclude_patterns = []
+
+language = 'en'
+
+# -- Options for HTML output -------------------------------------------------
+# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
+
+html_theme = 'alabaster'
+html_static_path = ['_static']
+
+# -- Options for intersphinx extension ---------------------------------------
+# https://www.sphinx-doc.org/en/master/usage/extensions/intersphinx.html#configuration
+
+intersphinx_mapping = {
+ 'python': ('https://docs.python.org/3', None),
+}
+
+
+############################################################################
+# CUSTOM CONFIGURATIONS ADDED BY THE NWB TOOL FOR GENERATING FORMAT DOCS
+###########################################################################
+
+import sphinx_rtd_theme # noqa: E402
+import textwrap # noqa: E402
+
+# -- Options for intersphinx ---------------------------------------------
+intersphinx_mapping.update({
+ 'core': ('https://nwb-schema.readthedocs.io/en/latest/', None),
+ 'hdmf-common': ('https://hdmf-common-schema.readthedocs.io/en/latest/', None),
+})
+
+# -- Generate sources from YAML---------------------------------------------------
+# Always rebuild the source docs from YAML even if the folder with the source files already exists
+spec_doc_rebuild_always = True
+
+
+def run_doc_autogen(_):
+ # Execute the autogeneration of Sphinx format docs from the YAML sources
+ import sys
+ import os
+ conf_file_dir = os.path.dirname(os.path.abspath(__file__))
+ sys.path.append(conf_file_dir) # Need so that generate format docs can find the conf_doc_autogen file
+ from conf_doc_autogen import spec_output_dir
+
+ if spec_doc_rebuild_always or not os.path.exists(spec_output_dir):
+ sys.path.append('./docs') # needed to enable import of generate_format docs
+ from hdmf_docutils.generate_format_docs import main as generate_docs
+ generate_docs()
+
+
+def setup(app):
+ app.connect('builder-inited', run_doc_autogen)
+ # overrides for wide tables in RTD theme
+ try:
+ app.add_css_file("theme_overrides.css") # Used by newer Sphinx versions
+ except AttributeError:
+ app.add_stylesheet("theme_overrides.css") # Used by older version of Sphinx
+
+# -- Customize sphinx settings
+numfig = True
+autoclass_content = 'both'
+autodoc_docstring_signature = True
+autodoc_member_order = 'bysource'
+add_function_parentheses = False
+
+
+# -- HTML sphinx options
+html_theme = "sphinx_rtd_theme"
+html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
+
+# LaTeX Sphinx options
+latex_elements = {
+ # The paper size ('letterpaper' or 'a4paper').
+ 'papersize': 'letterpaper',
+
+ # The font size ('10pt', '11pt' or '12pt').
+ 'pointsize': '10pt',
+
+ # Additional stuff for the LaTeX preamble.
+ 'preamble': textwrap.dedent(
+ '''
+ \\setcounter{tocdepth}{3}
+ \\setcounter{secnumdepth}{6}
+ \\usepackage{enumitem}
+ \\setlistdepth{100}
+ '''),
+}
diff --git a/docs/source/conf_doc_autogen.py b/docs/source/conf_doc_autogen.py
new file mode 100644
index 0000000..aed891b
--- /dev/null
+++ b/docs/source/conf_doc_autogen.py
@@ -0,0 +1,90 @@
+# -*- coding: utf-8 -*-
+# Configuration file for generating sources for the format documentation from the YAML specification files
+
+import os
+
+# -- Input options for the specification files to be used -----------------------
+
+# Directory where the YAML files for the namespace to be documented are located
+spec_input_spec_dir = '../spec'
+
+# Name of the YAML file with the specification of the Namespace to be documented
+spec_input_namespace_filename = 'ndx-microscopy.namespace.yaml'
+
+# Name of the default namespace in the file
+spec_input_default_namespace = 'ndx-microscopy'
+
+
+# -- Options for customizing the locations of output files
+
+# Directory where the autogenerated files should be stored
+spec_output_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "_format_auto_docs")
+
+# Name of the master rst file that includes all the autogenerated docs
+spec_output_master_filename = 'format_spec_main.inc'
+
+# Name of the file where the main documentation goes
+spec_output_doc_filename = 'format_spec_doc.inc'
+
+# Name of the file where the sources of the format spec go. NOTE: This file is only generated if
+# spec_generate_src_file is enabled
+spec_output_src_filename = 'format_spec_sources.inc'
+
+# Name of the file containing the type hierarchy. (Included in spec_output_doc_filename)
+spec_output_doc_type_hierarchy_filename = 'format_spec_type_hierarchy.inc'
+
+# Clean up the output directory before we build if the git hash is out of date
+spec_clean_output_dir_if_old_git_hash = True
+
+# Do not rebuild the format sources if we have previously build the sources and the git hash matches
+spec_skip_doc_autogen_if_current_git_hash = False
+
+
+# -- Options for the generation of the documentation from source ----------------
+
+# Should the YAML sources be included for the different modules
+spec_show_yaml_src = True
+
+# Show figure of the hierarchy of objects defined by the spec
+spec_show_hierarchy_plots = True
+
+# Should the sources of the neurodata_types (YAML) be rendered in a separate section (True) or
+# in the same location as the base documentation
+spec_generate_src_file = True
+
+# Should separate .inc reStructuredText files be generated for each neurodata_type (True)
+# or should all text be added to the main file
+spec_file_per_type = True
+
+# Should top-level subgroups be listed in a separate table or as part of the main dataset and attributes table
+spec_show_subgroups_in_seperate_table = True
+
+# Abbreviate the documentation of the main object for which a table is rendered in the table.
+# This is commonly set to True as doc of the main object is already rendered as the main intro for the
+# section describing the object
+spec_appreviate_main_object_doc_in_tables = True
+
+# Show a title for the tables
+spec_show_title_for_tables = True
+
+# Char to be used as prefix to indicate the depth of an object in the specification hierarchy
+spec_table_depth_char = '.' # '→' '.'
+
+# Add a LaTeX clearpage after each main section describing a neurodata_type. This helps in LaTeX to keep the ordering
+# of figures, tables, and code blocks consistent in particular when the hierarchy_plots are included
+spec_add_latex_clearpage_after_ndt_sections = True
+
+# Resolve includes to always show the full list of objects that are part of a type (True)
+# or to show only the parts that are actually new to a current type while only linking to base types
+spec_resolve_type_inc = False
+
+# Default type map to be used. This is the type map where dependent namespaces are stored. In the case of
+# NWB this is spec_default_type_map = pynwb.get_type_map()
+import pynwb # noqa: E402
+spec_default_type_map = pynwb.get_type_map()
+
+# Default specification classes for groups datasets and namespaces. In the case of NWB these are the NWB-specific
+# spec classes. In the general cases these are the spec classes from HDMF
+spec_group_spec_cls = pynwb.spec.NWBGroupSpec
+spec_dataset_spec_cls = pynwb.spec.NWBDatasetSpec
+spec_namespace_spec_cls = pynwb.spec.NWBNamespace
diff --git a/docs/source/credits.rst b/docs/source/credits.rst
new file mode 100644
index 0000000..da5cda1
--- /dev/null
+++ b/docs/source/credits.rst
@@ -0,0 +1,21 @@
+*******
+Credits
+*******
+
+.. note::
+ Add the credits for your extension here
+
+Acknowledgments
+===============
+
+
+Authors
+=======
+
+
+*****
+Legal
+*****
+
+License
+=======
diff --git a/docs/source/description.rst b/docs/source/description.rst
new file mode 100644
index 0000000..6f8553e
--- /dev/null
+++ b/docs/source/description.rst
@@ -0,0 +1,5 @@
+Overview
+========
+
+.. note::
+ Add the description of your extension here
diff --git a/docs/source/format.rst b/docs/source/format.rst
new file mode 100644
index 0000000..4b88782
--- /dev/null
+++ b/docs/source/format.rst
@@ -0,0 +1,12 @@
+
+.. _ndx-microscopy:
+
+**************
+ndx-microscopy
+**************
+
+Version |release| |today|
+
+.. .. contents::
+
+.. include:: _format_auto_docs/format_spec_main.inc
diff --git a/docs/source/index.rst b/docs/source/index.rst
new file mode 100644
index 0000000..207c9a0
--- /dev/null
+++ b/docs/source/index.rst
@@ -0,0 +1,30 @@
+Specification for the ndx-microscopy extension
+==============================================
+
+.. toctree::
+ :numbered:
+ :maxdepth: 8
+ :caption: Table of Contents
+
+ description
+
+.. toctree::
+ :numbered:
+ :maxdepth: 3
+ :caption: Extension Specification
+
+ format
+
+.. toctree::
+ :maxdepth: 2
+ :caption: History & Legal
+
+ release_notes
+ credits
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
diff --git a/docs/source/release_notes.rst b/docs/source/release_notes.rst
new file mode 100644
index 0000000..39ccd1c
--- /dev/null
+++ b/docs/source/release_notes.rst
@@ -0,0 +1,5 @@
+Release Notes
+=============
+
+.. note::
+ Add the release notes of your extension here
diff --git a/pyproject.toml b/pyproject.toml
index 2b90ddd..6b63b6a 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,25 +1,113 @@
+[build-system]
+requires = ["hatchling"]
+build-backend = "hatchling.build"
+
+[project]
+name = "ndx-microscopy"
+version = "0.1.0"
+authors = [
+ { name="Alessandra Trapani", email="alessandra.trapani@catalystneuro.com" },
+ { name="Cody Baker", email="cody.baker@catalystneuro.com" },
+]
+description = "An NWB extension to demonstrate the TAB proposal for enhancements to optical physiology neurodata types."
+readme = "README.md"
+# requires-python = ">=3.8"
+license = {text = "BSD-3"}
+classifiers = [
+ # TODO: add classifiers before release
+ # "Programming Language :: Python",
+ # "Programming Language :: Python :: 3.8",
+ # "Programming Language :: Python :: 3.9",
+ # "Programming Language :: Python :: 3.10",
+ # "Programming Language :: Python :: 3.11",
+ # "Programming Language :: Python :: 3.12",
+ # "Development Status :: 4 - Beta",
+ "Intended Audience :: Developers",
+ "Intended Audience :: Science/Research",
+ "License :: OSI Approved :: BSD License",
+]
+keywords = [
+ 'NeurodataWithoutBorders',
+ 'NWB',
+ 'nwb-extension',
+ 'ndx-extension',
+]
+dependencies = [
+ "pynwb>=2.8.0",
+ "hdmf>=3.14.1",
+]
+
+# TODO: add URLs before release
+# [project.urls]
+# "Homepage" = "https://github.com/organization/package"
+# "Documentation" = "https://package.readthedocs.io/"
+# "Bug Tracker" = "https://github.com/organization/package/issues"
+# "Discussions" = "https://github.com/organization/package/discussions"
+# "Changelog" = "https://github.com/organization/package/blob/main/CHANGELOG.md"
+
+# Include only the source code under `src/pynwb/ndx_microscopy` and the spec files under `spec`
+# in the wheel.
+[tool.hatch.build.targets.wheel]
+packages = [
+ "src/pynwb/ndx_microscopy",
+ "spec"
+]
+
+# Rewrite the path to the `spec` directory to `ndx_microscopy/spec`.
+# `ndx_microscopy/__init__.py` will look there first for the spec files.
+# The resulting directory structure within the wheel will be:
+# ndx_microscopy/
+# ├── __init__.py
+# ├── spec
+# └── widgets
+[tool.hatch.build.targets.wheel.sources]
+"spec" = "ndx_microscopy/spec"
+
+# The source distribution includes everything in the package except for the `src/matnwb` directory and
+# git and github-related files.
+[tool.hatch.build.targets.sdist]
+exclude = [
+ ".git*",
+ "src/matnwb",
+]
+
+[tool.pytest.ini_options]
+# uncomment below to run pytest always with code coverage reporting. NOTE: breakpoints may not work
+# addopts = "--cov --cov-report html"
+
+[tool.codespell]
+skip = "htmlcov,.git,.mypy_cache,.pytest_cache,.coverage,*.pdf,*.svg,venvs,.tox,hdmf-common-schema,./docs/_build/*,*.ipynb"
+
+[tool.coverage.run]
+branch = true
+source = ["ndx_microscopy"]
+
+[tool.coverage.report]
+exclude_lines = [
+ "pragma: no cover",
+ "@abstract"
+]
+
[tool.black]
line-length = 120
-target-version = ['py39', 'py310', 'py311', 'py312']
-include = '\.pyi?$'
-extend-exclude = '''
-/(
- \.toml
- |\.yml
- |\.txt
- |\.sh
- |\.git
- |\.ini
- | \.hg
- | \.mypy_cache
- | \.tox
- | \.venv
- | build
- | dist
-)/
-'''
-
-[tool.isort]
-profile = "black"
-reverse_relative = true
-known_first_party = ["ndx_microscopy"]
+preview = true
+exclude = ".git|.mypy_cache|.tox|.venv|venv|.ipynb_checkpoints|_build/|dist/|__pypackages__|.ipynb|docs/"
+
+[tool.ruff]
+lint.select = ["E", "F", "T100", "T201", "T203"]
+exclude = [
+ ".git",
+ ".tox",
+ "__pycache__",
+ "build/",
+ "dist/",
+ "docs/source/conf.py",
+]
+line-length = 120
+
+[tool.ruff.lint.per-file-ignores]
+"src/pynwb/ndx_microscopy/__init__.py" = ["E402", "F401"]
+"src/spec/create_extension_spec.py" = ["T201"]
+
+[tool.ruff.lint.mccabe]
+max-complexity = 17
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 9955dec..0db92de 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -1,2 +1,14 @@
-pytest
-pytest-cov
+# pinned dependencies to reproduce an entire development environment to
+# run tests, check code style, and generate documentation
+black==24.4.2
+codespell==2.3.0
+coverage==7.5.4
+hdmf==3.14.1
+hdmf-docutils==0.4.7
+pre-commit==3.5.0 # latest pre-commit does not support py3.8
+pynwb==2.8.0
+pytest==8.2.2
+pytest-cov==5.0.0
+pytest-subtests==0.12.1
+python-dateutil==2.8.2
+ruff==0.4.10
diff --git a/requirements-min.txt b/requirements-min.txt
index cd0c3e6..d8142c7 100644
--- a/requirements-min.txt
+++ b/requirements-min.txt
@@ -1 +1,5 @@
-pynwb
+# minimum versions of package dependencies for installation
+# these should match the minimum versions specified in pyproject.toml
+# NOTE: it may be possible to relax these minimum requirements
+pynwb==2.8.0
+hdmf==3.14.1
diff --git a/setup.cfg b/setup.cfg
deleted file mode 100644
index f6c4008..0000000
--- a/setup.cfg
+++ /dev/null
@@ -1,17 +0,0 @@
-[wheel]
-universal = 1
-
-[flake8]
-max-line-length = 120
-max-complexity = 17
-exclude =
- .git,
- .tox,
- __pycache__,
- build/,
- dist/,
- docs/source/conf.py
- versioneer.py
-
-[metadata]
-description-file = README.md
diff --git a/setup.py b/setup.py
deleted file mode 100644
index 33da03e..0000000
--- a/setup.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import os
-from shutil import copy2
-
-from setuptools import find_packages, setup
-
-# load README.md/README.rst file
-try:
- if os.path.exists("README.md"):
- with open("README.md", "r") as fp:
- readme = fp.read()
- readme_type = "text/markdown; charset=UTF-8"
- elif os.path.exists("README.rst"):
- with open("README.rst", "r") as fp:
- readme = fp.read()
- readme_type = "text/x-rst; charset=UTF-8"
- else:
- readme = ""
-except Exception:
- readme = ""
-
-setup_args = {
- "name": "ndx-microscopy",
- "version": "0.1.0",
- "description": "An example extension to demonstrate the TAB proposal for enhancements to optical physiology neurodata types.",
- "long_description": readme,
- "long_description_content_type": readme_type,
- "author": "Cody Baker and Alessandra Trapani",
- "author_email": "cody.baker@catalystneuro.com",
- "url": "",
- "license": "MIT",
- "install_requires": [
- "pynwb>=1.5.0,<3",
- "hdmf>=2.5.6,<4",
- ],
- "packages": find_packages("src/pynwb", exclude=["tests", "tests.*"]),
- "package_dir": {"": "src/pynwb"},
- "package_data": {
- "ndx_microscopy": [
- "spec/ndx-microscopy.namespace.yaml",
- "spec/ndx-microscopy.extensions.yaml",
- ]
- },
- "classifiers": [
- "Intended Audience :: Developers",
- "Intended Audience :: Science/Research",
- "License :: OSI Approved :: MIT License",
- ],
- "keywords": ["NeurodataWithoutBorders", "NWB", "nwb-extension", "ndx-extension"],
- "zip_safe": False,
-}
-
-
-def _copy_spec_files(project_dir):
- ns_path = os.path.join(project_dir, "spec", "ndx-microscopy.namespace.yaml")
- ext_path = os.path.join(project_dir, "spec", "ndx-microscopy.extensions.yaml")
-
- dst_dir = os.path.join(project_dir, "src", "pynwb", "ndx_microscopy", "spec")
- if not os.path.exists(dst_dir):
- os.mkdir(dst_dir)
-
- copy2(ns_path, dst_dir)
- copy2(ext_path, dst_dir)
-
-
-if __name__ == "__main__":
- _copy_spec_files(os.path.dirname(__file__))
- setup(**setup_args)
diff --git a/spec/ndx-microscopy.extensions.yaml b/spec/ndx-microscopy.extensions.yaml
index dd0279f..fa9a0ff 100644
--- a/spec/ndx-microscopy.extensions.yaml
+++ b/spec/ndx-microscopy.extensions.yaml
@@ -1,536 +1,8 @@
groups:
-
- - neurodata_type_def: Microscope
- neurodata_type_inc: Device
- doc: A microscope used to acquire imaging data.
- attributes:
- - name: model
- dtype: text
- doc: Model identifier of the light source device.
- required: false
-
- - neurodata_type_def: MicroscopyLightSource
- neurodata_type_inc: Device
- doc: Light source used to illuminate an imaging space.
- attributes:
- - name: model
- dtype: text
- doc: Model identifier of the light source device.
- required: false
- - name: filter_description
- dtype: text
- doc: Filter used to obtain the excitation wavelength of light, e.g. 'Short pass at 1040 nm'.
- required: false
- - name: excitation_wavelength_in_nm
- dtype: numeric
- doc: Excitation wavelength of light, in nanometers.
- required: false
- - name: peak_power_in_W
- dtype: numeric
- doc: Incident power of stimulation device (in Watts).
- required: false
- - name: peak_pulse_energy_in_J
- dtype: numeric
- doc: If device is pulsed light source, pulse energy (in Joules).
- required: false
- - name: intensity_in_W_per_m2
- dtype: numeric
- doc: Intensity of the excitation in W/m^2, if known.
- required: false
- - name: exposure_time_in_s
- dtype: numeric
- doc: Exposure time of the sample (in sec).
- required: false
- - name: pulse_rate_in_Hz
- dtype: numeric
- doc: If device is pulsed light source, pulse rate (in Hz) used for stimulation.
- required: false
-
- # Microscopy is added on to this only to differentiate from the OpticalChannel in the core namespace
- # It would be removed when this structure is merged to core
- - neurodata_type_def: MicroscopyOpticalChannel
- neurodata_type_inc: LabMetaData # Would prefer basic NWBContainer
- doc: An optical channel used to filter light emission from an imaging space.
- datasets:
- - name: description
- doc: Description or other notes about the channel.
- dtype: text
- attributes:
- - name: indicator
- doc: Identifier for the indicator pertaining to this optical channel.
- dtype: text
- - name: filter_description
- doc: Metadata information about the filter used by this optical channel.
- dtype: text
- required: false
- - name: emission_wavelength_in_nm
- doc: Emission wavelength for this optical channel, in nanometers.
- dtype: numeric
- required: false
-
- - neurodata_type_def: ImagingSpace
- neurodata_type_inc: LabMetaData # Would prefer basic NWBContainer
- doc: Metadata about the region of physical space that imaging data was recorded from.
- datasets:
- - name: description
- dtype: text
- doc: Description of the imaging space.
- - name: origin_coordinates
- dtype: float64
- dims:
- - - x, y, z
- shape:
- - - 3
- doc: Physical location in stereotactic coordinates for the first element of the grid.
- See reference_frame to determine what the coordinates are relative to (e.g., bregma).
- quantity: '?'
- attributes:
- - name: unit
- dtype: text
- default_value: micrometers
- doc: Measurement units for origin coordinates. The default value is 'micrometers'.
- attributes:
- - name: location
- dtype: text
- doc: General estimate of location in the brain being subset by this space.
- Specify the area, layer, etc.
- Use standard atlas names for anatomical regions when possible.
- Specify 'whole brain' if the entire brain is strictly contained within the space.
- required: false
- links:
- - name: microscope
- target_type: Microscope
- doc: Link to Microscope object which contains metadata about the device which imaged this space.
-
- - neurodata_type_def: PlanarImagingSpace
- neurodata_type_inc: ImagingSpace
- doc: Metadata about the 2-dimensional slice of physical space that imaging data was recorded from.
- datasets:
- - name: grid_spacing_in_um
- dtype: float64
- dims:
- - - x, y
- shape:
- - - 2
- doc: Amount of space between pixels in micrometers.
- quantity: '?'
- attributes:
- - name: reference_frame
- dtype: text
- doc: Describes the reference frame of origin_coordinates and grid_spacing.
- For example, this can be a text description of the anatomical location and orientation of the grid
- defined by origin_coords and grid_spacing or the vectors needed to transform or rotate the grid to
- a common anatomical axis (e.g., AP/DV/ML).
- This field is necessary to interpret origin_coords and grid_spacing.
- If origin_coords and grid_spacing are not present, then this field is not required.
- For example, if the microscope returns 10 x 10 images, where the first value of the data matrix
- (index (0, 0)) corresponds to (-1.2, -0.6, -2) mm relative to bregma, the spacing between pixels is 0.2 mm in
- x, 0.2 mm in y and 0.5 mm in z, and larger numbers in x means more anterior, larger numbers in y means more
- rightward, and larger numbers in z means more ventral, then enter the following --
- origin_coords = (-1.2, -0.6, -2)
- grid_spacing = (0.2, 0.2)
- reference_frame = "Origin coordinates are relative to bregma. First dimension corresponds to anterior-posterior
- axis (larger index = more anterior). Second dimension corresponds to medial-lateral axis (larger index = more
- rightward). Third dimension corresponds to dorsal-ventral axis (larger index = more ventral)."
- required: false
-
- - neurodata_type_def: VolumetricImagingSpace
- neurodata_type_inc: ImagingSpace
- doc: Metadata about the 3-dimensional region of physical space that imaging data was recorded from.
- datasets:
- - name: grid_spacing_in_um
- doc: Amount of space between voxels in micrometers.
- dtype: float64
- dims:
- - - x, y, z
- shape:
- - - 3
- quantity: '?'
- attributes:
- - name: reference_frame
- doc: Describes the reference frame of origin_coordinates and grid_spacing.
- For example, this can be a text description of the anatomical location and orientation of the grid
- defined by origin_coords and grid_spacing or the vectors needed to transform or rotate the grid to
- a common anatomical axis (e.g., AP/DV/ML).
- This field is necessary to interpret origin_coords and grid_spacing.
- If origin_coords and grid_spacing are not present, then this field is not required.
- For example, if the microscope returns 10 x 10 x 2 images, where the first value of the data matrix
- (index (0, 0, 0)) corresponds to (-1.2, -0.6, -2) mm relative to bregma, the spacing between pixels is 0.2 mm in
- x, 0.2 mm in y and 0.5 mm in z, and larger numbers in x means more anterior, larger numbers in y means more
- rightward, and larger numbers in z means more ventral, then enter the following --
- origin_coords = (-1.2, -0.6, -2)
- grid_spacing = (0.2, 0.2, 0.5)
- reference_frame = "Origin coordinates are relative to bregma. First dimension corresponds to anterior-posterior
- axis (larger index = more anterior). Second dimension corresponds to medial-lateral axis (larger index = more
- rightward). Third dimension corresponds to dorsal-ventral axis (larger index = more ventral)."
- dtype: text
- required: false
-
-
- # These are needed to allow linkage of processed data to the new objects, until this is merged to core
- # Technically the RoiResponseSeries shouldn't need to be modified since it just takes a DynamicTableRegion and
- # does not care about the target
- - neurodata_type_def: MicroscopySegmentations
- neurodata_type_inc: NWBDataInterface
- default_name: MicroscopySegmentations
- doc: Stores pixels in an image that represent different regions of interest (ROIs)
- or masks. All segmentation for a given imaging plane is stored together, with
- storage for multiple imaging planes (masks) supported. Each ROI is stored in its
- own subgroup, with the ROI group containing both a 2D mask and a list of pixels
- that make up this mask. Segments can also be used for masking neuropil. If segmentation
- is allowed to change with time, a new imaging plane (or module) is required and
- ROI names should remain consistent between them.
- groups:
- - neurodata_type_inc: MicroscopyPlaneSegmentation
- doc: Results from image segmentation of a specific imaging plane.
- quantity: '+'
-
-
- - neurodata_type_def: MicroscopyPlaneSegmentation
- neurodata_type_inc: DynamicTable
- doc: Results from image segmentation of a specific imaging plane.
- datasets:
- - name: image_mask
- neurodata_type_inc: VectorData
- dims:
- - - num_roi
- - num_x
- - num_y
- - - num_roi
- - num_x
- - num_y
- - num_z
- shape:
- - - null
- - null
- - null
- - - null
- - null
- - null
- - null
- doc: ROI masks for each ROI. Each image mask is the size of the original imaging
- plane (or volume) and members of the ROI are finite non-zero.
- quantity: '?'
- - name: pixel_mask_index
- neurodata_type_inc: VectorIndex
- doc: Index into pixel_mask.
- quantity: '?'
- - name: pixel_mask
- neurodata_type_inc: VectorData
- dtype:
- - name: x
- dtype: uint32
- doc: Pixel x-coordinate.
- - name: y
- dtype: uint32
- doc: Pixel y-coordinate.
- - name: weight
- dtype: float32
- doc: Weight of the pixel.
- doc: 'Pixel masks for each ROI: a list of indices and weights for the ROI. Pixel
- masks are concatenated and parsing of this dataset is maintained by the PlaneSegmentation'
- quantity: '?'
- - name: voxel_mask_index
- neurodata_type_inc: VectorIndex
- doc: Index into voxel_mask.
- quantity: '?'
- - name: voxel_mask
- neurodata_type_inc: VectorData
- dtype:
- - name: x
- dtype: uint32
- doc: Voxel x-coordinate.
- - name: y
- dtype: uint32
- doc: Voxel y-coordinate.
- - name: z
- dtype: uint32
- doc: Voxel z-coordinate.
- - name: weight
- dtype: float32
- doc: Weight of the voxel.
- doc: 'Voxel masks for each ROI: a list of indices and weights for the ROI. Voxel
- masks are concatenated and parsing of this dataset is maintained by the PlaneSegmentation'
- quantity: '?'
- groups:
- - name: summary_images
- doc: Summary images that are related to the plane segmentation, e.g., mean, correlation, maximum projection.
- groups:
- - neurodata_type_inc: Images
- doc: An container for the estimated summary images.
- quantity: '*'
- links:
- - name: imaging_space
- target_type: ImagingSpace
- doc: Link to ImagingSpace object from which this data was generated.
-
-
- - neurodata_type_def: MicroscopySeries
- neurodata_type_inc: TimeSeries
- doc: Imaging data acquired over time from an optical channel in a microscope while a light source illuminates the
- imaging space.
- links:
- - name: microscope
- doc: Link to a Microscope object containing metadata about the device used to acquire this imaging data.
- target_type: Microscope
- - name: light_source
- doc: Link to a MicroscopyLightSource object containing metadata about the device used to illuminate the imaging space.
- target_type: MicroscopyLightSource
- - name: optical_channel
- doc: Link to a MicroscopyOpticalChannel object containing metadata about the indicator and filters used to collect
- this data.
- target_type: MicroscopyOpticalChannel
-
- - neurodata_type_def: PlanarMicroscopySeries
- neurodata_type_inc: MicroscopySeries
- doc: Imaging data acquired over time from an optical channel in a microscope while a light source illuminates a
- planar imaging space.
- datasets:
- - name: data
- doc: Recorded imaging data, shaped by (number of frames, frame height, frame width).
- dtype: numeric
- dims:
- - frames
- - height
- - width
- shape:
- - null
- - null
- - null
- links:
- - name: imaging_space
- doc: Link to PlanarImagingSpace object containing metadata about the region of physical space this imaging data
- was recorded from.
- target_type: PlanarImagingSpace
-
- - neurodata_type_def: VariableDepthMicroscopySeries
- neurodata_type_inc: PlanarMicroscopySeries
- doc: Volumetric imaging data acquired over an irregular number and amount of depths; for instance, when using an
- electrically tunable lens.
- datasets:
- - name: depth_per_frame_in_um
- doc: Depth in micrometers of each frame in the data array.
- These values offset the 'z' value of the `origin_coordinates` of the linked `imaging_space` object.
- dtype: numeric
- dims:
- - frames
- shape:
- - null
-
- - neurodata_type_def: VolumetricMicroscopySeries
- neurodata_type_inc: MicroscopySeries
- doc: Volumetric imaging data acquired over time from an optical channel in a microscope while a light source
- illuminates a volumetric imaging space.
- Assumes the number of depth scans used to construct the volume is regular.
- datasets:
- - name: data
- doc: Recorded imaging data, shaped by (number of frames, frame height, frame width, number of depth planes).
- dtype: numeric
- dims:
- - frames
- - height
- - width
- - depths
- shape:
- - null
- - null
- - null
- - null
- links:
- - name: imaging_space
- doc: Link to VolumetricImagingSpace object containing metadata about the region of physical space this imaging data
- was recorded from.
- target_type: VolumetricImagingSpace
-
-
- - neurodata_type_def: MultiChannelMicroscopyVolume
- neurodata_type_inc: NWBDataInterface
- doc: Static (not time-varying) volumetric imaging data acquired from multiple optical channels.
- attributes:
- - name: description
- dtype: text
- doc: Description of the MultiChannelVolume.
- required: false
- - name: unit
- dtype: text
- doc: Base unit of measurement for working with the data. Actual stored values are
- not necessarily stored in these units. To access the data in these units,
- multiply 'data' by 'conversion' and add 'offset'.
- - name: conversion
- dtype: float32
- default_value: 1.0
- doc: Scalar to multiply each element in data to convert it to the specified 'unit'.
- If the data are stored in acquisition system units or other units
- that require a conversion to be interpretable, multiply the data by 'conversion'
- to convert the data to the specified 'unit'. e.g. if the data acquisition system
- stores values in this object as signed 16-bit integers (int16 range
- -32,768 to 32,767) that correspond to a 5V range (-2.5V to 2.5V), and the data
- acquisition system gain is 8000X, then the 'conversion' multiplier to get from
- raw data acquisition values to recorded volts is 2.5/32768/8000 = 9.5367e-9.
- required: false
- - name: offset
- dtype: float32
- default_value: 0.0
- doc: Scalar to add to the data after scaling by 'conversion' to finalize its coercion
- to the specified 'unit'. Two common examples of this include (a) data stored in an
- unsigned type that requires a shift after scaling to re-center the data,
- and (b) specialized recording devices that naturally cause a scalar offset with
- respect to the true units.
- required: false
- datasets:
- - name: data
- doc: Recorded imaging data, shaped by (frame height, frame width, number of depth planes, number of optical
- channels).
- dtype: numeric
- dims:
- - height
- - width
- - depths
- - optical_channels
- shape:
- - null
- - null
- - null
- - null
- - name: light_sources
- doc: An ordered list of references to MicroscopyLightSource objects containing metadata about the excitation methods.
- neurodata_type_inc: VectorData
- dtype:
- reftype: object
- target_type: MicroscopyLightSource
- dims:
- - light_sources
- shape:
- - null
- - name: optical_channels
- doc: An ordered list of references to MicroscopyOpticalChannel objects containing metadata about the indicator and filters used to collect this data. This maps to the last dimension of `data`, i.e., the i-th MicroscopyOpticalChannel contains metadata about the indicator and filters used to collect the volume at `data[:,:,:,i]`.
- neurodata_type_inc: VectorData
- dtype:
- reftype: object
- target_type: MicroscopyOpticalChannel
- dims:
- - optical_channels
- shape:
- - null
- links:
- - name: microscope
- doc: Link to a Microscope object containing metadata about the device used to acquire this imaging data.
- target_type: Microscope
- - name: imaging_space
- doc: Link to VolumetricImagingSpace object containing metadata about the region of physical space this imaging data
- was recorded from.
- target_type: VolumetricImagingSpace
-
-
- - neurodata_type_def: VariableDepthMultiChannelMicroscopyVolume
- neurodata_type_inc: NWBDataInterface
- doc: Static (not time-varying) irregularly spaced volumetric imaging data acquired from multiple optical channels.
- attributes:
- - name: description
- dtype: text
- doc: Description of the VariableDepthMultiChannelMicroscopyVolume.
- required: false
- - name: unit
- dtype: text
- doc: Base unit of measurement for working with the data. Actual stored values are
- not necessarily stored in these units. To access the data in these units,
- multiply 'data' by 'conversion' and add 'offset'.
- - name: conversion
- dtype: float32
- default_value: 1.0
- doc: Scalar to multiply each element in data to convert it to the specified 'unit'.
- If the data are stored in acquisition system units or other units
- that require a conversion to be interpretable, multiply the data by 'conversion'
- to convert the data to the specified 'unit'. e.g. if the data acquisition system
- stores values in this object as signed 16-bit integers (int16 range
- -32,768 to 32,767) that correspond to a 5V range (-2.5V to 2.5V), and the data
- acquisition system gain is 8000X, then the 'conversion' multiplier to get from
- raw data acquisition values to recorded volts is 2.5/32768/8000 = 9.5367e-9.
- required: false
- - name: offset
- dtype: float32
- default_value: 0.0
- doc: Scalar to add to the data after scaling by 'conversion' to finalize its coercion
- to the specified 'unit'. Two common examples of this include (a) data stored in an
- unsigned type that requires a shift after scaling to re-center the data,
- and (b) specialized recording devices that naturally cause a scalar offset with
- respect to the true units.
- required: false
- datasets:
- - name: data
- doc: Recorded imaging data, shaped by (frame height, frame width, number of depth planes, number of optical
- channels).
- dtype: numeric
- dims:
- - height
- - width
- - depths
- - channels
- shape:
- - null
- - null
- - null
- - null
- - name: depth_per_frame_in_um
- doc: Depth in micrometers of each frame in the data array.
- dtype: numeric
- dims:
- - depths
- shape:
- - null
- - name: light_sources
- doc: An ordered list of references to MicroscopyLightSource objects containing metadata about the excitation methods.
- neurodata_type_inc: VectorData
- dtype:
- reftype: object
- target_type: MicroscopyLightSource
- dims:
- - light_sources
- shape:
- - null
- - name: optical_channels
- doc: An ordered list of references to MicroscopyOpticalChannel objects containing metadata about the indicator and filters used to collect this data. This maps to the last dimension of `data`, i.e., the i-th MicroscopyOpticalChannel contains metadata about the indicator and filters used to collect the volume at `data[:,:,:,i]`.
- neurodata_type_inc: VectorData
- dtype:
- reftype: object
- target_type: MicroscopyOpticalChannel
- dims:
- - optical_channels
- shape:
- - null
- links:
- - name: microscope
- doc: Link to a Microscope object containing metadata about the device used to acquire this imaging data.
- target_type: Microscope
- - name: imaging_space
- doc: Link to VolumetricImagingSpace object containing metadata about the region of physical space this imaging data
- was recorded from.
- target_type: VolumetricImagingSpace
-
-
- - neurodata_type_def: MicroscopyResponseSeries
- neurodata_type_inc: TimeSeries
- doc: ROI responses extracted from optical imaging.
- datasets:
- - name: data
- dtype: numeric
- dims:
- - - number_of_frames
- - number_of_rois
- shape:
- - - null
- - null
- doc: Signals from ROIs.
- - name: table_region
- neurodata_type_inc: DynamicTableRegion
- doc: DynamicTableRegion referencing plane segmentation containing more information about the ROIs
- stored in this series.
-
- - neurodata_type_def: MicroscopyResponseSeriesContainer
- neurodata_type_inc: NWBDataInterface
- default_name: MicroscopyResponseSeriesContainer
- doc: A container of many MicroscopyResponseSeries.
- groups:
- - neurodata_type_inc: MicroscopyResponseSeries
- doc: MicroscopyResponseSeries object(s) containing fluorescence data for a ROI.
- quantity: '+'
+- neurodata_type_def: TetrodeSeries
+ neurodata_type_inc: ElectricalSeries
+ doc: An extension of ElectricalSeries to include the tetrode ID for each time series.
+ attributes:
+ - name: trode_id
+ dtype: int32
+ doc: The tetrode ID.
diff --git a/spec/ndx-microscopy.namespace.yaml b/spec/ndx-microscopy.namespace.yaml
index 8399608..1313589 100644
--- a/spec/ndx-microscopy.namespace.yaml
+++ b/spec/ndx-microscopy.namespace.yaml
@@ -1,12 +1,13 @@
namespaces:
-- name: ndx-microscopy
- doc: Microscopy extension to NWB standard.
- author:
- - Cody Baker
+- author:
- Alessandra Trapani
+ - Cody Baker
contact:
- - cody.baker@catalystneuro.com
- alessandra.trapani@catalystneuro.com
+ - cody.baker@catalystneuro.com
+ doc: An NWB extension to demonstrate the TAB proposal for enhancements to optical
+ physiology neurodata types.
+ name: ndx-microscopy
schema:
- namespace: core
- source: ndx-microscopy.extensions.yaml
diff --git a/src/pynwb/README.md b/src/pynwb/README.md
new file mode 100644
index 0000000..e69de29
diff --git a/src/pynwb/ndx_microscopy/__init__.py b/src/pynwb/ndx_microscopy/__init__.py
index d5f989b..ee4feb8 100644
--- a/src/pynwb/ndx_microscopy/__init__.py
+++ b/src/pynwb/ndx_microscopy/__init__.py
@@ -1,6 +1,5 @@
import os
-
-from pynwb import get_class, load_namespaces
+from pynwb import load_namespaces, get_class
try:
from importlib.resources import files
@@ -8,52 +7,22 @@
# TODO: Remove when python 3.9 becomes the new minimum
from importlib_resources import files
-extension_name = "ndx-microscopy"
-
# Get path to the namespace.yaml file with the expected location when installed not in editable mode
__location_of_this_file = files(__name__)
-__spec_path = __location_of_this_file / "spec" / f"{extension_name}.namespace.yaml"
+__spec_path = __location_of_this_file / "spec" / "ndx-microscopy.namespace.yaml"
# If that path does not exist, we are likely running in editable mode. Use the local path instead
if not os.path.exists(__spec_path):
- __spec_path = __location_of_this_file.parent.parent.parent / "spec" / f"{extension_name}.namespace.yaml"
+ __spec_path = __location_of_this_file.parent.parent.parent / "spec" / "ndx-microscopy.namespace.yaml"
+# Load the namespace
load_namespaces(str(__spec_path))
-Microscope = get_class("Microscope", extension_name)
-MicroscopyLightSource = get_class("MicroscopyLightSource", extension_name)
-MicroscopyOpticalChannel = get_class("MicroscopyOpticalChannel", extension_name)
-ImagingSpace = get_class("ImagingSpace", extension_name)
-PlanarImagingSpace = get_class("PlanarImagingSpace", extension_name)
-VolumetricImagingSpace = get_class("VolumetricImagingSpace", extension_name)
-MicroscopySegmentations = get_class("MicroscopySegmentations", extension_name)
-MicroscopyPlaneSegmentation = get_class("MicroscopyPlaneSegmentation", extension_name)
-MicroscopySeries = get_class("MicroscopySeries", extension_name)
-PlanarMicroscopySeries = get_class("PlanarMicroscopySeries", extension_name)
-VariableDepthMicroscopySeries = get_class("VariableDepthMicroscopySeries", extension_name)
-VolumetricMicroscopySeries = get_class("VolumetricMicroscopySeries", extension_name)
-MultiChannelMicroscopyVolume = get_class("MultiChannelMicroscopyVolume", extension_name)
-VariableDepthMultiChannelMicroscopyVolume = get_class("VariableDepthMultiChannelMicroscopyVolume", extension_name)
-
-MicroscopyResponseSeries = get_class("MicroscopyResponseSeries", extension_name)
-MicroscopyResponseSeriesContainer = get_class("MicroscopyResponseSeriesContainer", extension_name)
-
+# TODO: Define your classes here to make them accessible at the package level.
+# Either have PyNWB generate a class from the spec using `get_class` as shown
+# below or write a custom class and register it using the class decorator
+# `@register_class("TetrodeSeries", "ndx-microscopy")`
+TetrodeSeries = get_class("TetrodeSeries", "ndx-microscopy")
-__all__ = [
- "Microscope",
- "MicroscopyLightSource",
- "MicroscopyOpticalChannel",
- "ImagingSpace",
- "PlanarImagingSpace",
- "VolumetricImagingSpace",
- "MicroscopySegmentations",
- "MicroscopyPlaneSegmentation",
- "MicroscopySeries",
- "PlanarMicroscopySeries",
- "VariableDepthMicroscopySeries",
- "VolumetricMicroscopySeries",
- "MultiChannelMicroscopyVolume",
- "VariableDepthMultiChannelMicroscopyVolume",
- "MicroscopyResponseSeries",
- "MicroscopyResponseSeriesContainer",
-]
+# Remove these functions from the package
+del load_namespaces, get_class
diff --git a/src/pynwb/ndx_microscopy/testing/__init__.py b/src/pynwb/ndx_microscopy/testing/__init__.py
deleted file mode 100644
index 848c090..0000000
--- a/src/pynwb/ndx_microscopy/testing/__init__.py
+++ /dev/null
@@ -1,33 +0,0 @@
-from ._mock import (
- mock_Microscope,
- mock_MicroscopyLightSource,
- mock_MicroscopyOpticalChannel,
- mock_MicroscopyPlaneSegmentation,
- mock_MicroscopyResponseSeries,
- mock_MicroscopyResponseSeriesContainer,
- mock_MicroscopySegmentations,
- mock_MultiChannelMicroscopyVolume,
- mock_PlanarImagingSpace,
- mock_PlanarMicroscopySeries,
- mock_VariableDepthMicroscopySeries,
- mock_VariableDepthMultiChannelMicroscopyVolume,
- mock_VolumetricImagingSpace,
- mock_VolumetricMicroscopySeries,
-)
-
-__all__ = [
- "mock_Microscope",
- "mock_MicroscopyLightSource",
- "mock_MicroscopyOpticalChannel",
- "mock_PlanarImagingSpace",
- "mock_VolumetricImagingSpace",
- "mock_MicroscopySegmentations",
- "mock_MicroscopyPlaneSegmentation",
- "mock_PlanarMicroscopySeries",
- "mock_VariableDepthMicroscopySeries",
- "mock_VolumetricMicroscopySeries",
- "mock_MultiChannelMicroscopyVolume",
- "mock_MicroscopyResponseSeries",
- "mock_MicroscopyResponseSeriesContainer",
- "mock_VariableDepthMultiChannelMicroscopyVolume",
-]
diff --git a/src/pynwb/ndx_microscopy/testing/_mock.py b/src/pynwb/ndx_microscopy/testing/_mock.py
deleted file mode 100644
index cb7d919..0000000
--- a/src/pynwb/ndx_microscopy/testing/_mock.py
+++ /dev/null
@@ -1,471 +0,0 @@
-import warnings
-from typing import Iterable, List, Optional, Tuple
-
-import numpy as np
-import pynwb.base
-from pynwb.testing.mock.utils import name_generator
-
-import ndx_microscopy
-
-
-def mock_Microscope(
- *,
- name: Optional[str] = None,
- description: str = "A mock instance of a Microscope type to be used for rapid testing.",
- manufacturer: str = "A fake manufacturer of the mock microscope.",
- model: str = "A fake model of the mock microscope.",
-) -> ndx_microscopy.Microscope:
- microscope = ndx_microscopy.Microscope(
- name=name or name_generator("Microscope"),
- description=description,
- manufacturer=manufacturer,
- model=model,
- )
- return microscope
-
-
-def mock_MicroscopyLightSource(
- *,
- name: Optional[str] = None,
- description: str = "A mock instance of a MicroscopyLightSource type to be used for rapid testing.",
- manufacturer: str = "A fake manufacturer of the mock light source.",
- model: str = "A fake model of the mock light source.",
- filter_description: str = "A description about the fake filter used by the mock light source.",
- excitation_wavelength_in_nm: float = 500.0,
- peak_power_in_W: float = 0.7,
- peak_pulse_energy_in_J: float = 0.7,
- intensity_in_W_per_m2: float = 0.005,
- exposure_time_in_s: float = 2.51e-13,
- pulse_rate_in_Hz: float = 2.0e6,
-) -> ndx_microscopy.MicroscopyLightSource:
- light_source = ndx_microscopy.MicroscopyLightSource(
- name=name or name_generator("MicroscopyLightSource"),
- description=description,
- manufacturer=manufacturer,
- model=model,
- filter_description=filter_description,
- excitation_wavelength_in_nm=excitation_wavelength_in_nm,
- peak_power_in_W=peak_power_in_W,
- peak_pulse_energy_in_J=peak_pulse_energy_in_J,
- intensity_in_W_per_m2=intensity_in_W_per_m2,
- exposure_time_in_s=exposure_time_in_s,
- pulse_rate_in_Hz=pulse_rate_in_Hz,
- )
- return light_source
-
-
-def mock_MicroscopyOpticalChannel(
- *,
- name: Optional[str] = None,
- description: str = "A mock instance of a MicroscopyOpticalChannel type to be used for rapid testing.",
- indicator: str = "The indicator targeted by the mock optical channel.",
- filter_description: str = "A description about the fake filter used by the mock optical channel.",
- emission_wavelength_in_nm: float = 450.0,
-) -> ndx_microscopy.MicroscopyOpticalChannel:
- optical_channel = ndx_microscopy.MicroscopyOpticalChannel(
- name=name or name_generator("MicroscopyOpticalChannel"),
- description=description,
- indicator=indicator,
- filter_description=filter_description,
- emission_wavelength_in_nm=emission_wavelength_in_nm,
- )
- return optical_channel
-
-
-def mock_PlanarImagingSpace(
- *,
- microscope: ndx_microscopy.Microscope,
- name: Optional[str] = None,
- description: str = "A mock instance of a PlanarImagingSpace type to be used for rapid testing.",
- origin_coordinates: Tuple[float, float, float] = (-1.2, -0.6, -2),
- grid_spacing_in_um: Tuple[float, float, float] = (20, 20),
- location: str = "The location targeted by the mock imaging space.",
- reference_frame: str = "The reference frame of the mock planar imaging space.",
-) -> ndx_microscopy.PlanarImagingSpace:
- planar_imaging_space = ndx_microscopy.PlanarImagingSpace(
- name=name or name_generator("PlanarImagingSpace"),
- description=description,
- microscope=microscope,
- origin_coordinates=origin_coordinates,
- grid_spacing_in_um=grid_spacing_in_um,
- location=location,
- reference_frame=reference_frame,
- )
- return planar_imaging_space
-
-
-def mock_VolumetricImagingSpace(
- *,
- microscope: ndx_microscopy.Microscope,
- name: Optional[str] = None,
- description: str = "A mock instance of a VolumetricImagingSpace type to be used for rapid testing.",
- origin_coordinates: Tuple[float, float, float] = (-1.2, -0.6, -2),
- grid_spacing_in_um: Tuple[float, float, float] = (20, 20, 50),
- location: str = "The location targeted by the mock imaging space.",
- reference_frame: str = "The reference frame of the mock volumetric imaging space.",
-) -> ndx_microscopy.VolumetricImagingSpace:
- volumetric_imaging_space = ndx_microscopy.VolumetricImagingSpace(
- name=name or name_generator("VolumetricImagingSpace"),
- description=description,
- microscope=microscope,
- origin_coordinates=origin_coordinates,
- grid_spacing_in_um=grid_spacing_in_um,
- location=location,
- reference_frame=reference_frame,
- )
- return volumetric_imaging_space
-
-
-def mock_MicroscopySegmentations(
- *,
- name: Optional[str] = None,
- microscopy_plane_segmentations: Optional[Iterable[ndx_microscopy.MicroscopyPlaneSegmentation]] = None,
-) -> ndx_microscopy.MicroscopySegmentations:
- name = name or name_generator("MicroscopySegmentations")
-
- microscope = mock_Microscope()
- imaging_space = mock_PlanarImagingSpace(microscope=microscope)
- microscopy_plane_segmentations = microscopy_plane_segmentations or [
- mock_MicroscopyPlaneSegmentation(imaging_space=imaging_space)
- ]
-
- segmentations = ndx_microscopy.MicroscopySegmentations(
- name=name, microscopy_plane_segmentations=microscopy_plane_segmentations
- )
-
- return segmentations
-
-
-def mock_MicroscopyPlaneSegmentation(
- *,
- imaging_space: ndx_microscopy.ImagingSpace,
- name: Optional[str] = None,
- description: str = "A mock instance of a MicroscopyPlaneSegmentation type to be used for rapid testing.",
- number_of_rois: int = 5,
- image_shape: Tuple[int, int] = (10, 10),
-) -> ndx_microscopy.MicroscopyPlaneSegmentation:
- name = name or name_generator("MicroscopyPlaneSegmentation")
-
- plane_segmentation = ndx_microscopy.MicroscopyPlaneSegmentation(
- name=name, description=description, imaging_space=imaging_space, id=list(range(number_of_rois))
- )
- # plane_segmentation.add_column(name="id", description="", data=list(range(number_of_rois)))
-
- image_masks = list()
- for _ in range(number_of_rois):
- image_masks.append(np.zeros(image_shape, dtype=bool))
- plane_segmentation.add_column(name="image_mask", description="", data=image_masks)
-
- return plane_segmentation
-
-
-def mock_PlanarMicroscopySeries(
- *,
- microscope: ndx_microscopy.Microscope,
- light_source: ndx_microscopy.MicroscopyLightSource,
- imaging_space: ndx_microscopy.PlanarImagingSpace,
- optical_channel: ndx_microscopy.MicroscopyOpticalChannel,
- name: Optional[str] = None,
- description: str = "A mock instance of a PlanarMicroscopySeries type to be used for rapid testing.",
- data: Optional[np.ndarray] = None,
- unit: str = "a.u.",
- conversion: float = 1.0,
- offset: float = 0.0,
- starting_time: Optional[float] = None,
- rate: Optional[float] = None,
- timestamps: Optional[np.ndarray] = None,
-) -> ndx_microscopy.PlanarMicroscopySeries:
- series_name = name or name_generator("PlanarMicroscopySeries")
- series_data = data if data is not None else np.ones(shape=(15, 5, 5))
-
- if timestamps is None:
- series_starting_time = starting_time or 0.0
- series_rate = rate or 10.0
- series_timestamps = None
- else:
- if starting_time is not None or rate is not None:
- warnings.warn(
- message=(
- "Timestamps were provided in addition to either rate or starting_time! "
- "Please specify only timestamps, or both starting_time and rate. Timestamps will take precedence."
- ),
- stacklevel=2,
- )
-
- series_starting_time = None
- series_rate = None
- series_timestamps = timestamps
-
- planar_microscopy_series = ndx_microscopy.PlanarMicroscopySeries(
- name=series_name,
- description=description,
- microscope=microscope,
- light_source=light_source,
- imaging_space=imaging_space,
- optical_channel=optical_channel,
- data=series_data,
- unit=unit,
- conversion=conversion,
- offset=offset,
- starting_time=series_starting_time,
- rate=series_rate,
- timestamps=series_timestamps,
- )
- return planar_microscopy_series
-
-
-def mock_VariableDepthMicroscopySeries(
- *,
- microscope: ndx_microscopy.Microscope,
- light_source: ndx_microscopy.MicroscopyLightSource,
- imaging_space: ndx_microscopy.PlanarImagingSpace,
- optical_channel: ndx_microscopy.MicroscopyOpticalChannel,
- name: Optional[str] = None,
- description: str = "A mock instance of a PlanarMicroscopySeries type to be used for rapid testing.",
- data: Optional[np.ndarray] = None,
- depth_per_frame_in_um: Optional[np.ndarray] = None,
- unit: str = "a.u.",
- conversion: float = 1.0,
- offset: float = 0.0,
- starting_time: Optional[float] = None,
- rate: Optional[float] = None,
- timestamps: Optional[np.ndarray] = None,
-) -> ndx_microscopy.VariableDepthMicroscopySeries:
- series_name = name or name_generator("VariableDepthMicroscopySeries")
- series_data = data if data is not None else np.ones(shape=(15, 5, 5))
-
- depth_per_frame_in_um = (
- depth_per_frame_in_um
- if depth_per_frame_in_um is not None
- else np.linspace(start=0.0, stop=30.0, num=series_data.shape[0])
- )
-
- if timestamps is None:
- series_starting_time = starting_time or 0.0
- series_rate = rate or 10.0
- series_timestamps = None
- else:
- if starting_time is not None or rate is not None:
- warnings.warn(
- message=(
- "Timestamps were provided in addition to either rate or starting_time! "
- "Please specify only timestamps, or both starting_time and rate. Timestamps will take precedence."
- ),
- stacklevel=2,
- )
-
- series_starting_time = None
- series_rate = None
- series_timestamps = timestamps
-
- variable_depth_microscopy_series = ndx_microscopy.VariableDepthMicroscopySeries(
- name=series_name,
- description=description,
- microscope=microscope,
- light_source=light_source,
- imaging_space=imaging_space,
- optical_channel=optical_channel,
- data=series_data,
- depth_per_frame_in_um=depth_per_frame_in_um,
- unit=unit,
- conversion=conversion,
- offset=offset,
- starting_time=series_starting_time,
- rate=series_rate,
- timestamps=series_timestamps,
- )
- return variable_depth_microscopy_series
-
-
-def mock_VolumetricMicroscopySeries(
- *,
- microscope: ndx_microscopy.Microscope,
- light_source: ndx_microscopy.MicroscopyLightSource,
- imaging_space: ndx_microscopy.VolumetricImagingSpace,
- optical_channel: ndx_microscopy.MicroscopyOpticalChannel,
- name: Optional[str] = None,
- description: str = "A mock instance of a VolumetricMicroscopySeries type to be used for rapid testing.",
- data: Optional[np.ndarray] = None,
- unit: str = "a.u.",
- conversion: float = 1.0,
- offset: float = 0.0,
- starting_time: Optional[float] = None,
- rate: Optional[float] = None,
- timestamps: Optional[np.ndarray] = None,
-) -> ndx_microscopy.VolumetricMicroscopySeries:
- series_name = name or name_generator("VolumetricMicroscopySeries")
- series_data = data if data is not None else np.ones(shape=(5, 5, 5, 3))
-
- if timestamps is None:
- series_starting_time = starting_time or 0.0
- series_rate = rate or 10.0
- series_timestamps = None
- else:
- if starting_time is not None or rate is not None:
- warnings.warn(
- message=(
- "Timestamps were provided in addition to either rate or starting_time! "
- "Please specify only timestamps, or both starting_time and rate. Timestamps will take precedence."
- ),
- stacklevel=2,
- )
-
- series_starting_time = None
- series_rate = None
- series_timestamps = timestamps
-
- volumetric_microscopy_series = ndx_microscopy.VolumetricMicroscopySeries(
- name=series_name,
- description=description,
- microscope=microscope,
- light_source=light_source,
- imaging_space=imaging_space,
- optical_channel=optical_channel,
- data=series_data,
- unit=unit,
- conversion=conversion,
- offset=offset,
- starting_time=series_starting_time,
- rate=series_rate,
- timestamps=series_timestamps,
- )
- return volumetric_microscopy_series
-
-
-def mock_MultiChannelMicroscopyVolume(
- *,
- microscope: ndx_microscopy.Microscope,
- imaging_space: ndx_microscopy.VolumetricImagingSpace,
- light_sources: pynwb.base.VectorData,
- optical_channels: pynwb.base.VectorData,
- name: Optional[str] = None,
- description: str = "A mock instance of a MultiChannelMicroscopyVolume type to be used for rapid testing.",
- data: Optional[np.ndarray] = None,
- unit: str = "n.a.",
- conversion: float = 1.0,
- offset: float = 0.0,
-) -> ndx_microscopy.MultiChannelMicroscopyVolume:
- series_name = name or name_generator("MultiChannelMicroscopyVolume")
- imaging_data = data if data is not None else np.ones(shape=(10, 20, 7, 3))
-
- volumetric_microscopy_series = ndx_microscopy.MultiChannelMicroscopyVolume(
- name=series_name,
- description=description,
- microscope=microscope,
- imaging_space=imaging_space,
- light_sources=light_sources,
- optical_channels=optical_channels,
- data=imaging_data,
- unit=unit,
- conversion=conversion,
- offset=offset,
- )
- return volumetric_microscopy_series
-
-
-def mock_MicroscopyResponseSeries(
- *,
- table_region: pynwb.core.DynamicTableRegion,
- name: Optional[str] = None,
- description: str = "A mock instance of a MicroscopyResponseSeries type to be used for rapid testing.",
- data: Optional[np.ndarray] = None,
- unit: str = "a.u.",
- conversion: float = 1.0,
- offset: float = 0.0,
- starting_time: Optional[float] = None,
- rate: Optional[float] = None,
- timestamps: Optional[np.ndarray] = None,
-) -> ndx_microscopy.MicroscopyResponseSeries:
- series_name = name or name_generator("MicroscopyResponseSeries")
-
- number_of_frames = 100
- number_of_rois = len(table_region.data)
- series_data = data if data is not None else np.ones(shape=(number_of_frames, number_of_rois))
-
- if timestamps is None:
- series_starting_time = starting_time or 0.0
- series_rate = rate or 10.0
- series_timestamps = None
- else:
- if starting_time is not None or rate is not None:
- warnings.warn(
- message=(
- "Timestamps were provided in addition to either rate or starting_time! "
- "Please specify only timestamps, or both starting_time and rate. Timestamps will take precedence."
- ),
- stacklevel=2,
- )
-
- series_starting_time = None
- series_rate = None
- series_timestamps = timestamps
-
- microscopy_response_series = ndx_microscopy.MicroscopyResponseSeries(
- name=series_name,
- description=description,
- table_region=table_region,
- data=series_data,
- unit=unit,
- conversion=conversion,
- offset=offset,
- starting_time=series_starting_time,
- rate=series_rate,
- timestamps=series_timestamps,
- )
-
- return microscopy_response_series
-
-
-def mock_MicroscopyResponseSeriesContainer(
- *,
- microscopy_response_series: List[ndx_microscopy.MicroscopyResponseSeries],
- name: Optional[str] = None,
-) -> ndx_microscopy.MicroscopyResponseSeriesContainer:
- container_name = name or name_generator("MicroscopyResponseSeriesContainer")
-
- microscopy_response_series_container = ndx_microscopy.MicroscopyResponseSeriesContainer(
- name=container_name, microscopy_response_series=microscopy_response_series
- )
-
- return microscopy_response_series_container
-
-
-def mock_VariableDepthMultiChannelMicroscopyVolume(
- *,
- microscope: ndx_microscopy.Microscope,
- imaging_space: ndx_microscopy.VolumetricImagingSpace,
- light_sources: pynwb.base.VectorData,
- optical_channels: pynwb.base.VectorData,
- name: Optional[str] = None,
- description: str = "A mock instance of a MultiChannelMicroscopyVolume type to be used for rapid testing.",
- data: Optional[np.ndarray] = None,
- depth_per_frame_in_um: Optional[np.ndarray] = None,
- unit: str = "n.a.",
- conversion: float = 1.0,
- offset: float = 0.0,
-) -> ndx_microscopy.VariableDepthMultiChannelMicroscopyVolume:
- series_name = name or name_generator("MultiChannelMicroscopyVolume")
-
- series_data = data if data is not None else np.ones(shape=(15, 5, 5))
-
- volume_depth_per_frame_in_um = (
- depth_per_frame_in_um
- if depth_per_frame_in_um is not None
- else np.linspace(start=0.0, stop=30.0, num=series_data.shape[0])
- )
-
- imaging_data = data if data is not None else np.ones(shape=(10, 20, 7, 3))
- variable_depth_multi_channel_microscopy_volume = ndx_microscopy.VariableDepthMultiChannelMicroscopyVolume(
- name=series_name,
- description=description,
- microscope=microscope,
- imaging_space=imaging_space,
- light_sources=light_sources,
- optical_channels=optical_channels,
- data=imaging_data,
- depth_per_frame_in_um=volume_depth_per_frame_in_um,
- unit=unit,
- conversion=conversion,
- offset=offset,
- )
- return variable_depth_multi_channel_microscopy_volume
diff --git a/src/pynwb/tests/test_constructors.py b/src/pynwb/tests/test_constructors.py
deleted file mode 100644
index bcbb3cf..0000000
--- a/src/pynwb/tests/test_constructors.py
+++ /dev/null
@@ -1,178 +0,0 @@
-"""Test in-memory Python API constructors for the ndx-microscopy extension."""
-
-import pynwb.testing.mock.ophys
-import pytest
-
-import ndx_microscopy.testing
-import pynwb
-
-
-def test_constructor_microscope():
- ndx_microscopy.testing.mock_Microscope()
-
-
-def test_constructor_light_source():
- ndx_microscopy.testing.mock_MicroscopyLightSource()
-
-
-def test_constructor_microscopy_optical_channel():
- ndx_microscopy.testing.mock_MicroscopyOpticalChannel()
-
-
-def test_constructor_planar_image_space():
- microscope = ndx_microscopy.testing.mock_Microscope()
-
- ndx_microscopy.testing.mock_PlanarImagingSpace(microscope=microscope)
-
-
-def test_constructor_volumetric_image_space():
- microscope = ndx_microscopy.testing.mock_Microscope()
-
- ndx_microscopy.testing.mock_VolumetricImagingSpace(microscope=microscope)
-
-
-def test_constructor_microscopy_segmentations():
- ndx_microscopy.testing.mock_MicroscopySegmentations()
-
-
-def test_constructor_microscopy_plane_segmentation():
- microscope = ndx_microscopy.testing.mock_Microscope()
- imaging_space = ndx_microscopy.testing.mock_PlanarImagingSpace(microscope=microscope)
-
- ndx_microscopy.testing.mock_MicroscopyPlaneSegmentation(imaging_space=imaging_space)
-
-
-def test_constructor_microscopy_image_segmentation_with_plane_segmentation():
- microscope = ndx_microscopy.testing.mock_Microscope()
- imaging_space = ndx_microscopy.testing.mock_PlanarImagingSpace(microscope=microscope)
-
- plane_segmentation_1 = ndx_microscopy.testing.mock_MicroscopyPlaneSegmentation(
- imaging_space=imaging_space, name="MicroscopyPlaneSegmentation1"
- )
- plane_segmentation_2 = ndx_microscopy.testing.mock_MicroscopyPlaneSegmentation(
- imaging_space=imaging_space, name="MicroscopyPlaneSegmentation2"
- )
- microscopy_plane_segmentations = [plane_segmentation_1, plane_segmentation_2]
-
- ndx_microscopy.testing.mock_MicroscopySegmentations(microscopy_plane_segmentations=microscopy_plane_segmentations)
-
-
-def test_constructor_planar_microscopy_series():
- microscope = ndx_microscopy.testing.mock_Microscope()
- light_source = ndx_microscopy.testing.mock_MicroscopyLightSource()
- imaging_space = ndx_microscopy.testing.mock_PlanarImagingSpace(microscope=microscope)
- optical_channel = ndx_microscopy.testing.mock_MicroscopyOpticalChannel()
-
- ndx_microscopy.testing.mock_PlanarMicroscopySeries(
- microscope=microscope, light_source=light_source, imaging_space=imaging_space, optical_channel=optical_channel
- )
-
-
-def test_constructor_variable_depth_microscopy_series():
- microscope = ndx_microscopy.testing.mock_Microscope()
- light_source = ndx_microscopy.testing.mock_MicroscopyLightSource()
- imaging_space = ndx_microscopy.testing.mock_PlanarImagingSpace(microscope=microscope)
- optical_channel = ndx_microscopy.testing.mock_MicroscopyOpticalChannel()
-
- ndx_microscopy.testing.mock_VariableDepthMicroscopySeries(
- microscope=microscope, light_source=light_source, imaging_space=imaging_space, optical_channel=optical_channel
- )
-
-
-def test_constructor_volumetric_microscopy_series():
- microscope = ndx_microscopy.testing.mock_Microscope()
- light_source = ndx_microscopy.testing.mock_MicroscopyLightSource()
- imaging_space = ndx_microscopy.testing.mock_VolumetricImagingSpace(microscope=microscope)
- optical_channel = ndx_microscopy.testing.mock_MicroscopyOpticalChannel()
-
- ndx_microscopy.testing.mock_VolumetricMicroscopySeries(
- microscope=microscope, light_source=light_source, imaging_space=imaging_space, optical_channel=optical_channel
- )
-
-
-def test_constructor_microscopy_response_series():
- number_of_rois = 10
-
- plane_segmentation = pynwb.testing.mock.ophys.mock_PlaneSegmentation()
-
- table_region = pynwb.core.DynamicTableRegion(
- name="table_region",
- description="",
- data=[x for x in range(number_of_rois)],
- table=plane_segmentation,
- )
-
- ndx_microscopy.testing.mock_MicroscopyResponseSeries(table_region=table_region)
-
-
-def test_constructor_microscopy_response_series_container():
- number_of_rois = 10
-
- plane_segmentation = pynwb.testing.mock.ophys.mock_PlaneSegmentation()
-
- table_region = pynwb.core.DynamicTableRegion(
- name="table_region",
- description="",
- data=[x for x in range(number_of_rois)],
- table=plane_segmentation,
- )
-
- microscopy_response_series = ndx_microscopy.testing.mock_MicroscopyResponseSeries(table_region=table_region)
-
- ndx_microscopy.testing.mock_MicroscopyResponseSeriesContainer(
- microscopy_response_series=[microscopy_response_series]
- )
-
-
-def test_constructor_multi_channel_microscopy_volume():
- microscope = ndx_microscopy.testing.mock_Microscope()
- imaging_space = ndx_microscopy.testing.mock_VolumetricImagingSpace(microscope=microscope)
- light_sources = [ndx_microscopy.testing.mock_MicroscopyLightSource()]
- optical_channels = [ndx_microscopy.testing.mock_MicroscopyOpticalChannel()]
-
- light_sources_used_by_volume = pynwb.base.VectorData(
- name="light_sources", description="Light sources used by this MultiChannelVolume.", data=light_sources
- )
- optical_channels_used_by_volume = pynwb.base.VectorData(
- name="optical_channels",
- description=(
- "Optical channels ordered to correspond to the third axis (e.g., [0, 0, :, 0]) "
- "of the data for this MultiChannelVolume."
- ),
- data=optical_channels,
- )
- ndx_microscopy.testing.mock_MultiChannelMicroscopyVolume(
- microscope=microscope,
- imaging_space=imaging_space,
- light_sources=light_sources_used_by_volume,
- optical_channels=optical_channels_used_by_volume,
- )
-
-
-def test_constructor_variable_depth_multi_channel_microscopy_volume():
- microscope = ndx_microscopy.testing.mock_Microscope()
- imaging_space = ndx_microscopy.testing.mock_VolumetricImagingSpace(microscope=microscope)
- light_sources = [ndx_microscopy.testing.mock_MicroscopyLightSource()]
- optical_channels = [ndx_microscopy.testing.mock_MicroscopyOpticalChannel()]
-
- light_sources_used_by_volume = pynwb.base.VectorData(
- name="light_sources", description="Light sources used by this MultiChannelVolume.", data=light_sources
- )
- optical_channels_used_by_volume = pynwb.base.VectorData(
- name="optical_channels",
- description=(
- "Optical channels ordered to correspond to the third axis (e.g., [0, 0, :, 0]) "
- "of the data for this MultiChannelVolume."
- ),
- data=optical_channels,
- )
- ndx_microscopy.testing.mock_VariableDepthMultiChannelMicroscopyVolume(
- microscope=microscope,
- imaging_space=imaging_space,
- light_sources=light_sources_used_by_volume,
- optical_channels=optical_channels_used_by_volume,
- )
-
-
-if __name__ == "__main__":
- pytest.main() # Required since not a typical package structure
diff --git a/src/pynwb/tests/test_roundtrip.py b/src/pynwb/tests/test_roundtrip.py
deleted file mode 100644
index 80d6c54..0000000
--- a/src/pynwb/tests/test_roundtrip.py
+++ /dev/null
@@ -1,339 +0,0 @@
-"""Test roundtrip (write and read back) of the Python API for the ndx-microscopy extension."""
-
-import pytest
-from pynwb.testing import TestCase as pynwb_TestCase
-from pynwb.testing.mock.file import mock_NWBFile
-
-import ndx_microscopy.testing
-import pynwb
-
-
-class TestPlanarMicroscopySeriesSimpleRoundtrip(pynwb_TestCase):
- """Simple roundtrip test for PlanarMicroscopySeries."""
-
- def setUp(self):
- self.nwbfile_path = "test_planar_microscopy_series_roundtrip.nwb"
-
- def tearDown(self):
- pynwb.testing.remove_test_file(self.nwbfile_path)
-
- def test_roundtrip(self):
- nwbfile = mock_NWBFile()
-
- microscope = ndx_microscopy.testing.mock_Microscope(name="Microscope")
- nwbfile.add_device(devices=microscope)
-
- light_source = ndx_microscopy.testing.mock_MicroscopyLightSource(name="MicroscopyLightSource")
- nwbfile.add_device(devices=light_source)
-
- imaging_space = ndx_microscopy.testing.mock_PlanarImagingSpace(name="PlanarImagingSpace", microscope=microscope)
- nwbfile.add_lab_meta_data(lab_meta_data=imaging_space) # Would prefer .add_imaging_spacec()
-
- optical_channel = ndx_microscopy.testing.mock_MicroscopyOpticalChannel(name="MicroscopyOpticalChannel")
- nwbfile.add_lab_meta_data(lab_meta_data=optical_channel)
-
- planar_microscopy_series = ndx_microscopy.testing.mock_PlanarMicroscopySeries(
- name="PlanarMicroscopySeries",
- microscope=microscope,
- light_source=light_source,
- imaging_space=imaging_space,
- optical_channel=optical_channel,
- )
- nwbfile.add_acquisition(nwbdata=planar_microscopy_series)
-
- with pynwb.NWBHDF5IO(path=self.nwbfile_path, mode="w") as io:
- io.write(nwbfile)
-
- with pynwb.NWBHDF5IO(path=self.nwbfile_path, mode="r", load_namespaces=True) as io:
- read_nwbfile = io.read()
-
- self.assertContainerEqual(microscope, read_nwbfile.devices["Microscope"])
- self.assertContainerEqual(light_source, read_nwbfile.devices["MicroscopyLightSource"])
-
- self.assertContainerEqual(imaging_space, read_nwbfile.lab_meta_data["PlanarImagingSpace"])
- self.assertContainerEqual(optical_channel, read_nwbfile.lab_meta_data["MicroscopyOpticalChannel"])
-
- self.assertContainerEqual(planar_microscopy_series, read_nwbfile.acquisition["PlanarMicroscopySeries"])
-
-
-class TestVolumetricMicroscopySeriesSimpleRoundtrip(pynwb_TestCase):
- """Simple roundtrip test for VolumetricMicroscopySeries."""
-
- def setUp(self):
- self.nwbfile_path = "test_volumetric_microscopy_series_roundtrip.nwb"
-
- def tearDown(self):
- pynwb.testing.remove_test_file(self.nwbfile_path)
-
- def test_roundtrip(self):
- nwbfile = mock_NWBFile()
-
- microscope = ndx_microscopy.testing.mock_Microscope(name="Microscope")
- nwbfile.add_device(devices=microscope)
-
- light_source = ndx_microscopy.testing.mock_MicroscopyLightSource(name="MicroscopyLightSource")
- nwbfile.add_device(devices=light_source)
-
- imaging_space = ndx_microscopy.testing.mock_VolumetricImagingSpace(
- name="VolumetricImagingSpace", microscope=microscope
- )
- nwbfile.add_lab_meta_data(lab_meta_data=imaging_space) # Would prefer .add_imaging_spacec()
-
- optical_channel = ndx_microscopy.testing.mock_MicroscopyOpticalChannel(name="MicroscopyOpticalChannel")
- nwbfile.add_lab_meta_data(lab_meta_data=optical_channel)
-
- volumetric_microscopy_series = ndx_microscopy.testing.mock_VolumetricMicroscopySeries(
- name="VolumetricMicroscopySeries",
- microscope=microscope,
- light_source=light_source,
- imaging_space=imaging_space,
- optical_channel=optical_channel,
- )
- nwbfile.add_acquisition(nwbdata=volumetric_microscopy_series)
-
- with pynwb.NWBHDF5IO(path=self.nwbfile_path, mode="w") as io:
- io.write(nwbfile)
-
- with pynwb.NWBHDF5IO(path=self.nwbfile_path, mode="r", load_namespaces=True) as io:
- read_nwbfile = io.read()
-
- self.assertContainerEqual(microscope, read_nwbfile.devices["Microscope"])
- self.assertContainerEqual(light_source, read_nwbfile.devices["MicroscopyLightSource"])
-
- self.assertContainerEqual(imaging_space, read_nwbfile.lab_meta_data["VolumetricImagingSpace"])
- self.assertContainerEqual(optical_channel, read_nwbfile.lab_meta_data["MicroscopyOpticalChannel"])
-
- self.assertContainerEqual(
- volumetric_microscopy_series, read_nwbfile.acquisition["VolumetricMicroscopySeries"]
- )
-
-
-class TestVariableDepthMicroscopySeriesSimpleRoundtrip(pynwb_TestCase):
- """Simple roundtrip test for VariableDepthMicroscopySeries."""
-
- def setUp(self):
- self.nwbfile_path = "test_variable_depth_microscopy_series_roundtrip.nwb"
-
- def tearDown(self):
- pynwb.testing.remove_test_file(self.nwbfile_path)
-
- def test_roundtrip(self):
- nwbfile = mock_NWBFile()
-
- microscope = ndx_microscopy.testing.mock_Microscope(name="Microscope")
- nwbfile.add_device(devices=microscope)
-
- light_source = ndx_microscopy.testing.mock_MicroscopyLightSource(name="MicroscopyLightSource")
- nwbfile.add_device(devices=light_source)
-
- imaging_space = ndx_microscopy.testing.mock_PlanarImagingSpace(name="PlanarImagingSpace", microscope=microscope)
- nwbfile.add_lab_meta_data(lab_meta_data=imaging_space) # Would prefer .add_imaging_space()
-
- optical_channel = ndx_microscopy.testing.mock_MicroscopyOpticalChannel(name="MicroscopyOpticalChannel")
- nwbfile.add_lab_meta_data(lab_meta_data=optical_channel)
-
- variable_depth_microscopy_series = ndx_microscopy.testing.mock_VariableDepthMicroscopySeries(
- name="VariableDepthMicroscopySeries",
- microscope=microscope,
- light_source=light_source,
- imaging_space=imaging_space,
- optical_channel=optical_channel,
- )
- nwbfile.add_acquisition(nwbdata=variable_depth_microscopy_series)
-
- with pynwb.NWBHDF5IO(path=self.nwbfile_path, mode="w") as io:
- io.write(nwbfile)
-
- with pynwb.NWBHDF5IO(path=self.nwbfile_path, mode="r", load_namespaces=True) as io:
- read_nwbfile = io.read()
-
- self.assertContainerEqual(microscope, read_nwbfile.devices["Microscope"])
- self.assertContainerEqual(light_source, read_nwbfile.devices["MicroscopyLightSource"])
-
- self.assertContainerEqual(imaging_space, read_nwbfile.lab_meta_data["PlanarImagingSpace"])
- self.assertContainerEqual(optical_channel, read_nwbfile.lab_meta_data["MicroscopyOpticalChannel"])
-
- self.assertContainerEqual(
- variable_depth_microscopy_series, read_nwbfile.acquisition["VariableDepthMicroscopySeries"]
- )
-
-
-class TestMultiChannelMicroscopyVolumeSimpleRoundtrip(pynwb_TestCase):
- """Simple roundtrip test for MultiChannelMicroscopyVolume."""
-
- def setUp(self):
- self.nwbfile_path = "test_multi_channel_microscopy_volume_roundtrip.nwb"
-
- def tearDown(self):
- pynwb.testing.remove_test_file(self.nwbfile_path)
-
- def test_roundtrip(self):
- nwbfile = mock_NWBFile()
-
- microscope = ndx_microscopy.testing.mock_Microscope(name="Microscope")
- nwbfile.add_device(devices=microscope)
-
- imaging_space = ndx_microscopy.testing.mock_VolumetricImagingSpace(
- name="VolumetricImagingSpace", microscope=microscope
- )
- nwbfile.add_lab_meta_data(lab_meta_data=imaging_space) # Would prefer .add_imaging_space()
-
- light_sources = list()
- light_source_0 = ndx_microscopy.testing.mock_MicroscopyLightSource(name="LightSource")
- nwbfile.add_device(devices=light_source_0)
- light_sources.append(light_source_0)
-
- optical_channels = list()
- optical_channel_0 = ndx_microscopy.testing.mock_MicroscopyOpticalChannel(name="MicroscopyOpticalChannel")
- nwbfile.add_lab_meta_data(lab_meta_data=optical_channel_0)
- optical_channels.append(optical_channel_0)
-
- # TODO: It might be more convenient in Python to have a custom constructor that takes in a list of
- # light sources and optical channels and does the VectorData wrapping internally
- light_sources_used_by_volume = pynwb.base.VectorData(
- name="light_sources", description="Light sources used by this MultiChannelVolume.", data=light_sources
- )
- optical_channels_used_by_volume = pynwb.base.VectorData(
- name="optical_channels",
- description=(
- "Optical channels ordered to correspond to the third axis (e.g., [0, 0, :, 0]) "
- "of the data for this MultiChannelVolume."
- ),
- data=optical_channels,
- )
- multi_channel_microscopy_volume = ndx_microscopy.testing.mock_MultiChannelMicroscopyVolume(
- name="MultiChannelMicroscopyVolume",
- microscope=microscope,
- imaging_space=imaging_space,
- light_sources=light_sources_used_by_volume,
- optical_channels=optical_channels_used_by_volume,
- )
- nwbfile.add_acquisition(nwbdata=multi_channel_microscopy_volume)
-
- with pynwb.NWBHDF5IO(path=self.nwbfile_path, mode="w") as io:
- io.write(nwbfile)
-
- with pynwb.NWBHDF5IO(path=self.nwbfile_path, mode="r", load_namespaces=True) as io:
- read_nwbfile = io.read()
-
- self.assertContainerEqual(microscope, read_nwbfile.devices["Microscope"])
- self.assertContainerEqual(light_source_0, read_nwbfile.devices["LightSource"])
-
- self.assertContainerEqual(imaging_space, read_nwbfile.lab_meta_data["VolumetricImagingSpace"])
- self.assertContainerEqual(optical_channel_0, read_nwbfile.lab_meta_data["MicroscopyOpticalChannel"])
-
- self.assertContainerEqual(
- multi_channel_microscopy_volume, read_nwbfile.acquisition["MultiChannelMicroscopyVolume"]
- )
-
-
-class TestMicroscopySegmentationsSimpleRoundtrip(pynwb_TestCase):
- """Simple roundtrip test for MicroscopySegmentations."""
-
- def setUp(self):
- self.nwbfile_path = "test_microscopy_segmentations_roundtrip.nwb"
-
- def tearDown(self):
- pynwb.testing.remove_test_file(self.nwbfile_path)
-
- def test_roundtrip(self):
- nwbfile = mock_NWBFile()
-
- microscope = ndx_microscopy.testing.mock_Microscope(name="Microscope")
- nwbfile.add_device(devices=microscope)
-
- imaging_space = ndx_microscopy.testing.mock_PlanarImagingSpace(name="PlanarImagingSpace", microscope=microscope)
- nwbfile.add_lab_meta_data(lab_meta_data=imaging_space) # Would prefer .add_imaging_space()
-
- plane_segmentation_1 = ndx_microscopy.testing.mock_MicroscopyPlaneSegmentation(
- imaging_space=imaging_space, name="MicroscopyPlaneSegmentation1"
- )
- plane_segmentation_2 = ndx_microscopy.testing.mock_MicroscopyPlaneSegmentation(
- imaging_space=imaging_space, name="MicroscopyPlaneSegmentation2"
- )
- microscopy_plane_segmentations = [plane_segmentation_1, plane_segmentation_2]
-
- segmentations = ndx_microscopy.testing.mock_MicroscopySegmentations(
- name="MicroscopySegmentations", microscopy_plane_segmentations=microscopy_plane_segmentations
- )
- ophys_module = nwbfile.create_processing_module(name="ophys", description="")
- ophys_module.add(segmentations)
-
- with pynwb.NWBHDF5IO(path=self.nwbfile_path, mode="w") as io:
- io.write(nwbfile)
-
- with pynwb.NWBHDF5IO(path=self.nwbfile_path, mode="r", load_namespaces=True) as io:
- read_nwbfile = io.read()
-
- self.assertContainerEqual(microscope, read_nwbfile.devices["Microscope"])
-
- self.assertContainerEqual(imaging_space, read_nwbfile.lab_meta_data["PlanarImagingSpace"])
-
- self.assertContainerEqual(segmentations, read_nwbfile.processing["ophys"]["MicroscopySegmentations"])
-
-
-class TestMicroscopyResponseSeriesSimpleRoundtrip(pynwb_TestCase):
- """Simple roundtrip test for MicroscopyResponseSeries."""
-
- def setUp(self):
- self.nwbfile_path = "test_microscopy_response_series_roundtrip.nwb"
-
- def tearDown(self):
- pynwb.testing.remove_test_file(self.nwbfile_path)
-
- def test_roundtrip(self):
- nwbfile = mock_NWBFile()
-
- microscope = ndx_microscopy.testing.mock_Microscope(name="Microscope")
- nwbfile.add_device(devices=microscope)
-
- imaging_space = ndx_microscopy.testing.mock_PlanarImagingSpace(name="PlanarImagingSpace", microscope=microscope)
- nwbfile.add_lab_meta_data(lab_meta_data=imaging_space) # Would prefer .add_imaging_space()
-
- microscopy_plane_segmentations = ndx_microscopy.testing.mock_MicroscopyPlaneSegmentation(
- name="MicroscopyPlaneSegmentation", imaging_space=imaging_space
- )
-
- segmentations = ndx_microscopy.testing.mock_MicroscopySegmentations(
- name="MicroscopySegmentations", microscopy_plane_segmentations=[microscopy_plane_segmentations]
- )
- ophys_module = nwbfile.create_processing_module(name="ophys", description="")
- ophys_module.add(segmentations)
-
- number_of_rois = 10
- plane_segmentation_region = pynwb.ophys.DynamicTableRegion(
- name="table_region", # Name must be exactly this
- description="",
- data=[x for x in range(number_of_rois)],
- table=microscopy_plane_segmentations,
- )
- microscopy_response_series = ndx_microscopy.testing.mock_MicroscopyResponseSeries(
- name="MicroscopyResponseSeries",
- table_region=plane_segmentation_region,
- )
-
- microscopy_response_series_container = ndx_microscopy.MicroscopyResponseSeriesContainer(
- name="MicroscopyResponseSeriesContainer", microscopy_response_series=[microscopy_response_series]
- )
- ophys_module.add(microscopy_response_series_container)
-
- with pynwb.NWBHDF5IO(path=self.nwbfile_path, mode="w") as io:
- io.write(nwbfile)
-
- with pynwb.NWBHDF5IO(path=self.nwbfile_path, mode="r", load_namespaces=True) as io:
- read_nwbfile = io.read()
-
- self.assertContainerEqual(microscope, read_nwbfile.devices["Microscope"])
-
- self.assertContainerEqual(imaging_space, read_nwbfile.lab_meta_data["PlanarImagingSpace"])
-
- self.assertContainerEqual(segmentations, read_nwbfile.processing["ophys"]["MicroscopySegmentations"])
-
- self.assertContainerEqual(
- microscopy_response_series_container,
- read_nwbfile.processing["ophys"]["MicroscopyResponseSeriesContainer"],
- )
-
-
-if __name__ == "__main__":
- pytest.main() # Required since not a typical package structure
diff --git a/src/pynwb/tests/test_tetrodeseries.py b/src/pynwb/tests/test_tetrodeseries.py
new file mode 100644
index 0000000..dcdad60
--- /dev/null
+++ b/src/pynwb/tests/test_tetrodeseries.py
@@ -0,0 +1,126 @@
+"""Unit and integration tests for the example TetrodeSeries extension neurodata type.
+
+TODO: Modify these tests to test your extension neurodata type.
+"""
+
+import numpy as np
+
+from pynwb import NWBHDF5IO, NWBFile
+from pynwb.testing.mock.device import mock_Device
+from pynwb.testing.mock.ecephys import mock_ElectrodeGroup, mock_ElectrodeTable
+from pynwb.testing.mock.file import mock_NWBFile
+from pynwb.testing import TestCase, remove_test_file, NWBH5IOFlexMixin
+
+from ndx_microscopy import TetrodeSeries
+
+
+def set_up_nwbfile(nwbfile: NWBFile = None):
+ """Create an NWBFile with a Device, ElectrodeGroup, and 10 electrodes in the ElectrodeTable."""
+ nwbfile = nwbfile or mock_NWBFile()
+ device = mock_Device(nwbfile=nwbfile)
+ electrode_group = mock_ElectrodeGroup(device=device, nwbfile=nwbfile)
+ _ = mock_ElectrodeTable(n_rows=10, group=electrode_group, nwbfile=nwbfile)
+
+ return nwbfile
+
+
+class TestTetrodeSeriesConstructor(TestCase):
+ """Simple unit test for creating a TetrodeSeries."""
+
+ def setUp(self):
+ """Set up an NWB file. Necessary because TetrodeSeries requires references to electrodes."""
+ self.nwbfile = set_up_nwbfile()
+
+ def test_constructor(self):
+ """Test that the constructor for TetrodeSeries sets values as expected."""
+ all_electrodes = self.nwbfile.create_electrode_table_region(
+ region=list(range(0, 10)),
+ description="all the electrodes",
+ )
+
+ data = np.random.rand(100, 10)
+ tetrode_series = TetrodeSeries(
+ name="name",
+ description="description",
+ data=data,
+ rate=1000.0,
+ electrodes=all_electrodes,
+ trode_id=1,
+ )
+
+ self.assertEqual(tetrode_series.name, "name")
+ self.assertEqual(tetrode_series.description, "description")
+ np.testing.assert_array_equal(tetrode_series.data, data)
+ self.assertEqual(tetrode_series.rate, 1000.0)
+ self.assertEqual(tetrode_series.starting_time, 0)
+ self.assertEqual(tetrode_series.electrodes, all_electrodes)
+ self.assertEqual(tetrode_series.trode_id, 1)
+
+
+class TestTetrodeSeriesSimpleRoundtrip(TestCase):
+ """Simple roundtrip test for TetrodeSeries."""
+
+ def setUp(self):
+ self.nwbfile = set_up_nwbfile()
+ self.path = "test.nwb"
+
+ def tearDown(self):
+ remove_test_file(self.path)
+
+ def test_roundtrip(self):
+ """
+ Add a TetrodeSeries to an NWBFile, write it to file, read the file, and test that the TetrodeSeries from the
+ file matches the original TetrodeSeries.
+ """
+ all_electrodes = self.nwbfile.create_electrode_table_region(
+ region=list(range(0, 10)),
+ description="all the electrodes",
+ )
+
+ data = np.random.rand(100, 10)
+ tetrode_series = TetrodeSeries(
+ name="TetrodeSeries",
+ description="description",
+ data=data,
+ rate=1000.0,
+ electrodes=all_electrodes,
+ trode_id=1,
+ )
+
+ self.nwbfile.add_acquisition(tetrode_series)
+
+ with NWBHDF5IO(self.path, mode="w") as io:
+ io.write(self.nwbfile)
+
+ with NWBHDF5IO(self.path, mode="r", load_namespaces=True) as io:
+ read_nwbfile = io.read()
+ self.assertContainerEqual(tetrode_series, read_nwbfile.acquisition["TetrodeSeries"])
+
+
+class TestTetrodeSeriesRoundtripPyNWB(NWBH5IOFlexMixin, TestCase):
+ """Complex, more complete roundtrip test for TetrodeSeries using pynwb.testing infrastructure."""
+
+ def getContainerType(self):
+ return "TetrodeSeries"
+
+ def addContainer(self):
+ set_up_nwbfile(self.nwbfile)
+
+ all_electrodes = self.nwbfile.create_electrode_table_region(
+ region=list(range(0, 10)),
+ description="all the electrodes",
+ )
+
+ data = np.random.rand(100, 10)
+ tetrode_series = TetrodeSeries(
+ name="TetrodeSeries",
+ description="description",
+ data=data,
+ rate=1000.0,
+ electrodes=all_electrodes,
+ trode_id=1,
+ )
+ self.nwbfile.add_acquisition(tetrode_series)
+
+ def getContainer(self, nwbfile: NWBFile):
+ return nwbfile.acquisition["TetrodeSeries"]
diff --git a/src/spec/create_extension_spec.py b/src/spec/create_extension_spec.py
new file mode 100644
index 0000000..bcf17fd
--- /dev/null
+++ b/src/spec/create_extension_spec.py
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+import os.path
+
+from pynwb.spec import NWBNamespaceBuilder, export_spec, NWBGroupSpec, NWBAttributeSpec
+
+# TODO: import other spec classes as needed
+# from pynwb.spec import NWBDatasetSpec, NWBLinkSpec, NWBDtypeSpec, NWBRefSpec
+
+
+def main():
+ # these arguments were auto-generated from your cookiecutter inputs
+ ns_builder = NWBNamespaceBuilder(
+ name="""ndx-microscopy""",
+ version="""0.1.0""",
+ doc="""An NWB extension to demonstrate the TAB proposal for enhancements to optical physiology neurodata types.""",
+ author=[
+ "Alessandra Trapani",
+ "Cody Baker",
+ ],
+ contact=[
+ "alessandra.trapani@catalystneuro.com",
+ "cody.baker@catalystneuro.com",
+ ],
+ )
+ ns_builder.include_namespace("core")
+
+ # TODO: if your extension builds on another extension, include the namespace
+ # of the other extension below
+ # ns_builder.include_namespace("ndx-other-extension")
+
+ # TODO: define your new data types
+ # see https://pynwb.readthedocs.io/en/stable/tutorials/general/extensions.html
+ # for more information
+ tetrode_series = NWBGroupSpec(
+ neurodata_type_def="TetrodeSeries",
+ neurodata_type_inc="ElectricalSeries",
+ doc="An extension of ElectricalSeries to include the tetrode ID for each time series.",
+ attributes=[NWBAttributeSpec(name="trode_id", doc="The tetrode ID.", dtype="int32")],
+ )
+
+ # TODO: add all of your new data types to this list
+ new_data_types = [tetrode_series]
+
+ # export the spec to yaml files in the spec folder
+ output_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "spec"))
+ export_spec(ns_builder, new_data_types, output_dir)
+
+
+if __name__ == "__main__":
+ # usage: python create_extension_spec.py
+ main()