diff --git a/.github/workflows/codespell.yml b/.github/workflows/codespell.yml new file mode 100644 index 0000000..6575870 --- /dev/null +++ b/.github/workflows/codespell.yml @@ -0,0 +1,14 @@ +name: Codespell +on: + pull_request: + workflow_dispatch: + +jobs: + codespell: + name: Check for spelling errors + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v3 + - name: Codespell + uses: codespell-project/actions-codespell@v2 diff --git a/.github/workflows/run_all_tests.yml b/.github/workflows/run_all_tests.yml new file mode 100644 index 0000000..8f16226 --- /dev/null +++ b/.github/workflows/run_all_tests.yml @@ -0,0 +1,183 @@ +name: Run all tests +on: + pull_request: + schedule: + - cron: '0 5 * * 0' # once every Sunday at midnight ET + workflow_dispatch: + +jobs: + run-all-tests: + name: ${{ matrix.name }} + runs-on: ${{ matrix.os }} + defaults: + run: + shell: bash + strategy: + fail-fast: false + matrix: + include: + - { name: linux-python3.8-minimum , requirements: minimum , python-ver: "3.8" , os: ubuntu-latest } + - { name: linux-python3.8 , requirements: pinned , python-ver: "3.8" , os: ubuntu-latest } + - { name: linux-python3.9 , requirements: pinned , python-ver: "3.9" , os: ubuntu-latest } + - { name: linux-python3.10 , requirements: pinned , python-ver: "3.10", os: ubuntu-latest } + - { name: linux-python3.11 , requirements: pinned , python-ver: "3.11", os: ubuntu-latest } + - { name: linux-python3.12 , requirements: pinned , python-ver: "3.12", os: ubuntu-latest } + - { name: linux-python3.12-upgraded , requirements: upgraded , python-ver: "3.12", os: ubuntu-latest } + - { name: windows-python3.8-minimum , requirements: minimum , python-ver: "3.8" , os: windows-latest } + - { name: windows-python3.8 , requirements: pinned , python-ver: "3.8" , os: windows-latest } + - { name: windows-python3.9 , requirements: pinned , python-ver: "3.9" , os: windows-latest } + - { name: windows-python3.10 , requirements: pinned , python-ver: 
"3.10", os: windows-latest } + - { name: windows-python3.11 , requirements: pinned , python-ver: "3.11", os: windows-latest } + - { name: windows-python3.12 , requirements: pinned , python-ver: "3.12", os: windows-latest } + - { name: windows-python3.12-upgraded , requirements: upgraded , python-ver: "3.12", os: windows-latest } + - { name: macos-python3.8-minimum , requirements: minimum , python-ver: "3.8" , os: macos-latest } + - { name: macos-python3.8 , requirements: pinned , python-ver: "3.8" , os: macos-latest } + - { name: macos-python3.9 , requirements: pinned , python-ver: "3.9" , os: macos-latest } + - { name: macos-python3.10 , requirements: pinned , python-ver: "3.10", os: macos-latest } + - { name: macos-python3.11 , requirements: pinned , python-ver: "3.11", os: macos-latest } + - { name: macos-python3.12 , requirements: pinned , python-ver: "3.12", os: macos-latest } + - { name: macos-python3.12-upgraded , requirements: upgraded , python-ver: "3.12", os: macos-latest } + steps: + - name: Cancel non-latest runs + uses: styfle/cancel-workflow-action@0.11.0 + with: + all_but_latest: true + access_token: ${{ github.token }} + + - uses: actions/checkout@v3 + with: + submodules: 'recursive' + fetch-depth: 0 # fetch tags + + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-ver }} + + - name: Install build dependencies + run: | + python -m pip install --upgrade pip + python -m pip list + python -m pip check + + - name: Install run requirements (minimum) + if: ${{ matrix.requirements == 'minimum' }} + run: | + python -m pip install -r requirements-min.txt -r requirements-dev.txt + python -m pip install -e . + + - name: Install run requirements (pinned) + if: ${{ matrix.requirements == 'pinned' }} + run: | + python -m pip install -r requirements-dev.txt + python -m pip install -e . 
+ + - name: Install run requirements (upgraded) + if: ${{ matrix.requirements == 'upgraded' }} + run: | + python -m pip install -r requirements-dev.txt + python -m pip install -U -e . + + - name: Run tests + run: | + pytest -v + + - name: Build wheel and source distribution + run: | + python -m pip install --upgrade build + python -m build + ls -1 dist + + - name: Test installation from a wheel (POSIX) + if: ${{ matrix.os != 'windows-latest' }} + run: | + python -m venv test-wheel-env + source test-wheel-env/bin/activate + python -m pip install dist/*-none-any.whl + python -c "import ndx_microscopy" + + - name: Test installation from a wheel (windows) + if: ${{ matrix.os == 'windows-latest' }} + run: | + python -m venv test-wheel-env + test-wheel-env/Scripts/activate.bat + python -m pip install dist/*-none-any.whl + python -c "import ndx_microscopy" + + run-all-tests-on-conda: + name: ${{ matrix.name }} + runs-on: ubuntu-latest + defaults: + run: + shell: bash -l {0} # needed for conda environment to work + strategy: + fail-fast: false + matrix: + include: + - { name: conda-linux-python3.8-minimum , requirements: minimum , python-ver: "3.8" , os: ubuntu-latest } + - { name: conda-linux-python3.8 , requirements: pinned , python-ver: "3.8" , os: ubuntu-latest } + - { name: conda-linux-python3.9 , requirements: pinned , python-ver: "3.9" , os: ubuntu-latest } + - { name: conda-linux-python3.10 , requirements: pinned , python-ver: "3.10", os: ubuntu-latest } + - { name: conda-linux-python3.11 , requirements: pinned , python-ver: "3.11", os: ubuntu-latest } + - { name: conda-linux-python3.12 , requirements: pinned , python-ver: "3.12", os: ubuntu-latest } + - { name: conda-linux-python3.12-upgraded , requirements: upgraded , python-ver: "3.12", os: ubuntu-latest } + steps: + - name: Cancel any previous incomplete runs + uses: styfle/cancel-workflow-action@0.11.0 + with: + access_token: ${{ github.token }} + + - uses: actions/checkout@v3 + with: + submodules: 'recursive' 
+ fetch-depth: 0 # fetch tags + + - name: Set up Conda + uses: conda-incubator/setup-miniconda@v2 + with: + auto-update-conda: true + auto-activate-base: true + activate-environment: true + python-version: ${{ matrix.python-ver }} + + - name: Install build dependencies + run: | + conda config --set always_yes yes --set changeps1 no + conda info + conda config --show-sources + conda list --show-channel-urls + + - name: Install run requirements (minimum) + if: ${{ matrix.requirements == 'minimum' }} + run: | + python -m pip install -r requirements-min.txt -r requirements-dev.txt + python -m pip install -e . + + - name: Install run requirements (pinned) + if: ${{ matrix.requirements == 'pinned' }} + run: | + python -m pip install -r requirements-dev.txt + python -m pip install -e . + + - name: Install run requirements (upgraded) + if: ${{ matrix.requirements == 'upgraded' }} + run: | + python -m pip install -r requirements-dev.txt + python -m pip install -U -e . + + - name: Run tests + run: | + pytest -v + + - name: Build wheel and source distribution + run: | + python -m pip install --upgrade build + python -m build + ls -1 dist + + - name: Test installation from a wheel (POSIX) + run: | + python -m venv test-wheel-env + source test-wheel-env/bin/activate + python -m pip install dist/*-none-any.whl + python -c "import ndx_microscopy" diff --git a/.github/workflows/run_coverage.yml b/.github/workflows/run_coverage.yml new file mode 100644 index 0000000..4883bf4 --- /dev/null +++ b/.github/workflows/run_coverage.yml @@ -0,0 +1,59 @@ +name: Run code coverage +on: + pull_request: + workflow_dispatch: + +jobs: + run-coverage: + name: ${{ matrix.os }} + runs-on: ${{ matrix.os }} + # TODO handle forks + # run pipeline on either a push event or a PR event on a fork + # if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name + defaults: + run: + shell: bash + strategy: + matrix: + os: 
[ubuntu-latest, windows-latest, macos-latest] + env: # used by codecov-action + OS: ${{ matrix.os }} + PYTHON: '3.11' + steps: + - name: Cancel any previous incomplete runs + uses: styfle/cancel-workflow-action@0.11.0 + with: + all_but_latest: true + access_token: ${{ github.token }} + + - uses: actions/checkout@v3 + with: + submodules: 'recursive' + fetch-depth: 0 # fetch tags + + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: ${{ env.PYTHON }} + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + python -m pip install -r requirements-dev.txt + + - name: Install package + run: | + python -m pip install -e . # must install in editable mode for coverage to find sources + python -m pip list + + - name: Run tests and generate coverage report + run: | + pytest --cov + python -m coverage xml # codecov uploader requires xml format + python -m coverage report -m + + # TODO uncomment after setting up repo on codecov.io + # - name: Upload coverage to Codecov + # uses: codecov/codecov-action@v3 + # with: + # fail_ci_if_error: true diff --git a/.gitignore b/.gitignore index cf7a218..a40ad0b 100644 --- a/.gitignore +++ b/.gitignore @@ -114,3 +114,6 @@ venv.bak/ # Mac finder .DS_Store + +# PyCharm +.idea/ diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000..59980d0 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,17 @@ +repos: +- repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.6.0 + hooks: + - id: check-yaml + - id: check-toml + - id: end-of-file-fixer + - id: trailing-whitespace + - id: check-added-large-files +- repo: https://github.com/psf/black + rev: 24.4.2 + hooks: + - id: black +- repo: https://github.com/PyCQA/isort + rev: 5.13.2 + hooks: + - id: isort diff --git a/NEXTSTEPS.md b/NEXTSTEPS.md index 080f778..0faf651 100644 --- a/NEXTSTEPS.md +++ b/NEXTSTEPS.md @@ -107,7 +107,7 @@ with information on where to find your NWB extension. 
src: https://github.com/CodyCBakerPhD/ndx-microscopy pip: https://pypi.org/project/ndx-microscopy/ license: MIT - maintainers: + maintainers: - CodyCBakerPhD ``` diff --git a/README.md b/README.md index b1b6576..6116b3e 100644 --- a/README.md +++ b/README.md @@ -1,14 +1,190 @@ # ndx-microscopy Extension for NWB -Description of the extension +An enhancement to core NWB schema types related to microscopy data. + +Planned for an eventual NWBEP with the TAB. + ## Installation +``` +git clone https://github.com/catalystneuro/ndx-microscopy +pip install ndx-microscopy +``` + ## Usage ```python +# TODO +``` + + +## Entity relationship diagram + +```mermaid +%%{init: {'theme': 'base', 'themeVariables': {'primaryColor': '#ffffff', 'primaryBorderColor': '#144E73', 'lineColor': '#D96F32'}}}%% + + +classDiagram + direction BT + + class MicroscopySeries { + <> + + -------------------------------------- + links + -------------------------------------- + microscope : Microscope + light_source : LightSource + optical_channel : MicroscopyOpticalChannel + } + + class PlanarMicroscopySeries { + <> + + -------------------------------------- + datasets + -------------------------------------- + data : numeric, frame x height x width + --> unit : text + + -------------------------------------- + links + -------------------------------------- + imaging_space : PlanarImagingSpace + } + + class VariableDepthMicroscopySeries { + <> + + -------------------------------------- + datasets + -------------------------------------- + data : numeric, frame x height x width + --> unit : text + depth_per_frame : numeric, length of frames + --> unit : text, default="micrometers" + + -------------------------------------- + links + -------------------------------------- + imaging_space : PlanarImagingSpace + } + + class VolumetricMicroscopySeries { + <> + + -------------------------------------- + datasets + -------------------------------------- + data : numeric, frame x height x width x depth + --> 
unit : text + + -------------------------------------- + links + -------------------------------------- + imaging_space : VolumetricImagingSpace + } + + class ImagingSpace{ + <> + + -------------------------------------- + datasets + -------------------------------------- + description : text + origin_coordinates : numeric, length 3, optional + --> unit : text, default="micrometers" + + -------------------------------------- + attributes + -------------------------------------- + location : text, optional + } + + class PlanarImagingSpace{ + <> + + -------------------------------------- + datasets + -------------------------------------- + grid_spacing : numeric, length 2, optional + --> unit : text, default="micrometers" + + -------------------------------------- + attributes + -------------------------------------- + reference_frame : text, optional + } + + class VolumetricImagingSpace{ + <> + + -------------------------------------- + datasets + -------------------------------------- + grid_spacing : numeric, length 2, optional + --> unit : text, default="micrometers" + + -------------------------------------- + attributes + -------------------------------------- + reference_frame : text, optional + } + + class MicroscopyOpticalChannel{ + <> + + -------------------------------------- + datasets + -------------------------------------- + description : text + + -------------------------------------- + attributes + -------------------------------------- + indicator : text + filter_description : text, optional + emission_wavelength_in_nm : numeric, optional + } + + class LightSource{ + <> + + -------------------------------------- + attributes + -------------------------------------- + model : text, optional + filter_description : text, optional + excitation_wavelength_in_nm : numeric, optional + peak_power_in_W : numeric, optional + peak_pulse_energy_in_J : numeric, optional + intensity_in_W_per_m2 : numeric, optional + exposure_time_in_s : numeric, optional + 
pulse_rate_in_Hz : numeric, optional + } + + class Microscope{ + <> + + -------------------------------------- + attributes + -------------------------------------- + model : text, optional + } + PlanarMicroscopySeries *-- MicroscopySeries : extends + PlanarMicroscopySeries -- PlanarImagingSpace : links + VariableDepthMicroscopySeries *-- MicroscopySeries : extends + VariableDepthMicroscopySeries -- PlanarImagingSpace : links + VolumetricMicroscopySeries *-- MicroscopySeries : extends + VolumetricMicroscopySeries -- VolumetricImagingSpace : links + PlanarImagingSpace *-- ImagingSpace : extends + VolumetricImagingSpace *-- ImagingSpace : extends + MicroscopySeries ..> Microscope : links + MicroscopySeries ..> LightSource : links + MicroscopySeries ..> MicroscopyOpticalChannel : links ``` --- diff --git a/docs/Makefile b/docs/Makefile deleted file mode 100644 index 54e6545..0000000 --- a/docs/Makefile +++ /dev/null @@ -1,179 +0,0 @@ - -# Makefile for Sphinx documentation -# - -# You can set these variables from the command line. -SPHINXOPTS = -SPHINXBUILD = sphinx-build -SPHINXAPIDOC = sphinx-apidoc -PAPER = -BUILDDIR = build -SRCDIR = ../src -RSTDIR = source -CONFDIR = $(PWD)/source - - -# Internal variables. -PAPEROPT_a4 = -D latex_paper_size=a4 -PAPEROPT_letter = -D latex_paper_size=letter -ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source -# the i18n builder cannot share the environment and doctrees with the others -I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
- -.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext fulldoc allclean - -help: - @echo "To update documentation sources from the format specification please use \`make apidoc'" - @echo "" - @echo "To build the documentation please use \`make ' where is one of" - @echo " fulldoc to rebuild the apidoc, html, and latexpdf all at once" - @echo " html to make standalone HTML files" - @echo " dirhtml to make HTML files named index.html in directories" - @echo " singlehtml to make a single large HTML file" - @echo " pickle to make pickle files" - @echo " json to make JSON files" - @echo " htmlhelp to make HTML files and a HTML help project" - @echo " qthelp to make HTML files and a qthelp project" - @echo " devhelp to make HTML files and a Devhelp project" - @echo " epub to make an epub" - @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" - @echo " latexpdf to make LaTeX files and run them through pdflatex" - @echo " text to make text files" - @echo " man to make manual pages" - @echo " texinfo to make Texinfo files" - @echo " info to make Texinfo files and run them through makeinfo" - @echo " gettext to make PO message catalogs" - @echo " changes to make an overview of all changed/added/deprecated items" - @echo " linkcheck to check all external links for integrity" - @echo " doctest to run all doctests embedded in the documentation (if enabled)" - @echo " apidoc to to build RST from source code" - @echo " clean to clean all documents built by Sphinx in _build" - @echo " allclean to clean all autogenerated documents both from Sphinx and apidoc" - -allclean: - -rm -rf $(BUILDDIR)/* $(RSTDIR)/modules.rst - -rm $(RSTDIR)/_format_auto_docs/*.png - -rm $(RSTDIR)/_format_auto_docs/*.pdf - -rm $(RSTDIR)/_format_auto_docs/*.rst - -rm $(RSTDIR)/_format_auto_docs/*.inc - -clean: - -rm -rf $(BUILDDIR)/* $(RSTDIR)/modules.rst - -html: - $(SPHINXBUILD) -b html 
$(ALLSPHINXOPTS) $(BUILDDIR)/html - @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." - -dirhtml: - $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml - @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." - -singlehtml: - $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml - @echo - @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." - -pickle: - $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle - @echo - @echo "Build finished; now you can process the pickle files." - -json: - $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json - @echo - @echo "Build finished; now you can process the JSON files." - -htmlhelp: - $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp - @echo - @echo "Build finished; now you can run HTML Help Workshop with the" ".hhp project file in $(BUILDDIR)/htmlhelp." - -qthelp: - $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp - @echo - @echo "Build finished; now you can run "qcollectiongenerator" with the" ".qhcp project file in $(BUILDDIR)/qthelp, like this:" - @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/sample.qhcp" - @echo "To view the help file:" - @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/sample.qhc" - -devhelp: - $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp - @echo - @echo "Build finished." - @echo "To view the help file:" - @echo "# mkdir -p $$HOME/.local/share/devhelp/sample" - @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/sample" - @echo "# devhelp" - -epub: - $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub - @echo - @echo "Build finished. The epub file is in $(BUILDDIR)/epub." - -latex: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo - @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." - @echo "Run \`make' in that directory to run these through (pdf)latex" "(use \`make latexpdf' here to do that automatically)." 
- -latexpdf: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo "Running LaTeX files through pdflatex..." - $(MAKE) -C $(BUILDDIR)/latex all-pdf - @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." - -text: - $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text - @echo - @echo "Build finished. The text files are in $(BUILDDIR)/text." - -man: - $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man - @echo - @echo "Build finished. The manual pages are in $(BUILDDIR)/man." - -texinfo: - $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo - @echo - @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." - @echo "Run \`make' in that directory to run these through makeinfo" "(use \`make info' here to do that automatically)." - -info: - $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo - @echo "Running Texinfo files through makeinfo..." - make -C $(BUILDDIR)/texinfo info - @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." - -gettext: - $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale - @echo - @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." - -changes: - $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes - @echo - @echo "The overview file is in $(BUILDDIR)/changes." - -linkcheck: - $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck - @echo - @echo "Link check complete; look for any errors in the above output " "or in $(BUILDDIR)/linkcheck/output.txt." - -doctest: - $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest - @echo "Testing of doctests in the sources finished, look at the " "results in $(BUILDDIR)/doctest/output.txt." - -apidoc: - PYTHONPATH=$(CONFDIR):$(PYTHONPATH) nwb_generate_format_docs - @echo - @echo "Generate rst source files from NWB spec." 
- -fulldoc: - $(MAKE) allclean - @echo - @echo "Rebuilding apidoc, html, latexpdf" - $(MAKE) apidoc - $(MAKE) html - $(MAKE) latexpdf diff --git a/docs/README.md b/docs/README.md deleted file mode 100644 index 9a3a30d..0000000 --- a/docs/README.md +++ /dev/null @@ -1,121 +0,0 @@ - -# Getting started - -## Generate Documentation - -* To generate the HTML version of your documentation run ``make html``. -* The [hdmf-docutils](https://pypi.org/project/hdmf-docutils/) package must be installed. - -## Customize Your Extension Documentation - -* **extension description** - * Edit ``source/description.rst`` to describe your extension. - -* **release notes** - * Edit ``source/release_notes.rst`` to document improvements and fixes of your extension. - -* **documentation build settings** - * Edit ``source/conf.py`` to customize your extension documentation configuration. - * Edit ``source/conf_doc_autogen.py`` to customize the format documentation auto-generation based on - the YAML specification files. - - -# Overview - -The specification documentation uses Sphinx [http://www.sphinx-doc.org/en/stable/index.html](http://www.sphinx-doc.org/en/stable/index.html) - -## Rebuilding All - -To rebuild the full documentation in html, latex, and PDF simply run: - -``` -make fulldoc -``` - -This is a convenience function that is equivalent to: - -``` -make allclean -make apidoc -make html -make latexpdf -``` - -## Generating the format documentation from the format spec - -The format documentation is auto-generated from the format specification (YAML) sources via: - -``` -make apidoc -``` - -This will invoke the executable: - -``` -hdmf_generate_format_docs -``` - -The script automatically generates a series of .rst, .png, and .pdf files that are stored in the folder `source/format_auto_docs`. The generated .rst files are included in `source/format.rst` and the png and pdf files are used as figures in the autogenerated docs. 
- -The folder `source/format_auto_docs` is reserved for autogenerated files, i.e., files in the folder should not be added or edited by hand as they will be deleted and rebuilt during the full built of the documentation. - -By default the Sphinx configuration is setup to always regenerate the sources whenever the docs are being built (see next section). This behavior can be customized via the `spec_doc_rebuild_always` parameter in `source/conf.py` - -## Building a specific document type - -To build the documentation, run: - -``` -make -``` - -where `` is, e.g., `latexpdf`, `html`, `singlehtml`, or `man`. For a complete list of supported doc-types, see: - -``` -make help -``` - -## Cleaning up - -`make clean` cleans up all builds of the documentation located in `_build`. - -`make allclean` cleans up all builds of the documentation located in `_build` as well as all autogenerated sources stored in `source/format_auto_docs`. - -## Configuration - -The build of the documentation can be customized via a broad range of Sphinx options in: - -`source/conf_doc_autogen.py` - -In addition to standard Sphinx options, there are a number of additional options used to customize the content and structure of the autogenerated documents, e.g.: - -* `spec_show_yaml_src` - Boolean indicating whether the YAML sources should be included for the different Neurodata types -* `spec_generate_src_file` - Boolean indicating whether the YAML sources of the neurodata_types should be rendered in a separate section (True) or in the same location as the main documentation -* `spec_show_hierarchy_plots` - Boolean indicating whether we should generate and show figures of the hierarchy defined by the specifications as part of the documentation -* `spec_file_per_type` - Boolean indicating whether we should generate separate .inc reStructuredText for each neurodata_type (True) -or should all text be added to the main file (False) -* `spec_show_subgroups_in_tables` - Should subgroups of the main groups 
be rendered in the table as well. Usually this is disabled since groups are rendered as separate sections in the text -* `spec_appreviate_main_object_doc_in_tables` - Abbreviate the documentation of the main object for which a table is rendered in the table. This is commonly set to True as doc of the main object is already rendered as the main intro for the section describing the object -* `spec_show_title_for_tables` - Add a title for the table showing the specifications. -* `spec_show_subgroups_in_seperate_table` - Should top-level subgroups be listed in a separate table or as part of the main dataset and attributes table -* `spec_table_depth_char` - Char to be used as prefix to indicate the depth of an object in the specification hierarchy. NOTE: The char used should be supported by LaTeX. -* `spec_add_latex_clearpage_after_ndt_sections` - Add a LaTeX clearpage after each main section describing a neurodata_type. This helps in LaTeX to keep the ordering of figures, tables, and code blocks consistent in particular when the hierarchy_plots are included. 
-* `spec_resolve_type_inc` - Resolve includes to always show the full list of objects that are part of a type (True) or to show only the parts that are actually new to a current type while only linking to base types (False) - -In addition, the location of the input format specification can be customized as follows: - -* `spec_input_spec_dir` - Directory where the YAML files for the namespace to be documented are located -* `spec_input_namespace_filename` - Name of the YAML file with the specification of the Namespace to be documented -* `spec_input_default_namespace` - Name of the default namespace in the file - -Finally, the name and location of output files can be customized as follows: - -* `spec_output_dir` - Directory where the autogenerated files should be stored -* `spec_output_master_filename` - Name of the master .rst file that includes all the autogenerated docs -* `spec_output_doc_filename` - Name of the file where the main documentation goes -* `spec_output_src_filename` - Name of the file where the sources of the format spec go. NOTE: This file is only generated if `spec_generate_src_file` is enabled -* `spec_output_doc_type_hierarchy_filename` - Name of the file containing the type hierarchy. (Included in `spec_output_doc_filename`) - -In the regular Sphinx `source/conf.py` file, we can then also set: - -* `spec_doc_rebuild_always` - Boolean to define whether to always rebuild the source docs from YAML when doing a regular build of the sources (e.g., via `make html`) even if the folder with the source files already exists diff --git a/docs/make.bat b/docs/make.bat deleted file mode 100644 index dc1312a..0000000 --- a/docs/make.bat +++ /dev/null @@ -1,35 +0,0 @@ -@ECHO OFF - -pushd %~dp0 - -REM Command file for Sphinx documentation - -if "%SPHINXBUILD%" == "" ( - set SPHINXBUILD=sphinx-build -) -set SOURCEDIR=source -set BUILDDIR=build - -%SPHINXBUILD% >NUL 2>NUL -if errorlevel 9009 ( - echo. - echo.The 'sphinx-build' command was not found. 
Make sure you have Sphinx - echo.installed, then set the SPHINXBUILD environment variable to point - echo.to the full path of the 'sphinx-build' executable. Alternatively you - echo.may add the Sphinx directory to PATH. - echo. - echo.If you don't have Sphinx installed, grab it from - echo.https://www.sphinx-doc.org/ - exit /b 1 -) - -if "%1" == "" goto help - -%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% -goto end - -:help -%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% - -:end -popd diff --git a/docs/source/_static/theme_overrides.css b/docs/source/_static/theme_overrides.css deleted file mode 100644 index 63ee6cc..0000000 --- a/docs/source/_static/theme_overrides.css +++ /dev/null @@ -1,13 +0,0 @@ -/* override table width restrictions */ -@media screen and (min-width: 767px) { - - .wy-table-responsive table td { - /* !important prevents the common CSS stylesheets from overriding - this as on RTD they are loaded after this stylesheet */ - white-space: normal !important; - } - - .wy-table-responsive { - overflow: visible !important; - } -} diff --git a/docs/source/conf.py b/docs/source/conf.py deleted file mode 100644 index 90cb378..0000000 --- a/docs/source/conf.py +++ /dev/null @@ -1,112 +0,0 @@ -# Configuration file for the Sphinx documentation builder. 
-# -# For the full list of built-in configuration values, see the documentation: -# https://www.sphinx-doc.org/en/master/usage/configuration.html - -# -- Project information ----------------------------------------------------- -# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information - -project = 'ndx-microscopy' -copyright = '2023, Cody Baker and Alessandra Trapani' -author = 'Cody Baker and Alessandra Trapani' - -version = '0.1.0' -release = 'alpha' - -# -- General configuration --------------------------------------------------- -# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration - -extensions = [ - 'sphinx.ext.ifconfig', - 'sphinx.ext.autodoc', - 'sphinx.ext.intersphinx', -] - -templates_path = ['_templates'] -exclude_patterns = [] - -language = 'en' - -# -- Options for HTML output ------------------------------------------------- -# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output - -html_theme = 'alabaster' -html_static_path = ['_static'] - -# -- Options for intersphinx extension --------------------------------------- -# https://www.sphinx-doc.org/en/master/usage/extensions/intersphinx.html#configuration - -intersphinx_mapping = { - 'python': ('https://docs.python.org/3', None), -} - - -############################################################################ -# CUSTOM CONFIGURATIONS ADDED BY THE NWB TOOL FOR GENERATING FORMAT DOCS -########################################################################### - -import sphinx_rtd_theme # noqa: E402 -import textwrap # noqa: E402 - -# -- Options for intersphinx --------------------------------------------- -intersphinx_mapping.update({ - 'core': ('https://nwb-schema.readthedocs.io/en/latest/', None), - 'hdmf-common': ('https://hdmf-common-schema.readthedocs.io/en/latest/', None), -}) - -# -- Generate sources from YAML--------------------------------------------------- -# Always rebuild the source docs from YAML 
even if the folder with the source files already exists -spec_doc_rebuild_always = True - - -def run_doc_autogen(_): - # Execute the autogeneration of Sphinx format docs from the YAML sources - import sys - import os - conf_file_dir = os.path.dirname(os.path.abspath(__file__)) - sys.path.append(conf_file_dir) # Need so that generate format docs can find the conf_doc_autogen file - from conf_doc_autogen import spec_output_dir - - if spec_doc_rebuild_always or not os.path.exists(spec_output_dir): - sys.path.append('./docs') # needed to enable import of generate_format docs - from hdmf_docutils.generate_format_docs import main as generate_docs - generate_docs() - - -def setup(app): - app.connect('builder-inited', run_doc_autogen) - # overrides for wide tables in RTD theme - try: - app.add_css_file("theme_overrides.css") # Used by newer Sphinx versions - except AttributeError: - app.add_stylesheet("theme_overrides.css") # Used by older version of Sphinx - -# -- Customize sphinx settings -numfig = True -autoclass_content = 'both' -autodoc_docstring_signature = True -autodoc_member_order = 'bysource' -add_function_parentheses = False - - -# -- HTML sphinx options -html_theme = "sphinx_rtd_theme" -html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] - -# LaTeX Sphinx options -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - 'papersize': 'letterpaper', - - # The font size ('10pt', '11pt' or '12pt'). - 'pointsize': '10pt', - - # Additional stuff for the LaTeX preamble. 
- 'preamble': textwrap.dedent( - ''' - \\setcounter{tocdepth}{3} - \\setcounter{secnumdepth}{6} - \\usepackage{enumitem} - \\setlistdepth{100} - '''), -} diff --git a/docs/source/conf_doc_autogen.py b/docs/source/conf_doc_autogen.py deleted file mode 100644 index aed891b..0000000 --- a/docs/source/conf_doc_autogen.py +++ /dev/null @@ -1,90 +0,0 @@ -# -*- coding: utf-8 -*- -# Configuration file for generating sources for the format documentation from the YAML specification files - -import os - -# -- Input options for the specification files to be used ----------------------- - -# Directory where the YAML files for the namespace to be documented are located -spec_input_spec_dir = '..\spec' - -# Name of the YAML file with the specification of the Namespace to be documented -spec_input_namespace_filename = 'ndx-microscopy.namespace.yaml' - -# Name of the default namespace in the file -spec_input_default_namespace = 'ndx-microscopy' - - -# -- Options for customizing the locations of output files - -# Directory where the autogenerated files should be stored -spec_output_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "_format_auto_docs") - -# Name of the master rst file that includes all the autogenerated docs -spec_output_master_filename = 'format_spec_main.inc' - -# Name of the file where the main documentation goes -spec_output_doc_filename = 'format_spec_doc.inc' - -# Name of the file where the sources of the format spec go. NOTE: This file is only generated if -# spec_generate_src_file is enabled -spec_output_src_filename = 'format_spec_sources.inc' - -# Name of the file containing the type hierarchy. 
(Included in spec_output_doc_filename) -spec_output_doc_type_hierarchy_filename = 'format_spec_type_hierarchy.inc' - -# Clean up the output directory before we build if the git hash is out of date -spec_clean_output_dir_if_old_git_hash = True - -# Do not rebuild the format sources if we have previously build the sources and the git hash matches -spec_skip_doc_autogen_if_current_git_hash = False - - -# -- Options for the generation of the documentation from source ---------------- - -# Should the YAML sources be included for the different modules -spec_show_yaml_src = True - -# Show figure of the hierarchy of objects defined by the spec -spec_show_hierarchy_plots = True - -# Should the sources of the neurodata_types (YAML) be rendered in a separate section (True) or -# in the same location as the base documentation -spec_generate_src_file = True - -# Should separate .inc reStructuredText files be generated for each neurodata_type (True) -# or should all text be added to the main file -spec_file_per_type = True - -# Should top-level subgroups be listed in a separate table or as part of the main dataset and attributes table -spec_show_subgroups_in_seperate_table = True - -# Abbreviate the documentation of the main object for which a table is rendered in the table. -# This is commonly set to True as doc of the main object is alrready rendered as the main intro for the -# section describing the object -spec_appreviate_main_object_doc_in_tables = True - -# Show a title for the tables -spec_show_title_for_tables = True - -# Char to be used as prefix to indicate the depth of an object in the specification hierarchy -spec_table_depth_char = '.' # '→' '.' - -# Add a LaTeX clearpage after each main section describing a neurodata_type. 
This helps in LaTeX to keep the ordering -# of figures, tables, and code blocks consistent in particular when the hierarchy_plots are included -spec_add_latex_clearpage_after_ndt_sections = True - -# Resolve includes to always show the full list of objects that are part of a type (True) -# or to show only the parts that are actually new to a current type while only linking to base types -spec_resolve_type_inc = False - -# Default type map to be used. This is the type map where dependent namespaces are stored. In the case of -# NWB this is spec_default_type_map = pynwb.get_type_map() -import pynwb # noqa: E402 -spec_default_type_map = pynwb.get_type_map() - -# Default specification classes for groups datasets and namespaces. In the case of NWB these are the NWB-specfic -# spec classes. In the general cases these are the spec classes from HDMF -spec_group_spec_cls = pynwb.spec.NWBGroupSpec -spec_dataset_spec_cls = pynwb.spec.NWBDatasetSpec -spec_namespace_spec_cls = pynwb.spec.NWBNamespace diff --git a/docs/source/credits.rst b/docs/source/credits.rst deleted file mode 100644 index da5cda1..0000000 --- a/docs/source/credits.rst +++ /dev/null @@ -1,21 +0,0 @@ -******* -Credits -******* - -.. note:: - Add the credits for your extension here - -Acknowledgments -=============== - - -Authors -======= - - -***** -Legal -***** - -License -======= diff --git a/docs/source/description.rst b/docs/source/description.rst deleted file mode 100644 index 6f8553e..0000000 --- a/docs/source/description.rst +++ /dev/null @@ -1,5 +0,0 @@ -Overview -======== - -.. note:: - Add the description of your extension here diff --git a/docs/source/format.rst b/docs/source/format.rst deleted file mode 100644 index 4b88782..0000000 --- a/docs/source/format.rst +++ /dev/null @@ -1,12 +0,0 @@ - -.. _ndx-microscopy: - -************** -ndx-microscopy -************** - -Version |release| |today| - -.. .. contents:: - -.. 
include:: _format_auto_docs/format_spec_main.inc diff --git a/docs/source/index.rst b/docs/source/index.rst deleted file mode 100644 index 207c9a0..0000000 --- a/docs/source/index.rst +++ /dev/null @@ -1,30 +0,0 @@ -Specification for the ndx-microscopy extension -============================================== - -.. toctree:: - :numbered: - :maxdepth: 8 - :caption: Table of Contents - - description - -.. toctree:: - :numbered: - :maxdepth: 3 - :caption: Extension Specification - - format - -.. toctree:: - :maxdepth: 2 - :caption: History & Legal - - release_notes - credits - -Indices and tables -================== - -* :ref:`genindex` -* :ref:`modindex` -* :ref:`search` diff --git a/docs/source/release_notes.rst b/docs/source/release_notes.rst deleted file mode 100644 index 39ccd1c..0000000 --- a/docs/source/release_notes.rst +++ /dev/null @@ -1,5 +0,0 @@ -Release Notes -============= - -.. note:: - Add the release notes of your extension here diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..2b90ddd --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,25 @@ +[tool.black] +line-length = 120 +target-version = ['py39', 'py310', 'py311', 'py312'] +include = '\.pyi?$' +extend-exclude = ''' +/( + \.toml + |\.yml + |\.txt + |\.sh + |\.git + |\.ini + | \.hg + | \.mypy_cache + | \.tox + | \.venv + | build + | dist +)/ +''' + +[tool.isort] +profile = "black" +reverse_relative = true +known_first_party = ["ndx_microscopy"] diff --git a/requirements-dev.txt b/requirements-dev.txt index 1482a06..9955dec 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -1,5 +1,2 @@ -# pinned dependencies to reproduce an entire development environment to run tests and check code style -flake8==4.0.1 -pytest==6.2.5 -pytest-subtests==0.6.0 -hdmf-docutils==0.4.4 +pytest +pytest-cov diff --git a/requirements-min.txt b/requirements-min.txt new file mode 100644 index 0000000..cd0c3e6 --- /dev/null +++ b/requirements-min.txt @@ -0,0 +1 @@ +pynwb diff --git 
a/requirements.txt b/requirements.txt deleted file mode 100644 index 1709b58..0000000 --- a/requirements.txt +++ /dev/null @@ -1,3 +0,0 @@ -# pinned dependencies to reproduce a working development environment -hdmf==3.1.1 -pynwb==2.0.0 diff --git a/setup.py b/setup.py index 25d401a..33da03e 100644 --- a/setup.py +++ b/setup.py @@ -1,65 +1,62 @@ # -*- coding: utf-8 -*- import os - -from setuptools import setup, find_packages from shutil import copy2 +from setuptools import find_packages, setup + # load README.md/README.rst file try: - if os.path.exists('README.md'): - with open('README.md', 'r') as fp: + if os.path.exists("README.md"): + with open("README.md", "r") as fp: readme = fp.read() - readme_type = 'text/markdown; charset=UTF-8' - elif os.path.exists('README.rst'): - with open('README.rst', 'r') as fp: + readme_type = "text/markdown; charset=UTF-8" + elif os.path.exists("README.rst"): + with open("README.rst", "r") as fp: readme = fp.read() - readme_type = 'text/x-rst; charset=UTF-8' + readme_type = "text/x-rst; charset=UTF-8" else: readme = "" except Exception: readme = "" setup_args = { - 'name': 'ndx-microscopy', - 'version': '0.1.0', - 'description': 'An example extension to demonstrate the TAB proposal for enhancements to optical physiology neurodata types.', - 'long_description': readme, - 'long_description_content_type': readme_type, - 'author': 'Cody Baker and Alessandra Trapani', - 'author_email': 'cody.baker@catalystneuro.com', - 'url': '', - 'license': 'MIT', - 'install_requires': [ - 'pynwb>=1.5.0,<3', - 'hdmf>=2.5.6,<4', + "name": "ndx-microscopy", + "version": "0.1.0", + "description": "An example extension to demonstrate the TAB proposal for enhancements to optical physiology neurodata types.", + "long_description": readme, + "long_description_content_type": readme_type, + "author": "Cody Baker and Alessandra Trapani", + "author_email": "cody.baker@catalystneuro.com", + "url": "", + "license": "MIT", + "install_requires": [ + 
"pynwb>=1.5.0,<3", + "hdmf>=2.5.6,<4", ], - 'packages': find_packages('src/pynwb', exclude=["tests", "tests.*"]), - 'package_dir': {'': 'src/pynwb'}, - 'package_data': {'ndx_microscopy': [ - 'spec/ndx-microscopy.namespace.yaml', - 'spec/ndx-microscopy.extensions.yaml', - ]}, - 'classifiers': [ + "packages": find_packages("src/pynwb", exclude=["tests", "tests.*"]), + "package_dir": {"": "src/pynwb"}, + "package_data": { + "ndx_microscopy": [ + "spec/ndx-microscopy.namespace.yaml", + "spec/ndx-microscopy.extensions.yaml", + ] + }, + "classifiers": [ "Intended Audience :: Developers", "Intended Audience :: Science/Research", - "License :: OSI Approved :: MIT License" - ], - 'keywords': [ - 'NeurodataWithoutBorders', - 'NWB', - 'nwb-extension', - 'ndx-extension' + "License :: OSI Approved :: MIT License", ], - 'zip_safe': False + "keywords": ["NeurodataWithoutBorders", "NWB", "nwb-extension", "ndx-extension"], + "zip_safe": False, } def _copy_spec_files(project_dir): - ns_path = os.path.join(project_dir, 'spec', 'ndx-microscopy.namespace.yaml') - ext_path = os.path.join(project_dir, 'spec', 'ndx-microscopy.extensions.yaml') + ns_path = os.path.join(project_dir, "spec", "ndx-microscopy.namespace.yaml") + ext_path = os.path.join(project_dir, "spec", "ndx-microscopy.extensions.yaml") - dst_dir = os.path.join(project_dir, 'src', 'pynwb', 'ndx_microscopy', 'spec') + dst_dir = os.path.join(project_dir, "src", "pynwb", "ndx_microscopy", "spec") if not os.path.exists(dst_dir): os.mkdir(dst_dir) @@ -67,6 +64,6 @@ def _copy_spec_files(project_dir): copy2(ext_path, dst_dir) -if __name__ == '__main__': +if __name__ == "__main__": _copy_spec_files(os.path.dirname(__file__)) setup(**setup_args) diff --git a/spec/ndx-microscopy.extensions.yaml b/spec/ndx-microscopy.extensions.yaml index fa9a0ff..f76bc1d 100644 --- a/spec/ndx-microscopy.extensions.yaml +++ b/spec/ndx-microscopy.extensions.yaml @@ -1,8 +1,260 @@ groups: -- neurodata_type_def: TetrodeSeries - neurodata_type_inc: 
ElectricalSeries - doc: An extension of ElectricalSeries to include the tetrode ID for each time series. - attributes: - - name: trode_id - dtype: int32 - doc: The tetrode ID. + + - neurodata_type_def: Microscope + neurodata_type_inc: Device + doc: A microscope used to acquire imaging data. + attributes: + - name: model + dtype: text + doc: Model identifier of the light source device. + required: false + + - neurodata_type_def: LightSource + neurodata_type_inc: Device + doc: Light source used to illuminate an imaging space. + attributes: + - name: model + dtype: text + doc: Model identifier of the light source device. + required: false + - name: filter_description + dtype: text + doc: Filter used to obtain the excitation wavelength of light, e.g. 'Short pass at 1040 nm'. + required: false + - name: excitation_wavelength_in_nm + dtype: numeric + doc: Excitation wavelength of light, in nanometers. + required: false + - name: peak_power_in_W + dtype: numeric + doc: Incident power of stimulation device (in Watts). + required: false + - name: peak_pulse_energy_in_J + dtype: numeric + doc: If device is pulsed light source, pulse energy (in Joules). + required: false + - name: intensity_in_W_per_m2 + dtype: numeric + doc: Intensity of the excitation in W/m^2, if known. + required: false + - name: exposure_time_in_s + dtype: numeric + doc: Exposure time of the sample (in sec). + required: false + - name: pulse_rate_in_Hz + dtype: numeric + doc: If device is pulsed light source, pulse rate (in Hz) used for stimulation. + required: false + + # Microscopy is added on to this only to differentiate from the OpticalChannel in the core namespace + # It would be removed when this structure is merged to core + - neurodata_type_def: MicroscopyOpticalChannel + neurodata_type_inc: LabMetaData # Would prefer basic NWBContainer + doc: An optical channel used to filter light emission from an imaging space. 
+ datasets: + - name: description + doc: Description or other notes about the channel. + dtype: text + attributes: + - name: indicator + doc: Identifier for the indicator pertaining to this optical channel. + dtype: text + - name: filter_description + doc: Metadata information about the filter used by this optical channel. + dtype: text + required: false + - name: emission_wavelength_in_nm + doc: Emission wavelength for this optical channel, in nanometers. + dtype: numeric + required: false + + - neurodata_type_def: ImagingSpace + neurodata_type_inc: LabMetaData # Would prefer basic NWBContainer + doc: Metadata about the region of physical space that imaging data was recorded from. + datasets: + - name: description + dtype: text + doc: Description of the imaging space. + - name: origin_coordinates + dtype: float64 + dims: + - - x, y, z + shape: + - - 3 + doc: Physical location in stereotactic coordinates for the first element of the grid. + See reference_frame to determine what the coordinates are relative to (e.g., bregma). + quantity: '?' + attributes: + - name: unit + dtype: text + default_value: micrometers + doc: Measurement units for origin coordinates. The default value is 'micrometers'. + attributes: + - name: location + dtype: text + doc: General estimate of location in the brain being subset by this space. + Specify the area, layer, etc. + Use standard atlas names for anatomical regions when possible. + Specify 'whole-brain' if the entire brain is strictly contained within the space. + required: false + links: + - name: microscope + target_type: Microscope + doc: Link to Microscope object which contains metadata about the device which imaged this space. + + - neurodata_type_def: PlanarImagingSpace + neurodata_type_inc: ImagingSpace + doc: Metadata about the 2-dimensional slice of physical space that imaging data was recorded from. 
+ datasets: + - name: grid_spacing + dtype: float64 + dims: + - - x, y + shape: + - - 2 + doc: Amount of space between pixels in the specified unit. + Specify 'z' only when imaging volume is a regular grid; otherwise only specify 'x' and 'y'. + See origin_coordinates to determine where the grid begins. + quantity: '?' +# TODO: deal with grid_spacing units +# attributes: +# - name: unit +# dtype: text +# default_value: micrometers +# doc: Measurement units for grid spacing. The default value is 'micrometers'. + attributes: + - name: reference_frame + dtype: text + doc: Describes the reference frame of origin_coordinates and grid_spacing. + For example, this can be a text description of the anatomical location and orientation of the grid + defined by origin_coords and grid_spacing or the vectors needed to transform or rotate the grid to + a common anatomical axis (e.g., AP/DV/ML). + This field is necessary to interpret origin_coords and grid_spacing. + If origin_coords and grid_spacing are not present, then this field is not required. + For example, if the microscope returns 10 x 10 images, where the first value of the data matrix + (index (0, 0)) corresponds to (-1.2, -0.6, -2) mm relative to bregma, the spacing between pixels is 0.2 mm in + x, 0.2 mm in y and 0.5 mm in z, and larger numbers in x means more anterior, larger numbers in y means more + rightward, and larger numbers in z means more ventral, then enter the following -- + origin_coords = (-1.2, -0.6, -2) + grid_spacing = (0.2, 0.2) + reference_frame = "Origin coordinates are relative to bregma. First dimension corresponds to anterior-posterior + axis (larger index = more anterior). Second dimension corresponds to medial-lateral axis (larger index = more + rightward). Third dimension corresponds to dorsal-ventral axis (larger index = more ventral)." 
+ required: false + + - neurodata_type_def: VolumetricImagingSpace + neurodata_type_inc: ImagingSpace + doc: Metadata about the 3-dimensional region of physical space that imaging data was recorded from. + datasets: + - name: grid_spacing + doc: Amount of space between pixels in (x, y) or voxels in (x, y, z), in the specified unit. + Specify 'z' only when imaging volume is a regular grid; otherwise only specify 'x' and 'y'. + See origin_coordinates to determine where the grid begins. + dtype: float64 + dims: + - - x, y, z + shape: + - - 3 + quantity: '?' +# TODO: deal with grid_spacing units +# attributes: +# - name: unit +# dtype: text +# default_value: micrometers +# doc: Measurement units for grid spacing. The default value is 'micrometers'. + attributes: + - name: reference_frame + doc: Describes the reference frame of origin_coordinates and grid_spacing. + For example, this can be a text description of the anatomical location and orientation of the grid + defined by origin_coords and grid_spacing or the vectors needed to transform or rotate the grid to + a common anatomical axis (e.g., AP/DV/ML). + This field is necessary to interpret origin_coords and grid_spacing. + If origin_coords and grid_spacing are not present, then this field is not required. + For example, if the microscope returns 10 x 10 x 2 images, where the first value of the data matrix + (index (0, 0, 0)) corresponds to (-1.2, -0.6, -2) mm relative to bregma, the spacing between pixels is 0.2 mm in + x, 0.2 mm in y and 0.5 mm in z, and larger numbers in x means more anterior, larger numbers in y means more + rightward, and larger numbers in z means more ventral, then enter the following -- + origin_coords = (-1.2, -0.6, -2) + grid_spacing = (0.2, 0.2, 0.5) + reference_frame = "Origin coordinates are relative to bregma. First dimension corresponds to anterior-posterior + axis (larger index = more anterior). Second dimension corresponds to medial-lateral axis (larger index = more + rightward). 
Third dimension corresponds to dorsal-ventral axis (larger index = more ventral)." + dtype: text + required: false + + - neurodata_type_def: MicroscopySeries + neurodata_type_inc: TimeSeries + doc: Imaging data acquired over time from an optical channel in a microscope while a light source illuminates the + imaging space. + links: + - name: microscope + doc: Link to a Microscope object containing metadata about the device used to acquire this imaging data. + target_type: Microscope + - name: light_source + doc: Link to a LightSource object containing metadata about the device used to illuminate the imaging space. + target_type: LightSource + - name: optical_channel + doc: Link to a MicroscopyOpticalChannel object containing metadata about the indicator and filters used to collect + this data. + target_type: MicroscopyOpticalChannel + + - neurodata_type_def: PlanarMicroscopySeries + neurodata_type_inc: MicroscopySeries + doc: Imaging data acquired over time from an optical channel in a microscope while a light source illuminates a + planar imaging space. + datasets: + - name: data + doc: Recorded imaging data, shaped by (number of frames, frame height, frame width). + dtype: numeric + dims: + - frames + - height + - width + shape: + - null + - null + - null + links: + - name: imaging_space + doc: Link to PlanarImagingSpace object containing metadata about the region of physical space this imaging data + was recorded from. + target_type: PlanarImagingSpace + + - neurodata_type_def: VariableDepthMicroscopySeries + neurodata_type_inc: PlanarMicroscopySeries + doc: Volumetric imaging data acquired over an irregular number and amount of depths; for instance, when using an + electrically tunable lens. + datasets: + - name: depth_per_frame_in_mm + doc: Depth of each frame in the data array. + These values offset the 'z' value of the origin_coordinates of the linked imaging_space object. 
+ dtype: numeric + dims: + - frames + shape: + - null + + - neurodata_type_def: VolumetricMicroscopySeries + neurodata_type_inc: MicroscopySeries + doc: Volumetric imaging data acquired over time from an optical channel in a microscope while a light source + illuminates a volumetric imaging space. + Assumes the number of depth scans used to construct the volume is regular. + datasets: + - name: data + doc: Recorded imaging data, shaped by (number of frames, frame height, frame width, number of depth planes). + dtype: numeric + dims: + - frames + - height + - width + - depth + shape: + - null + - null + - null + - null + links: + - name: imaging_space + doc: Link to VolumetricImagingSpace object containing metadata about the region of physical space this imaging data + was recorded from. + target_type: VolumetricImagingSpace diff --git a/spec/ndx-microscopy.namespace.yaml b/spec/ndx-microscopy.namespace.yaml index 9dc616d..8399608 100644 --- a/spec/ndx-microscopy.namespace.yaml +++ b/spec/ndx-microscopy.namespace.yaml @@ -1,14 +1,13 @@ namespaces: -- author: - - Cody Baker and Alessandra Trapani +- name: ndx-microscopy + doc: Microscopy extension to NWB standard. + author: + - Cody Baker + - Alessandra Trapani contact: - cody.baker@catalystneuro.com - doc: An example extension to demonstrate the TAB proposal for enhancements to optical - physiology neurodata types. 
- name: ndx-microscopy + - alessandra.trapani@catalystneuro.com schema: - namespace: core - neurodata_types: - - ElectricalSeries - source: ndx-microscopy.extensions.yaml version: 0.1.0 diff --git a/src/pynwb/README.md b/src/pynwb/README.md deleted file mode 100644 index e69de29..0000000 diff --git a/src/pynwb/ndx_microscopy/__init__.py b/src/pynwb/ndx_microscopy/__init__.py index aeb3c06..a8cf19a 100644 --- a/src/pynwb/ndx_microscopy/__init__.py +++ b/src/pynwb/ndx_microscopy/__init__.py @@ -1,26 +1,45 @@ import os -from pynwb import load_namespaces, get_class - -# Set path of the namespace.yaml file to the expected install location -ndx_microscopy_specpath = os.path.join( - os.path.dirname(__file__), - 'spec', - 'ndx-microscopy.namespace.yaml' -) - -# If the extension has not been installed yet but we are running directly from -# the git repo -if not os.path.exists(ndx_microscopy_specpath): - ndx_microscopy_specpath = os.path.abspath(os.path.join( - os.path.dirname(__file__), - '..', '..', '..', - 'spec', - 'ndx-microscopy.namespace.yaml' - )) - -# Load the namespace -load_namespaces(ndx_microscopy_specpath) - -# TODO: import your classes here or define your class using get_class to make -# them accessible at the package level -TetrodeSeries = get_class('TetrodeSeries', 'ndx-microscopy') + +from pynwb import get_class, load_namespaces + +try: + from importlib.resources import files +except ImportError: + # TODO: Remove when python 3.9 becomes the new minimum + from importlib_resources import files + +extension_name = "ndx-microscopy" + +# Get path to the namespace.yaml file with the expected location when installed not in editable mode +__location_of_this_file = files(__name__) +__spec_path = __location_of_this_file / "spec" / f"{extension_name}.namespace.yaml" + +# If that path does not exist, we are likely running in editable mode. 
Use the local path instead +if not os.path.exists(__spec_path): + __spec_path = __location_of_this_file.parent.parent.parent / "spec" / f"{extension_name}.namespace.yaml" + +load_namespaces(str(__spec_path)) + +Microscope = get_class("Microscope", extension_name) +LightSource = get_class("LightSource", extension_name) +MicroscopyOpticalChannel = get_class("MicroscopyOpticalChannel", extension_name) +ImagingSpace = get_class("ImagingSpace", extension_name) +PlanarImagingSpace = get_class("PlanarImagingSpace", extension_name) +VolumetricImagingSpace = get_class("VolumetricImagingSpace", extension_name) +MicroscopySeries = get_class("MicroscopySeries", extension_name) +PlanarMicroscopySeries = get_class("PlanarMicroscopySeries", extension_name) +VariableDepthMicroscopySeries = get_class("VariableDepthMicroscopySeries", extension_name) +VolumetricMicroscopySeries = get_class("VolumetricMicroscopySeries", extension_name) + +__all__ = [ + "Microscope", + "LightSource", + "MicroscopyOpticalChannel", + "ImagingSpace", + "PlanarImagingSpace", + "VolumetricImagingSpace", + "MicroscopySeries", + "PlanarMicroscopySeries", + "VariableDepthMicroscopySeries", + "VolumetricMicroscopySeries", +] diff --git a/src/pynwb/ndx_microscopy/testing/__init__.py b/src/pynwb/ndx_microscopy/testing/__init__.py new file mode 100644 index 0000000..adad764 --- /dev/null +++ b/src/pynwb/ndx_microscopy/testing/__init__.py @@ -0,0 +1,21 @@ +from ._mock import ( + mock_LightSource, + mock_Microscope, + mock_MicroscopyOpticalChannel, + mock_PlanarImagingSpace, + mock_PlanarMicroscopySeries, + mock_VariableDepthMicroscopySeries, + mock_VolumetricImagingSpace, + mock_VolumetricMicroscopySeries, +) + +__all__ = [ + "mock_Microscope", + "mock_LightSource", + "mock_MicroscopyOpticalChannel", + "mock_PlanarImagingSpace", + "mock_VolumetricImagingSpace", + "mock_PlanarMicroscopySeries", + "mock_VariableDepthMicroscopySeries", + "mock_VolumetricMicroscopySeries", +] diff --git 
a/src/pynwb/ndx_microscopy/testing/_mock.py b/src/pynwb/ndx_microscopy/testing/_mock.py new file mode 100644 index 0000000..7eebeb5 --- /dev/null +++ b/src/pynwb/ndx_microscopy/testing/_mock.py @@ -0,0 +1,288 @@ +import warnings +from typing import Optional, Tuple + +import numpy as np +from pynwb.testing.mock.utils import name_generator + +import ndx_microscopy + + +def mock_Microscope( + *, + name: Optional[str] = None, + description: str = "This is a mock instance of a Microscope type to be used for rapid testing.", + manufacturer: str = "A fake manufacturer of the mock microscope.", + model: str = "A fake model of the mock microscope.", +) -> ndx_microscopy.Microscope: + microscope = ndx_microscopy.Microscope( + name=name or name_generator("Microscope"), + description=description, + manufacturer=manufacturer, + model=model, + ) + return microscope + + +def mock_LightSource( + *, + name: Optional[str] = None, + description: str = "This is a mock instance of a LightSource type to be used for rapid testing.", + manufacturer: str = "A fake manufacturer of the mock light source.", + model: str = "A fake model of the mock light source.", + filter_description: str = "A description about the fake filter used by the mock light source.", + excitation_wavelength_in_nm: float = 500.0, + peak_power_in_W: float = 0.7, + peak_pulse_energy_in_J: float = 0.7, + intensity_in_W_per_m2: float = 0.005, + exposure_time_in_s: float = 2.51e-13, + pulse_rate_in_Hz: float = 2.0e6, +) -> ndx_microscopy.LightSource: + light_source = ndx_microscopy.LightSource( + name=name or name_generator("LightSource"), + description=description, + manufacturer=manufacturer, + model=model, + filter_description=filter_description, + excitation_wavelength_in_nm=excitation_wavelength_in_nm, + peak_power_in_W=peak_power_in_W, + peak_pulse_energy_in_J=peak_pulse_energy_in_J, + intensity_in_W_per_m2=intensity_in_W_per_m2, + exposure_time_in_s=exposure_time_in_s, + pulse_rate_in_Hz=pulse_rate_in_Hz, + ) + 
return light_source + + +def mock_MicroscopyOpticalChannel( + *, + name: Optional[str] = None, + description: str = "This is a mock instance of a MicroscopyOpticalChannel type to be used for rapid testing.", + indicator: str = "The indicator targeted by the mock optical channel.", + filter_description: str = "A description about the fake filter used by the mock optical channel.", + emission_wavelength_in_nm: float = 450.0, +) -> ndx_microscopy.MicroscopyOpticalChannel: + optical_channel = ndx_microscopy.MicroscopyOpticalChannel( + name=name or name_generator("MicroscopyOpticalChannel"), + description=description, + indicator=indicator, + filter_description=filter_description, + emission_wavelength_in_nm=emission_wavelength_in_nm, + ) + return optical_channel + + +def mock_PlanarImagingSpace( + *, + microscope: ndx_microscopy.Microscope, + name: Optional[str] = None, + description: str = "This is a mock instance of a PlanarImagingSpace type to be used for rapid testing.", + origin_coordinates: Tuple[float, float, float] = (-1.2, -0.6, -2), + grid_spacing: Tuple[float, float, float] = (0.2, 0.2), + location: str = "The location targeted by the mock imaging space.", + reference_frame: str = "The reference frame of the mock planar imaging space.", +) -> ndx_microscopy.PlanarImagingSpace: + planar_imaging_space = ndx_microscopy.PlanarImagingSpace( + name=name or name_generator("PlanarImagingSpace"), + description=description, + microscope=microscope, + origin_coordinates=origin_coordinates, + grid_spacing=grid_spacing, + location=location, + reference_frame=reference_frame, + ) + return planar_imaging_space + + +def mock_VolumetricImagingSpace( + *, + microscope: ndx_microscopy.Microscope, + name: Optional[str] = None, + description: str = "This is a mock instance of a VolumetricImagingSpace type to be used for rapid testing.", + origin_coordinates: Tuple[float, float, float] = (-1.2, -0.6, -2), + grid_spacing: Tuple[float, float, float] = (0.2, 0.2, 0.5), + location: 
str = "The location targeted by the mock imaging space.", + reference_frame: str = "The reference frame of the mock volumetric imaging space.", +) -> ndx_microscopy.VolumetricImagingSpace: + volumetric_imaging_space = ndx_microscopy.VolumetricImagingSpace( + name=name or name_generator("VolumetricImagingSpace"), + description=description, + microscope=microscope, + origin_coordinates=origin_coordinates, + grid_spacing=grid_spacing, + location=location, + reference_frame=reference_frame, + ) + return volumetric_imaging_space + + +def mock_PlanarMicroscopySeries( + *, + microscope: ndx_microscopy.Microscope, + light_source: ndx_microscopy.LightSource, + imaging_space: ndx_microscopy.PlanarImagingSpace, + optical_channel: ndx_microscopy.MicroscopyOpticalChannel, + name: Optional[str] = None, + description: str = "This is a mock instance of a PlanarMicroscopySeries type to be used for rapid testing.", + data: Optional[np.ndarray] = None, + unit: str = "a.u.", + conversion: float = 1.0, + offset: float = 0.0, + starting_time: Optional[float] = None, + rate: Optional[float] = None, + timestamps: Optional[np.ndarray] = None, +) -> ndx_microscopy.PlanarMicroscopySeries: + series_name = name or name_generator("PlanarMicroscopySeries") + series_data = data if data is not None else np.ones(shape=(15, 5, 5)) + + if timestamps is None: + series_starting_time = starting_time or 0.0 + series_rate = rate or 10.0 + series_timestamps = None + else: + if starting_time is not None or rate is not None: + warnings.warn( + message=( + "Timestamps were provided in addition to either rate or starting_time! " + "Please specify only timestamps, or both starting_time and rate. Timestamps will take precedence." 
+ ), + stacklevel=2, + ) + + series_starting_time = None + series_rate = None + series_timestamps = timestamps + + planar_microscopy_series = ndx_microscopy.PlanarMicroscopySeries( + name=series_name, + description=description, + microscope=microscope, + light_source=light_source, + imaging_space=imaging_space, + optical_channel=optical_channel, + data=series_data, + unit=unit, + conversion=conversion, + offset=offset, + starting_time=series_starting_time, + rate=series_rate, + timestamps=series_timestamps, + ) + return planar_microscopy_series + + +def mock_VariableDepthMicroscopySeries( + *, + microscope: ndx_microscopy.Microscope, + light_source: ndx_microscopy.LightSource, + imaging_space: ndx_microscopy.PlanarImagingSpace, + optical_channel: ndx_microscopy.MicroscopyOpticalChannel, + name: Optional[str] = None, + description: str = "This is a mock instance of a PlanarMicroscopySeries type to be used for rapid testing.", + data: Optional[np.ndarray] = None, + depth_per_frame_in_mm: Optional[np.ndarray] = None, + unit: str = "a.u.", + conversion: float = 1.0, + offset: float = 0.0, + starting_time: Optional[float] = None, + rate: Optional[float] = None, + timestamps: Optional[np.ndarray] = None, +) -> ndx_microscopy.VariableDepthMicroscopySeries: + series_name = name or name_generator("VariableDepthMicroscopySeries") + series_data = data if data is not None else np.ones(shape=(15, 5, 5)) + + series_depth_per_frame_in_mm = ( + depth_per_frame_in_mm + if depth_per_frame_in_mm is not None + else np.linspace(start=0.0, stop=1.0, num=series_data.shape[0]) + ) + + if timestamps is None: + series_starting_time = starting_time or 0.0 + series_rate = rate or 10.0 + series_timestamps = None + else: + if starting_time is not None or rate is not None: + warnings.warn( + message=( + "Timestamps were provided in addition to either rate or starting_time! " + "Please specify only timestamps, or both starting_time and rate. Timestamps will take precedence." 
+ ), + stacklevel=2, + ) + + series_starting_time = None + series_rate = None + series_timestamps = timestamps + + variable_depth_microscopy_series = ndx_microscopy.VariableDepthMicroscopySeries( + name=series_name, + description=description, + microscope=microscope, + light_source=light_source, + imaging_space=imaging_space, + optical_channel=optical_channel, + data=series_data, + depth_per_frame_in_mm=series_depth_per_frame_in_mm, + unit=unit, + conversion=conversion, + offset=offset, + starting_time=series_starting_time, + rate=series_rate, + timestamps=series_timestamps, + ) + return variable_depth_microscopy_series + + +def mock_VolumetricMicroscopySeries( + *, + microscope: ndx_microscopy.Microscope, + light_source: ndx_microscopy.LightSource, + imaging_space: ndx_microscopy.VolumetricImagingSpace, + optical_channel: ndx_microscopy.MicroscopyOpticalChannel, + name: Optional[str] = None, + description: str = "This is a mock instance of a VolumetricMicroscopySeries type to be used for rapid testing.", + data: Optional[np.ndarray] = None, + unit: str = "a.u.", + conversion: float = 1.0, + offset: float = 0.0, + starting_time: Optional[float] = None, + rate: Optional[float] = None, + timestamps: Optional[np.ndarray] = None, +) -> ndx_microscopy.VolumetricMicroscopySeries: + series_name = name or name_generator("VolumetricMicroscopySeries") + series_data = data if data is not None else np.ones(shape=(5, 5, 5, 3)) + + if timestamps is None: + series_starting_time = starting_time or 0.0 + series_rate = rate or 10.0 + series_timestamps = None + else: + if starting_time is not None or rate is not None: + warnings.warn( + message=( + "Timestamps were provided in addition to either rate or starting_time! " + "Please specify only timestamps, or both starting_time and rate. Timestamps will take precedence." 
+ ), + stacklevel=2, + ) + + series_starting_time = None + series_rate = None + series_timestamps = timestamps + + volumetric_microscopy_series = ndx_microscopy.VolumetricMicroscopySeries( + name=series_name, + description=description, + microscope=microscope, + light_source=light_source, + imaging_space=imaging_space, + optical_channel=optical_channel, + data=series_data, + unit=unit, + conversion=conversion, + offset=offset, + starting_time=series_starting_time, + rate=series_rate, + timestamps=series_timestamps, + ) + return volumetric_microscopy_series diff --git a/src/pynwb/tests/test_constructors.py b/src/pynwb/tests/test_constructors.py new file mode 100644 index 0000000..1410594 --- /dev/null +++ b/src/pynwb/tests/test_constructors.py @@ -0,0 +1,75 @@ +"""Test in-memory Python API constructors for the ndx-microscopy extension.""" + +import pytest + +from ndx_microscopy.testing import ( + mock_LightSource, + mock_Microscope, + mock_MicroscopyOpticalChannel, + mock_PlanarImagingSpace, + mock_PlanarMicroscopySeries, + mock_VariableDepthMicroscopySeries, + mock_VolumetricImagingSpace, + mock_VolumetricMicroscopySeries, +) + + +def test_constructor_microscope(): + mock_Microscope() + + +def test_constructor_light_source(): + mock_LightSource() + + +def test_constructor_microscopy_optical_channel(): + mock_MicroscopyOpticalChannel() + + +def test_constructor_planar_image_space(): + microscope = mock_Microscope() + + mock_PlanarImagingSpace(microscope=microscope) + + +def test_constructor_volumetric_image_space(): + microscope = mock_Microscope() + + mock_VolumetricImagingSpace(microscope=microscope) + + +def test_constructor_planar_microscopy_series(): + microscope = mock_Microscope() + light_source = mock_LightSource() + imaging_space = mock_PlanarImagingSpace(microscope=microscope) + optical_channel = mock_MicroscopyOpticalChannel() + + mock_PlanarMicroscopySeries( + microscope=microscope, light_source=light_source, imaging_space=imaging_space, 
optical_channel=optical_channel + ) + + +def test_constructor_variable_depth_microscopy_series(): + microscope = mock_Microscope() + light_source = mock_LightSource() + imaging_space = mock_PlanarImagingSpace(microscope=microscope) + optical_channel = mock_MicroscopyOpticalChannel() + + mock_VariableDepthMicroscopySeries( + microscope=microscope, light_source=light_source, imaging_space=imaging_space, optical_channel=optical_channel + ) + + +def test_constructor_volumetric_microscopy_series(): + microscope = mock_Microscope() + light_source = mock_LightSource() + imaging_space = mock_VolumetricImagingSpace(microscope=microscope) + optical_channel = mock_MicroscopyOpticalChannel() + + mock_VolumetricMicroscopySeries( + microscope=microscope, light_source=light_source, imaging_space=imaging_space, optical_channel=optical_channel + ) + + +if __name__ == "__main__": + pytest.main() # Required since not a typical package structure diff --git a/src/pynwb/tests/test_roundtrip.py b/src/pynwb/tests/test_roundtrip.py new file mode 100644 index 0000000..15644b1 --- /dev/null +++ b/src/pynwb/tests/test_roundtrip.py @@ -0,0 +1,164 @@ +"""Test roundtrip (write and read back) of the Python API for the ndx-microscopy extension.""" + +from pynwb.testing import TestCase as pynwb_TestCase +from pynwb.testing.mock.file import mock_NWBFile + +import pynwb +from ndx_microscopy.testing import ( + mock_LightSource, + mock_Microscope, + mock_MicroscopyOpticalChannel, + mock_PlanarImagingSpace, + mock_PlanarMicroscopySeries, + mock_VariableDepthMicroscopySeries, + mock_VolumetricImagingSpace, + mock_VolumetricMicroscopySeries, +) + + +class TestPlanarMicroscopySeriesSimpleRoundtrip(pynwb_TestCase): + """Simple roundtrip test for PlanarMicroscopySeries.""" + + def setUp(self): + self.nwbfile_path = "test.nwb" + + def tearDown(self): + pynwb.testing.remove_test_file(self.nwbfile_path) + + def test_roundtrip(self): + nwbfile = mock_NWBFile() + + microscope = mock_Microscope(name="Microscope") 
+ nwbfile.add_device(devices=microscope) + + light_source = mock_LightSource(name="LightSource") + nwbfile.add_device(devices=light_source) + + imaging_space = mock_PlanarImagingSpace(name="PlanarImagingSpace", microscope=microscope) + nwbfile.add_lab_meta_data(lab_meta_data=imaging_space) # Would prefer .add_imaging_spacec() + + optical_channel = mock_MicroscopyOpticalChannel(name="MicroscopyOpticalChannel") + nwbfile.add_lab_meta_data(lab_meta_data=optical_channel) + + planar_microscopy_series = mock_PlanarMicroscopySeries( + name="PlanarMicroscopySeries", + microscope=microscope, + light_source=light_source, + imaging_space=imaging_space, + optical_channel=optical_channel, + ) + nwbfile.add_acquisition(nwbdata=planar_microscopy_series) + + with pynwb.NWBHDF5IO(path=self.nwbfile_path, mode="w") as io: + io.write(nwbfile) + + with pynwb.NWBHDF5IO(path=self.nwbfile_path, mode="r", load_namespaces=True) as io: + read_nwbfile = io.read() + + self.assertContainerEqual(microscope, read_nwbfile.devices["Microscope"]) + self.assertContainerEqual(light_source, read_nwbfile.devices["LightSource"]) + + self.assertContainerEqual(imaging_space, read_nwbfile.lab_meta_data["PlanarImagingSpace"]) + self.assertContainerEqual(optical_channel, read_nwbfile.lab_meta_data["MicroscopyOpticalChannel"]) + + self.assertContainerEqual(planar_microscopy_series, read_nwbfile.acquisition["PlanarMicroscopySeries"]) + + +class TestVolumetricMicroscopySeriesSimpleRoundtrip(pynwb_TestCase): + """Simple roundtrip test for VolumetricMicroscopySeries.""" + + def setUp(self): + self.nwbfile_path = "test.nwb" + + def tearDown(self): + pynwb.testing.remove_test_file(self.nwbfile_path) + + def test_roundtrip(self): + nwbfile = mock_NWBFile() + + microscope = mock_Microscope(name="Microscope") + nwbfile.add_device(devices=microscope) + + light_source = mock_LightSource(name="LightSource") + nwbfile.add_device(devices=light_source) + + imaging_space = 
mock_VolumetricImagingSpace(name="VolumetricImagingSpace", microscope=microscope) + nwbfile.add_lab_meta_data(lab_meta_data=imaging_space) # Would prefer .add_imaging_spacec() + + optical_channel = mock_MicroscopyOpticalChannel(name="MicroscopyOpticalChannel") + nwbfile.add_lab_meta_data(lab_meta_data=optical_channel) + + volumetric_microscopy_series = mock_VolumetricMicroscopySeries( + name="VolumetricMicroscopySeries", + microscope=microscope, + light_source=light_source, + imaging_space=imaging_space, + optical_channel=optical_channel, + ) + nwbfile.add_acquisition(nwbdata=volumetric_microscopy_series) + + with pynwb.NWBHDF5IO(path=self.nwbfile_path, mode="w") as io: + io.write(nwbfile) + + with pynwb.NWBHDF5IO(path=self.nwbfile_path, mode="r", load_namespaces=True) as io: + read_nwbfile = io.read() + + self.assertContainerEqual(microscope, read_nwbfile.devices["Microscope"]) + self.assertContainerEqual(light_source, read_nwbfile.devices["LightSource"]) + + self.assertContainerEqual(imaging_space, read_nwbfile.lab_meta_data["VolumetricImagingSpace"]) + self.assertContainerEqual(optical_channel, read_nwbfile.lab_meta_data["MicroscopyOpticalChannel"]) + + self.assertContainerEqual( + volumetric_microscopy_series, read_nwbfile.acquisition["VolumetricMicroscopySeries"] + ) + + +class TestVariableDepthMicroscopySeriesSimpleRoundtrip(pynwb_TestCase): + """Simple roundtrip test for VariableDepthMicroscopySeries.""" + + def setUp(self): + self.nwbfile_path = "test.nwb" + + def tearDown(self): + pynwb.testing.remove_test_file(self.nwbfile_path) + + def test_roundtrip(self): + nwbfile = mock_NWBFile() + + microscope = mock_Microscope(name="Microscope") + nwbfile.add_device(devices=microscope) + + light_source = mock_LightSource(name="LightSource") + nwbfile.add_device(devices=light_source) + + imaging_space = mock_PlanarImagingSpace(name="PlanarImagingSpace", microscope=microscope) + nwbfile.add_lab_meta_data(lab_meta_data=imaging_space) # Would prefer 
.add_imaging_spacec() + + optical_channel = mock_MicroscopyOpticalChannel(name="MicroscopyOpticalChannel") + nwbfile.add_lab_meta_data(lab_meta_data=optical_channel) + + variable_depth_microscopy_series = mock_VariableDepthMicroscopySeries( + name="VariableDepthMicroscopySeries", + microscope=microscope, + light_source=light_source, + imaging_space=imaging_space, + optical_channel=optical_channel, + ) + nwbfile.add_acquisition(nwbdata=variable_depth_microscopy_series) + + with pynwb.NWBHDF5IO(path=self.nwbfile_path, mode="w") as io: + io.write(nwbfile) + + with pynwb.NWBHDF5IO(path=self.nwbfile_path, mode="r", load_namespaces=True) as io: + read_nwbfile = io.read() + + self.assertContainerEqual(microscope, read_nwbfile.devices["Microscope"]) + self.assertContainerEqual(light_source, read_nwbfile.devices["LightSource"]) + + self.assertContainerEqual(imaging_space, read_nwbfile.lab_meta_data["PlanarImagingSpace"]) + self.assertContainerEqual(optical_channel, read_nwbfile.lab_meta_data["MicroscopyOpticalChannel"]) + + self.assertContainerEqual( + variable_depth_microscopy_series, read_nwbfile.acquisition["VariableDepthMicroscopySeries"] + ) diff --git a/src/pynwb/tests/test_tetrodeseries.py b/src/pynwb/tests/test_tetrodeseries.py deleted file mode 100644 index 489297e..0000000 --- a/src/pynwb/tests/test_tetrodeseries.py +++ /dev/null @@ -1,170 +0,0 @@ -import datetime -import numpy as np - -from pynwb import NWBHDF5IO, NWBFile -from pynwb.core import DynamicTableRegion -from pynwb.device import Device -from pynwb.ecephys import ElectrodeGroup -from pynwb.file import ElectrodeTable as get_electrode_table -from pynwb.testing import TestCase, remove_test_file, AcquisitionH5IOMixin - -from ndx_microscopy import TetrodeSeries - - -def set_up_nwbfile(): - nwbfile = NWBFile( - session_description='session_description', - identifier='identifier', - session_start_time=datetime.datetime.now(datetime.timezone.utc) - ) - - device = nwbfile.create_device( - name='device_name' - ) 
- - electrode_group = nwbfile.create_electrode_group( - name='electrode_group', - description='description', - location='location', - device=device - ) - - for i in np.arange(10.): - nwbfile.add_electrode( - x=i, - y=i, - z=i, - imp=np.nan, - location='location', - filtering='filtering', - group=electrode_group - ) - - return nwbfile - - -class TestTetrodeSeriesConstructor(TestCase): - - def setUp(self): - """Set up an NWB file. Necessary because TetrodeSeries requires references to electrodes.""" - self.nwbfile = set_up_nwbfile() - - def test_constructor(self): - """Test that the constructor for TetrodeSeries sets values as expected.""" - all_electrodes = self.nwbfile.create_electrode_table_region( - region=list(range(0, 10)), - description='all the electrodes' - ) - - data = np.random.rand(100, 3) - tetrode_series = TetrodeSeries( - name='name', - description='description', - data=data, - rate=1000., - electrodes=all_electrodes, - trode_id=1 - ) - - self.assertEqual(tetrode_series.name, 'name') - self.assertEqual(tetrode_series.description, 'description') - np.testing.assert_array_equal(tetrode_series.data, data) - self.assertEqual(tetrode_series.rate, 1000.) - self.assertEqual(tetrode_series.starting_time, 0) - self.assertEqual(tetrode_series.electrodes, all_electrodes) - self.assertEqual(tetrode_series.trode_id, 1) - - -class TestTetrodeSeriesRoundtrip(TestCase): - """Simple roundtrip test for TetrodeSeries.""" - - def setUp(self): - self.nwbfile = set_up_nwbfile() - self.path = 'test.nwb' - - def tearDown(self): - remove_test_file(self.path) - - def test_roundtrip(self): - """ - Add a TetrodeSeries to an NWBFile, write it to file, read the file, and test that the TetrodeSeries from the - file matches the original TetrodeSeries. 
- """ - all_electrodes = self.nwbfile.create_electrode_table_region( - region=list(range(0, 10)), - description='all the electrodes' - ) - - data = np.random.rand(100, 3) - tetrode_series = TetrodeSeries( - name='TetrodeSeries', - description='description', - data=data, - rate=1000., - electrodes=all_electrodes, - trode_id=1 - ) - - self.nwbfile.add_acquisition(tetrode_series) - - with NWBHDF5IO(self.path, mode='w') as io: - io.write(self.nwbfile) - - with NWBHDF5IO(self.path, mode='r', load_namespaces=True) as io: - read_nwbfile = io.read() - self.assertContainerEqual(tetrode_series, read_nwbfile.acquisition['TetrodeSeries']) - - -class TestTetrodeSeriesRoundtripPyNWB(AcquisitionH5IOMixin, TestCase): - """Complex, more complete roundtrip test for TetrodeSeries using pynwb.testing infrastructure.""" - - def setUpContainer(self): - """ Return the test TetrodeSeries to read/write """ - self.device = Device( - name='device_name' - ) - - self.group = ElectrodeGroup( - name='electrode_group', - description='description', - location='location', - device=self.device - ) - - self.table = get_electrode_table() # manually create a table of electrodes - for i in np.arange(10.): - self.table.add_row( - x=i, - y=i, - z=i, - imp=np.nan, - location='location', - filtering='filtering', - group=self.group, - group_name='electrode_group' - ) - - all_electrodes = DynamicTableRegion( - data=list(range(0, 10)), - description='all the electrodes', - name='electrodes', - table=self.table - ) - - data = np.random.rand(100, 3) - tetrode_series = TetrodeSeries( - name='name', - description='description', - data=data, - rate=1000., - electrodes=all_electrodes, - trode_id=1 - ) - return tetrode_series - - def addContainer(self, nwbfile): - """Add the test TetrodeSeries and related objects to the given NWBFile.""" - nwbfile.add_device(self.device) - nwbfile.add_electrode_group(self.group) - nwbfile.set_electrode_table(self.table) - nwbfile.add_acquisition(self.container) diff --git 
a/src/spec/create_extension_spec.py b/src/spec/create_extension_spec.py deleted file mode 100644 index bdaa367..0000000 --- a/src/spec/create_extension_spec.py +++ /dev/null @@ -1,55 +0,0 @@ -# -*- coding: utf-8 -*- -import os.path - -from pynwb.spec import NWBNamespaceBuilder, export_spec, NWBGroupSpec, NWBAttributeSpec -# TODO: import other spec classes as needed -# from pynwb.spec import NWBDatasetSpec, NWBLinkSpec, NWBDtypeSpec, NWBRefSpec - - -def main(): - # these arguments were auto-generated from your cookiecutter inputs - ns_builder = NWBNamespaceBuilder( - doc="""An example extension to demonstrate the TAB proposal for enhancements to optical physiology neurodata types.""", - name="""ndx-microscopy""", - version="""0.1.0""", - author=list(map(str.strip, """Cody Baker and Alessandra Trapani""".split(','))), - contact=list(map(str.strip, """cody.baker@catalystneuro.com""".split(','))) - ) - - # TODO: specify the neurodata_types that are used by the extension as well - # as in which namespace they are found. - # this is similar to specifying the Python modules that need to be imported - # to use your new data types. - # all types included or used by the types specified here will also be - # included. 
- ns_builder.include_type('ElectricalSeries', namespace='core') - - # TODO: define your new data types - # see https://pynwb.readthedocs.io/en/latest/extensions.html#extending-nwb - # for more information - tetrode_series = NWBGroupSpec( - neurodata_type_def='TetrodeSeries', - neurodata_type_inc='ElectricalSeries', - doc=('An extension of ElectricalSeries to include the tetrode ID for ' - 'each time series.'), - attributes=[ - NWBAttributeSpec( - name='trode_id', - doc='The tetrode ID.', - dtype='int32' - ) - ], - ) - - # TODO: add all of your new data types to this list - new_data_types = [tetrode_series] - - # export the spec to yaml files in the spec folder - output_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', 'spec')) - export_spec(ns_builder, new_data_types, output_dir) - print('Spec files generated. Please make sure to rerun `pip install .` to load the changes.') - - -if __name__ == '__main__': - # usage: python create_extension_spec.py - main()