From b4f8838cbfbb7f8a117bd7e0aad19133d26868b4 Mon Sep 17 00:00:00 2001 From: Ryan Ly Date: Tue, 17 Jan 2023 07:31:35 -0800 Subject: [PATCH] Prepare 2.6.0 release (#534) * Update release notes to mark version 2.6.0 * Scrub NWB:N and update legal year * Remove old scripts and 1.0.4 schema * Update github action to validate schema * Update legal year * Fix release notes * Add link to NWB overview on main README * Update release template * Update release template * Update Readme.md * Update format_release_notes.rst * Add note about NWB 1 reference schema --- .github/PULL_REQUEST_TEMPLATE/release.md | 5 +- .github/workflows/validate_schema.yml | 12 +- Legal.txt | 2 +- README.rst | 10 +- bin/find_missing_help.py | 38 - bin/reformat_spec.py | 718 ----- core/nwb.file.yaml | 6 +- core/nwb.namespace.yaml | 6 +- docs/Readme.md | 24 +- docs/format/Readme.md | 2 +- docs/format/source/conf.py | 4 +- docs/format/source/credits.rst | 8 +- docs/format/source/format_release_notes.rst | 8 +- docs/storage/source/credits.rst | 8 +- docs/storage/source/storage_description.rst | 20 +- license.txt | 2 +- orig/schema.json | 2692 ------------------- 17 files changed, 61 insertions(+), 3504 deletions(-) delete mode 100755 bin/find_missing_help.py delete mode 100755 bin/reformat_spec.py delete mode 100644 orig/schema.json diff --git a/.github/PULL_REQUEST_TEMPLATE/release.md b/.github/PULL_REQUEST_TEMPLATE/release.md index 7ff48c92..ea0042f9 100644 --- a/.github/PULL_REQUEST_TEMPLATE/release.md +++ b/.github/PULL_REQUEST_TEMPLATE/release.md @@ -1,4 +1,5 @@ Prepare for release of nwb-schema [version] +Target release date: [date] ### Before merging: - [ ] Update requirements versions as needed @@ -13,8 +14,8 @@ Prepare for release of nwb-schema [version] - [ ] Test docs locally (`cd docs/format; make fulldoc`) where the nwb-schema submodule in the local version of PyNWB is fully up-to-date with the head of the dev branch. - [ ] Push changes to this PR and make sure all PRs to be included in this release have been merged -- [ ] Check that the readthedocs build for this PR succeeds (build latest to pull the new branch, then activate and - build docs for new branch): https://readthedocs.org/projects/nwb-schema/builds/ +- [ ] Check that the readthedocs build for this PR succeeds (see auto-triggered PR build): + https://readthedocs.org/projects/nwb-schema/builds/ ### After merging: 1. Create a new git tag. 
Pull the latest master branch locally, run `git tag [version] --sign`, copy and paste the diff --git a/.github/workflows/validate_schema.yml b/.github/workflows/validate_schema.yml index 0d719325..a0a31eb2 100644 --- a/.github/workflows/validate_schema.yml +++ b/.github/workflows/validate_schema.yml @@ -1,6 +1,6 @@ name: Validate schema -on: [push, pull_request] +on: [push, pull_request, workflow_dispatch] jobs: validate: @@ -8,12 +8,12 @@ jobs: if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 - - name: Set up Python 3.9 - uses: actions/setup-python@v2 + - uses: actions/checkout@v3 + - name: Set up Python 3.10 + uses: actions/setup-python@v4 with: - python-version: "3.9" - - name: Install hdmf + python-version: "3.10" + - name: Install dev branch of HDMF run: | pip install git+https://github.com/hdmf-dev/hdmf.git - name: Validate schema specification diff --git a/Legal.txt b/Legal.txt index 9d3176d4..1139ba74 100644 --- a/Legal.txt +++ b/Legal.txt @@ -1,4 +1,4 @@ -“nwb-schema” Copyright (c) 2017-2021, The Regents of the University of California, through Lawrence Berkeley National Laboratory (subject to receipt of any required approvals from the U.S. Dept. of Energy). All rights reserved. +“nwb-schema” Copyright (c) 2017-2023, The Regents of the University of California, through Lawrence Berkeley National Laboratory (subject to receipt of any required approvals from the U.S. Dept. of Energy). All rights reserved. If you have questions about your rights to use or distribute this software, please contact Berkeley Lab's Innovation & Partnerships Office at IPO@lbl.gov. diff --git a/README.rst b/README.rst index f6536cb3..c7cc942e 100644 --- a/README.rst +++ b/README.rst @@ -1,7 +1,9 @@ NWB Schema Format ======================== -A format specification schema for the Neurodata Without Borders: Neurophysiology (NWB:N) data format. +A format specification schema for the Neurodata Without Borders (NWB) data format. + +To get started using NWB, please go to the [NWB overview website](https://nwb-overview.readthedocs.io/en/latest/). This repo contains: @@ -19,11 +21,13 @@ For more information: - The PyNWB Python API for the NWB format is available on `Github `_ - The MatNWB Matlab API for the NWB format is available on `Github `_ +The NWB 1.0 format and API are archived in the [NeurodataWithoutBorders/api-python](https://github.com/NeurodataWithoutBorders/api-python) repository. https://github.com/NeurodataWithoutBorders/api-python/blob/master/nwb/nwb_core.py contains the reference schema for the NWB 1 format. + License ======================== -“nwb-schema” Copyright (c) 2017-2021, The Regents of the University of California, through Lawrence Berkeley National Laboratory (subject to receipt of any required approvals from the U.S. Dept. of Energy). All rights reserved. +“nwb-schema” Copyright (c) 2017-2023, The Regents of the University of California, through Lawrence Berkeley National Laboratory (subject to receipt of any required approvals from the U.S. Dept. of Energy). All rights reserved. 
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: @@ -41,7 +45,7 @@ You are under no obligation whatsoever to provide any bug fixes, patches, or upg Copyright ======================== -“nwb-schema” Copyright (c) 2017-2021, The Regents of the University of California, through Lawrence Berkeley National Laboratory (subject to receipt of any required approvals from the U.S. Dept. of Energy). All rights reserved. +“nwb-schema” Copyright (c) 2017-2023, The Regents of the University of California, through Lawrence Berkeley National Laboratory (subject to receipt of any required approvals from the U.S. Dept. of Energy). All rights reserved. If you have questions about your rights to use or distribute this software, please contact Berkeley Lab's Innovation & Partnerships Office at IPO@lbl.gov. diff --git a/bin/find_missing_help.py b/bin/find_missing_help.py deleted file mode 100755 index e5e84875..00000000 --- a/bin/find_missing_help.py +++ /dev/null @@ -1,38 +0,0 @@ -#!/usr/bin/env python -import ruamel.yaml as yaml -import sys - - -missing = list() - -curr_file = None -stack = list() - -def find_missing_help(d): - stack.append(d.get('neurodata_type_def', d.get('neurodata_type_inc', d.get('name')))) - if d.get('neurodata_type_inc') == 'NWBContainer' and d.get('neurodata_type_def') is not None: - found_help = False - if 'attributes' in d: - for attr in d['attributes']: - if attr.get('name') == 'help' and attr.get('value') is not None: - found_help = True - if not found_help: - missing.append('%s: %s' % (curr_file, '/'.join(stack))) - - if 'groups' in d: - for sub_d in d['groups']: - find_missing_help(sub_d) - stack.pop() - - -for curr_file in sys.argv[1:]: - with open(curr_file) as fin: - fd = yaml.safe_load(fin) - for d in fd.get('groups', []): - find_missing_help(d) - -if len(missing) > 0: - print('The following types are missing help') - for t in missing: - print(t) - sys.exit(1) diff --git a/bin/reformat_spec.py b/bin/reformat_spec.py deleted file mode 100755 index 6c405fc6..00000000 --- a/bin/reformat_spec.py +++ /dev/null @@ -1,718 +0,0 @@ -#!/usr/bin/env python3 -import os -import sys -import json -import yaml -from ruamel import yaml - -#import pynwb -from collections import OrderedDict - -from datetime import datetime -from form.spec import Spec, AttributeSpec, LinkSpec, SpecNamespace, NamespaceBuilder -from pynwb.spec import NWBDatasetSpec, NWBGroupSpec, NWBNamespace, NWBAttributeSpec - -""" - stuff to clean up - - - float('NaN') should be just 'NaN' -""" - -CORE_NAMESPACE='core' - -global stack -stack = list() -def monitor(func): - def _func(name, d, **kwargs): - - nodename = name - if '<' in nodename: - end = nodename.rfind('>') - nodename = nodename[1:end] - elif '/' in nodename: - nodename = nodename[0:-1] - - stack.append(nodename) - ret = func(name, d, **kwargs) - stack.pop() - return ret - return _func - -def get_node(): - return '/'.join(stack) - -ignore = {'electrode_group', 'electrode_map', 'filtering', 'impedance'} -metadata_ndts = list() - -subspec_locations = { - 'ElectrodeGroup': 'ecephys', - 'IntracellularElectrode': 'icephys', - 'ImagingPlane': 'ophys', - 'OptogeneticStimulusSite': 'ogen', - 'Epoch': 'epoch', - -} - -device_spec = LinkSpec('the device that was used to record from this electrode group', 'Device', name='device', quantity='?') -eg_help = 'A physical grouping of channels' -dev_help = 'A recording device e.g. 
amplifier' -alternate_defs = { - 'ElectrodeGroup': NWBGroupSpec('One of possibly many groups, one for each electrode group.', - neurodata_type_def='ElectrodeGroup', - neurodata_type_inc='NWBContainer', - namespace=CORE_NAMESPACE, - attributes = [ - AttributeSpec('help', 'str', "Value is '%s'" % eg_help, value=eg_help) - ], - datasets = [ - NWBDatasetSpec('array with description for each channel', 'text', name='channel_description', shape=(None,), dims=('num_channels',)), - NWBDatasetSpec('array with location description for each channel e.g. "CA1"', 'text', name='channel_location', shape=(None,), dims=('num_channels',)), - NWBDatasetSpec('array with description of filtering applied to each channel', 'text', name='channel_filtering', shape=(None,), dims=('num_channels',)), - NWBDatasetSpec('xyz-coordinates for each channel. use NaN for unknown dimensions', 'text', name='channel_coordinates', shape=(None,3), dims=('num_channels', 'dimensions')), - NWBDatasetSpec('float array with impedance used on each channel. Can be 2D matrix to store a range', 'text', name='channel_impedance', shape=(None,), dims=('num_channels',)), - NWBDatasetSpec('description of this electrode group', 'text', name='description'), - NWBDatasetSpec('description of location of this electrode group', 'text', name='location'), - ], - links = [ - device_spec - ] - ), - 'Device': NWBGroupSpec('One of possibly many. Information about device and device description.', - neurodata_type_def='Device', - neurodata_type_inc='NWBContainer', - namespace=CORE_NAMESPACE, - attributes = [ - AttributeSpec('help', 'str', "Value is '%s'" % dev_help, value=dev_help) - ] - ) -} - -NAME_WILDCARD = "*" - -ndmap_to_group = { - "*": 'Device', -} - -ndmap = { - "": 'EpochTimeSeries', - "": 'Epoch', - "": 'Device', - "": 'SpecFile', - "": 'ElectrodeGroup', - "": 'IntracellularElectrode', - "": 'OptogeneticStimulusSite', - "": 'OpticalChannel', - "": 'ImagingPlane', - "": 'SpikeUnit', - "": 'ROI', - "": 'PlaneSegmentation', - "": 'CorrectedImageStack', -} - - - -#ImageSegmentation.image_plane.imaging_plane_name -metadata_links = { - 'electrode_idx': 'ElectrodeGroup', - 'imaging_plane': 'ImagingPlane', - 'site': 'OptogeneticStimulusSite', - 'imaging_plane_name': 'ImagingPlane', - 'electrode_name': 'IntracellularElectrode', -} - -metadata_links_doc = { - 'electrode_idx': 'link to ElectrodeGroup group that generated this TimeSeries data', - 'imaging_plane': 'link to ImagingPlane group from which this TimeSeries data was generated', - 'site': 'link to OptogeneticStimulusSite group that describes the site to which this stimulus was applied', - 'electrode_name': 'link to IntracellularElectrode group that describes th electrode that was used to apply or record this data', - 'imaging_plane_name': 'link to ImagingPlane group from which this TimeSeries data was generated', - -} -metadata_links_rename = { - 'electrode_idx': 'electrode_group', - 'electrode_name': 'electrode', - 'imaging_plane_name': 'imaging_plane' -} - -all_specs = OrderedDict() - - -include_doc = { - 'presentation/': 'TimeSeries objects containing data of presented stimuli', - 'templates/': 'TimeSeries objects containing template data of presented stimuli', - 'timeseries/': 'TimeSeries object containing data generated during data acquisition', - 'FilteredEphys/': 'ElectricalSeries object containing filtered electrophysiology data', - 'PupilTracking/': 'TimeSeries object containing time series data on pupil size', - 'Position/': 'SpatialSeries object containing position data', - 
'Fluorescence/': 'RoiResponseSeries object containing fluorescence data for a ROI', - 'ProcessingModule': 'Interface objects containing data output from processing steps', - 'Module': 'Interface objects containing data output from processing steps', - - 'EventWaveform/': 'SpikeEventSeries object containing detected spike event waveforms', - 'EyeTracking/': 'SpatialSeries object containing data measuring direction of gaze', - 'BehavioralEpochs/': 'IntervalSeries object containing start and stop times of epochs', - 'DfOverF/': 'RoiResponseSeries object containing dF/F for a ROI', - 'LFP/': 'ElectricalSeries object containing LFP data for one or channels', - 'BehavioralTimeSeries/': 'TimeSeries object containing continuous behavioral data', - 'BehavioralEvents/': 'TimeSeries object containing irregular behavioral events', - 'CompassDirection/': 'SpatialSeries object containing direction of gaze travel', - -} - -nd_rename = { - 'Interface': 'NWBContainer', - 'Module': 'ProcessingModule', -} - -def build_group_helper(**kwargs): - myname = kwargs.get('name', None) - if myname == NAME_WILDCARD: - kwargs['name'] = None - myname = None - ndt = kwargs.get('neurodata_type_def') - inc = kwargs.get('neurodata_type_inc') - if ndt is not None: - if ndt == 'Device': - return alternate_defs[ndt] - kwargs['namespace'] = 'core' - if ndt in nd_rename: - kwargs['neurodata_type_def'] = nd_rename[ndt] - if inc is None and ndt != 'Interface': - kwargs['neurodata_type_inc'] = 'NWBContainer' - if inc is not None: - if inc in nd_rename: - kwargs['neurodata_type_inc'] = nd_rename[inc] - doc = kwargs.pop('doc') - if myname == None: - grp_spec = NWBGroupSpec(doc, **kwargs) - else: - if ndt is not None: - kwargs['default_name'] = myname - else: - kwargs['name'] = myname - grp_spec = NWBGroupSpec(doc, **kwargs) - return grp_spec - -@monitor -def build_group(name, d, ndtype=None): - #required = True - myname = name - if name[0] == '<': - if name[1].isupper(): - name = NAME_WILDCARD - quantity, myname = strip_characters(name) - if myname[-1] == '/': - myname = myname[:-1] - extends = None - if 'merge' in d: - merge = d.pop('merge') - base = merge[0] - end = base.rfind('>') - base = base[1:end] if end > 0 else base - #extends = all_specs[base] - extends = base - - if myname[0] == '<': - neurodata_type = ndmap.get(myname, ndmap_to_group.get(myname)) - if neurodata_type is None: - neurodata_type = ndtype - else: - myname = NAME_WILDCARD - else: - neurodata_type = ndtype - - desc = d.get('description', None) - if isinstance(desc, dict) or desc is None: - desc = d.pop('_description', None) - else: - d.pop('description', None) - - if 'attributes' in d: - attributes = d.pop('attributes', None) - if 'neurodata_type' in attributes: - neurodata_type = attributes.pop('neurodata_type')['value'] - attributes.pop('ancestry', None) - elif 'ancestry' in attributes: - #neurodata_type = attributes['ancestry']['value'][-1] - neurodata_type = attributes.pop('ancestry')['value'][-1] - if extends is not None: - if neurodata_type is None: - neurodata_type = myname - grp_spec = build_group_helper(name=myname, quantity=quantity, doc=desc, neurodata_type_def=neurodata_type, neurodata_type_inc=extends) - add_attributes(grp_spec, attributes) - elif neurodata_type is not None: - grp_spec = build_group_helper(name=myname, quantity=quantity, doc=desc, neurodata_type_def=neurodata_type, neurodata_type_inc=extends) - else: - if myname == NAME_WILDCARD: - grp_spec = build_group_helper(doc=desc, quantity=quantity, neurodata_type_inc=extends) - else: - 
grp_spec = build_group_helper(doc=desc, name=myname, quantity=quantity, neurodata_type_inc=extends) - - for key in sorted(d.keys()): - value = d[key] - tmp_name = key - if tmp_name == 'autogen': - continue - if tmp_name[0] == '_': - #TODO: figure out how to deal with these reserved keys - print ('found leading underscore: key=%s, ndt=%s, name=%s' % (key, neurodata_type, myname), file=sys.stderr) - continue - if isinstance(value, str): - continue - if 'autogen' in value: - if value['autogen']['type'] != 'create': - print ('skipping autogen: %s/%s' % (get_node(), key)) - continue - - if tmp_name == 'include': - ndt = next(iter(value.keys())) - quantity = None - if ndt[-1] != '/': - quantity = ndt[-1] - ndt = ndt[:-1] - ndt = ndt[1:ndt.rfind('>')] - if ndt == 'Interface': - ndt = 'NWBContainer' - doc = include_doc.get(name, include_doc.get(neurodata_type)) - vargs = {'neurodata_type_inc': ndt} - if quantity is not None: - vargs['quantity'] = quantity - if ndt is not None: - vargs['namespace'] = CORE_NAMESPACE - grp_spec.add_group(doc, **vargs) - elif 'link' in value: - ndt = value['link']['target_type'] - doc = value.get('description', None) - if ndt[0] == '<': - ndt = ndt[1:ndt.rfind('>')] - else: - ndt = ndt[0:-1] - link_name = key - if link_name[-1] == '/': - link_name = link_name[0:-1] - #grp_spec.include_neurodata_link(ndt, name=link_name) - grp_spec.add_link(doc, ndt, name=link_name) - elif 'merge' in value: - ndt = value['merge'][0] - ndt = ndt[1:ndt.rfind('>')] - doc = value['description'] - if key[0] == '<': - #grp_spec.include_neurodata_group(ndt) - grp_spec.add_group(doc, neurodata_type_inc=ndt, namespace=CORE_NAMESPACE) - else: - group_name = key - if group_name[-1] == '/': - group_name = group_name[0:-1] - vargs = {'neurodata_type_inc': ndt, name: group_name} - if ndt is not None: - vargs['namespace'] = CORE_NAMESPACE - grp_spec.add_group(doc, **vargs) - elif tmp_name in metadata_links: - ndt = metadata_links[tmp_name] - doc = metadata_links_doc[tmp_name] - grp_spec.add_link(doc, ndt, name=metadata_links_rename.get(tmp_name, tmp_name)) - else: - if key.rfind('/') == -1: # forward-slash not found - if key in ndmap_to_group: - grp_spec.set_group(build_group(tmp_name, value)) - else: - if tmp_name not in ignore: - grp_spec.set_dataset(build_dataset(tmp_name, value)) - else: - print('skipping', tmp_name) - else: - subgrp = build_group(tmp_name, value) - if subgrp.neurodata_type_def in subspec_locations: - if subgrp.neurodata_type_def in alternate_defs: - print('getting alternate_def for', subgrp.neurodata_type_def) - subgrp = alternate_defs[subgrp.neurodata_type_def] - vargs = {'neurodata_type_inc': subgrp.neurodata_type_def, 'namespace': CORE_NAMESPACE, 'quantity': '*'} - grp_spec.add_group(subgrp.doc, **vargs) - metadata_ndts.append(subgrp) - else: - grp_spec.set_group(subgrp) - - if neurodata_type is not None: - all_specs[neurodata_type] = grp_spec - return grp_spec - -dataset_ndt = { '': 'Image' } -@monitor -def build_dataset(name, d): - kwargs = remap_keys(name, d) - if 'name' in kwargs: - if kwargs['name'] in dataset_ndt: - tmpname = kwargs.pop('name') - kwargs['neurodata_type_def'] = dataset_ndt[tmpname] - #kwargs['neurodata_type_inc'] = 'NWBData' - if 'neurodata_type_def' in kwargs or 'neurodata_type_inc' in kwargs: - kwargs['namespace'] = CORE_NAMESPACE - dset_spec = NWBDatasetSpec(kwargs.pop('doc'), kwargs.pop('dtype'), **kwargs) - if 'attributes' in d: - add_attributes(dset_spec, d['attributes']) - return dset_spec - -def add_attributes(parent_spec, attributes): - for 
attr_name in sorted(attributes.keys()): - attr_spec = attributes[attr_name] - if 'autogen' in attr_spec: - print('skipping autogen attribute: %s.%s' % (get_node(), attr_name)) - continue - parent_spec.set_attribute(build_attribute(attr_name, attr_spec)) - -override_doc = { - 'conversion': "Scalar to multiply each element in data to convert it to the specified unit", - 'unit': "The base unit of measure used to store data. This should be in the SI unit. COMMENT: This is the SI unit (when appropriate) of the stored data, such as Volts. If the actual data is stored in millivolts, the field 'conversion' below describes how to convert the data to the specified SI unit.", - 'resolution': "Smallest meaningful difference between values in data, stored in the specified by unit. COMMENT: E.g., the change in value of the least significant bit, or a larger number if signal noise is known to be present. If unknown, use NaN", - 'help': 'A help statement', - -} -def build_attribute(name, d): - kwargs = remap_keys(name, d) - myname = kwargs.pop('name') - doc = kwargs.pop('doc') - dtype = kwargs.pop('dtype') - if 'value' in kwargs and isinstance(kwargs['value'], str): - if 'NaN' in kwargs['value']: - kwargs['value'] = 'NaN' - attr_spec = AttributeSpec(myname, dtype, doc, **kwargs) - return attr_spec - -def strip_characters(name): - flags = ('!', '?', '+', '*', '^') - quantity = 1 - retname = name - if retname != NAME_WILDCARD: - if name[-1] == '!': - retname = name[:-1] - quantity = 1 - elif name[-1] == '?': - retname = name[:-1] - quantity = '?' - elif name[-1] == '+': - retname = name[:-1] - quantity = '+' - elif name[-1] == '*': - retname = name[:-1] - quantity = '*' - elif name[-1] == '^': - retname = name[:-1] - quantity = '?' - - return (quantity, retname) - - -def remap_keys(name, d): - # TODO: add parsing of +/* for 'num_args' - # will move to quantity which takes on values *, +, ? 
, or an integer - ret = OrderedDict() - quantity, specname = strip_characters(name) - if quantity != 1: - ret['quantity'] = quantity - - if specname in ndmap: - ret['neurodata_type_def'] = ndmap[specname] - else: - ret['name'] = specname - #ret['name'] = name - #if name[-1] == '?': - # ret['required'] = False - # ret['name'] = name[:-1] - #elif name[-1] == '^': - # ret['name'] = name[:-1] - ret['dtype'] = d.get('data_type', 'None') - - value = d.get('value', None) - if isinstance(value, list) and len(value) == 1: - value = value[0] - - const = d.pop('const', False) - if value is not None: - if const: - ret['value'] = value - else: - ret['default_value'] = value - def_doc = None - ret['doc'] = d.get('description', def_doc) - - if 'value' in ret and ret['value'] is not None: - ret['doc'] = "Value is '%s'" % str(ret['value']) - elif ret['doc'] is None: - ret['doc'] = override_doc.get(ret['name']) - ret['dims'] = d.get('dimensions', None) - ret['shape'] = make_shape(ret['dims'], d) - ret['dims'] = get_dimensions(ret['dims'], d) - return ret - - -def join_components(components): - if isinstance(components[0], dict): - return "|".join(x['alias'] for x in components) - else: - return [join_components(c) for c in components] - -def get_dimensions(dims, d): - if dims is None: - return None - if isinstance(dims, str): - if dims in d and 'components' in d[dims]: - return join_components(d[dims]['components']) - return dims - else: - return [ get_dimensions(i, d) for i in dims ] - -def make_shape(dims, d): - if dims is None: - return None - if isinstance(dims, str): - if dims in d and 'components' in d[dims]: - return len(d[dims]['components']) - return None - else: - return [ make_shape(i, d) for i in dims ] - -def merge_spec(target, source): - for grp_spec in source.groups: - target.set_group(grp_spec) - for dset_spec in source.datasets: - target.set_dataset(dset_spec) - for attr_spec in source.attributes: - target.set_attribute(attr_spec) - -def load_spec(spec): - - spec = spec['fs']['core']['schema'] - - # load Module specs - # load File spec - # / - # /acquisition/ - # /analysis/ - # /epochs/ - # /general/ - # /general/extracellular_ephys/? - # /general/intracellular_ephys/? - # /general/optogenetics/? - # /general/optophysiology/? 
- # /processing/ - # /stimulus/ - - root = build_group('root', spec['/'], ndtype='NWBFile') - - root_help = 'an NWB:N file for storing cellular-based neurophysiology data' - root.add_attribute('help', 'str', "Value is '%s'" % root_help, value=root_help) - - - stack.append('NWBFile') - acquisition = build_group('acquisition', spec['/acquisition/']) - root.set_group(acquisition) - analysis = build_group('analysis', spec['/analysis/']) - root.set_group(analysis) - epochs = build_group('epochs', spec['/epochs/']) - root.set_group(epochs) - - module_json = spec['/processing/'].pop("/*") - - processing = build_group('processing', spec['/processing/']) - processing.add_group('Intermediate analysis of acquired data', neurodata_type_inc='ProcessingModule', quantity='*') - root.set_group(processing) - - stimulus = build_group('stimulus', spec['/stimulus/']) - root.set_group(stimulus) - - general = build_group('general', spec['/general/']) - root.set_group(general) - - extracellular_ephys = build_group('extracellular_ephys?', spec['/general/extracellular_ephys/?']) - general.set_group(extracellular_ephys) - - intracellular_ephys = build_group('intracellular_ephys?', spec['/general/intracellular_ephys/?']) - general.set_group(intracellular_ephys) - - optogenetics = build_group('optogenetics?', spec['/general/optogenetics/?']) - general.set_group(optogenetics) - - optophysiology = build_group('optophysiology?', spec['/general/optophysiology/?']) - general.set_group(optophysiology) - stack.pop() - - spec["/"]["attributes"]["description^"]['value'] = "no description" - spec["/"]["attributes"]["comments^"]['value'] = "no comments" - mod_spec = build_group('/', module_json, ndtype='ProcessingModule') - mod_help = 'A collection of analysis outputs from processing of data' - mod_spec.add_attribute('help', 'text', "Value is '%s'" % mod_help, value=mod_help) - base = [ - #build_group("/*", module_json, ndtype='Module'), - build_group("/", spec["/"], ndtype='NWBContainer'), - build_group("/", spec["/"], ndtype='TimeSeries'), - mod_spec, - ] - - - # load TimeSeries specs - - type_specs = OrderedDict() - subspecs = [ - 'epoch', - 'ecephys', - 'icephys', - 'image', - 'ophys', - 'ogen', - 'behavior', - 'misc', - 'retinotopy', - ] - type_specs['file'] = [root] - - type_specs['epoch'] = [] - - type_specs['ecephys'] = [ - "/", - "/", - "ClusterWaveforms/", - "Clustering/", - "FeatureExtraction/", - "EventDetection/", - "EventWaveform/", - "FilteredEphys/", - "LFP/", - ] - - type_specs['icephys'] = [ - "/", - "/", - "/", - "/", - "/", - "/" - ] - - type_specs['image'] = [ - "/", - "/", - "/", - "/", - ] - - type_specs['ophys'] = [ - "/", - "/", - "DfOverF/", - "Fluorescence/", - "ImageSegmentation/", - ] - - type_specs['ogen'] = [ - "/", - ] - - type_specs['behavior'] = [ - "/", - "BehavioralEpochs/", - "BehavioralEvents/", - "BehavioralTimeSeries/", - "PupilTracking/", - "EyeTracking/", - "CompassDirection/", - "Position/", - "MotionCorrection/", - ] - - type_specs['misc'] = [ - "/", - "/", - "/", - "UnitTimes/", - ] - - - type_specs['retinotopy'] = [ - "ImagingRetinotopy/", - ] - - def mapfunc(name): - namearg = name - ndt = None - if name[0] == '<': - namearg = name - ndt = name[1:name.rfind('>')] - #return build_group(NAME_WILDCARD, spec[name]) - else: - ndt = name[0:name.rfind('/')] - #return build_group(name, spec[name]) - - return build_group(namearg, spec[name], ndtype=ndt) - - for key in subspecs: - type_specs[key] = list(map(mapfunc, type_specs[key])) - - type_specs['base'] = base - for subspec in 
metadata_ndts: - loc = subspec_locations[subspec.neurodata_type_def] - type_specs[loc].append(subspec) - return { k: {'groups': v} for k, v in type_specs.items() } - - - -spec_path = sys.argv[1] -outdir = sys.argv[2] if len(sys.argv) > 2 else "." -with open(spec_path) as spec_in: - nwb_spec = load_spec(json.load(spec_in)) - #nwb_spec = load_iface(json.load(spec_in)) - - -ns = OrderedDict() -ns['doc'] = 'NWB namespace' -ns['name'] = CORE_NAMESPACE -ns['full_name'] = 'NWB core' -ns['version'] = '1.2.0' -ns['date'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S') -ns['author'] = ['Keith Godfrey', 'Jeff Teeters', 'Oliver Ruebel', 'Andrew Tritt'] -ns['contact'] = ['keithg@alleninstitute.org', 'jteeters@berkeley.edu', 'oruebel@lbl.gov', 'ajtritt@lbl.gov'] -ns['namespace_cls'] = NWBNamespace -ns_builder = NamespaceBuilder(ns.pop('doc'), ns.pop('name'), **ns) - -schema = list() - -order = [ - 'base', - 'epoch', - 'image', - 'file', - 'misc', - 'behavior', - 'ecephys', - 'icephys', - 'ogen', - 'ophys', - 'retinotopy', -] -for key in order: - value = nwb_spec[key] - filename = 'nwb.%s.yaml' % key - for spec in value['groups']: - ns_builder.add_spec(filename, spec) - #with open('%s/%s' % (outdir, filename), 'w') as out: - # yaml.dump(json.loads(json.dumps(value)), out, default_flow_style=False) - #schema.append({'source': filename}) - -ns_file = 'nwb.namespace.yaml' -#ns['schema'] = schema -#ns = {'namespaces': [SpecNamespace.build_namespace(**ns)]} -#with open(ns_file, 'w') as out: -# yaml.dump(json.loads(json.dumps(ns)), out, default_flow_style=False) - -ns_builder.export(ns_file, outdir=outdir) - - -import tarfile -cwd = os.getcwd() -os.chdir(outdir) -tar = tarfile.open('nwb_core.tar', 'w') -for key in sorted(nwb_spec.keys()): - specfile = 'nwb.%s.yaml' % (key) - tar.add(specfile) -tar.add('nwb.namespace.yaml') -tar.close() -os.chdir(cwd) diff --git a/core/nwb.file.yaml b/core/nwb.file.yaml index cf97f0a7..6b0b9fe2 100644 --- a/core/nwb.file.yaml +++ b/core/nwb.file.yaml @@ -2,12 +2,12 @@ groups: - neurodata_type_def: NWBFile neurodata_type_inc: NWBContainer name: root - doc: An NWB:N file storing cellular-based neurophysiology data from a single + doc: An NWB file storing cellular-based neurophysiology data from a single experimental session. attributes: - name: nwb_version dtype: text - value: "2.6.0-alpha" + value: "2.6.0" doc: File version string. Use semantic versioning, e.g. 1.2.1. This will be the name of the format with trailing major, minor and patch numbers. datasets: @@ -452,7 +452,7 @@ groups: quantity: '?' attributes: - name: reference - doc: "Age is with reference to this event. Can be 'birth' or + doc: "Age is with reference to this event. Can be 'birth' or 'gestational'. If reference is omitted, 'birth' is implied." dtype: text required: false diff --git a/core/nwb.namespace.yaml b/core/nwb.namespace.yaml index d2c39c1b..5d146ed1 100644 --- a/core/nwb.namespace.yaml +++ b/core/nwb.namespace.yaml @@ -18,7 +18,7 @@ namespaces: full_name: NWB core schema: - namespace: hdmf-common - - doc: This source module contains base data types used throughout the NWB:N data + - doc: This source module contains base data types used throughout the NWB data format. source: nwb.base.yaml title: Base data types @@ -31,9 +31,9 @@ namespaces: - doc: This source module contains neurodata_types for image data. source: nwb.image.yaml title: Image data - - doc: Main NWB:N file specification. + - doc: Main NWB file specification. 
source: nwb.file.yaml - title: NWB:N file + title: NWB file - doc: Miscellaneous types. source: nwb.misc.yaml title: Miscellaneous neurodata_types. diff --git a/docs/Readme.md b/docs/Readme.md index 06b095d3..4b4b87f9 100644 --- a/docs/Readme.md +++ b/docs/Readme.md @@ -1,32 +1,32 @@ -**Overview** +## Overview The documentation for NWB consists of a series of documents describing the various components of NWB: -* ``docs/language`` with the documentation for the NWB:N specification language -* ``docs/format`` with the documentation of the NWB:N data format -* ``docs/storage`` with the documentation of the NWB:N storage component -* The documentation of the PyNWB API is managed in the PyNWB git repo +* ``docs/format`` with the documentation of the NWB data format +* ``docs/storage`` with the documentation of the NWB storage component See also: * [Documentation](https://github.com/NeurodataWithoutBorders/nwb-schema-language) for the [NWB Schema Language](https://schema-language.readthedocs.io/en/latest/) +* [PyNWB API](https://pynwb.readthedocs.io/) +* [MatNWB API](https://github.com/NeurodataWithoutBorders/matnwb) **Building Documentation** The documentation uses Sphinx and can be compiled using the provided Makefiles in the respective documentation -directories. The build process for the different documents is further described in the respective Readme.md files -in the corresponding documentation directories. In general, if you want to rebuild the full HTML and PDF versions -of the specification documents, simply use the ``make fulldoc`` option, e.g., +directories. The build process for the different documents is further described in the respective ``Readme.md`` files +in the corresponding documentation directories. + +To rebuild the full HTML and PDF versions of the specification documents, run: ``` - cd docs/format - make fulldoc +cd docs/format +make fulldoc ``` -**Where are my documents?** +## Where are my documents? The compiled documents are then located in the corresponding ``_build`` folders, e.g.: * ``format/_build/html`` and ``format/_build/latex`` for the HTML and PDF of the format specification -* ``language/_build/html`` and ``language/_build/latex`` for the HTML and PDF of the specification language * ``storage/_build/html`` and ``storage/_build/latex`` for the HTML and PDF of the storage diff --git a/docs/format/Readme.md b/docs/format/Readme.md index b399fbfd..acde4fe5 100644 --- a/docs/format/Readme.md +++ b/docs/format/Readme.md @@ -1,6 +1,6 @@ **Overview** -The NWB:N specification documentation uses Sphinx [http://www.sphinx-doc.org/en/stable/index.html](http://www.sphinx-doc.org/en/stable/index.html) +The NWB specification documentation uses Sphinx [http://www.sphinx-doc.org/en/stable/index.html](http://www.sphinx-doc.org/en/stable/index.html) **Prerequisites** diff --git a/docs/format/source/conf.py b/docs/format/source/conf.py index 655d13ce..7ff48d4f 100644 --- a/docs/format/source/conf.py +++ b/docs/format/source/conf.py @@ -76,7 +76,7 @@ def setup(app): # General information about the project. project = u'NWB Format Specification' -copyright = u'2017-2021, Neurodata Without Borders' +copyright = u'2017-2023, Neurodata Without Borders' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the @@ -85,7 +85,7 @@ def setup(app): # The short X.Y version. version = '2.6.0' # The full version, including alpha/beta/rc tags. 
-release = '2.6.0-alpha' +release = '2.6.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/docs/format/source/credits.rst b/docs/format/source/credits.rst index efe9d5b0..3e209982 100644 --- a/docs/format/source/credits.rst +++ b/docs/format/source/credits.rst @@ -5,18 +5,18 @@ Credits Acknowledgments =============== -For details on the partners, members, and supporters of NWB:N please the http://www.nwb.org/ project website. +For details on the partners, members, and supporters of NWB please the http://www.nwb.org/ project website. For specific contributions to the format specification and this document see the change logs of the Git repository at https://github.com/NeurodataWithoutBorders/nwb-schema . Authors ======= -NWB:N: Version 2.0.0 and later +NWB: Version 2.0.0 and later ------------------------------ -Documentation for Version 2 of the NWB:N format and later have been created by -Oliver Ruebel and Andrew Tritt et al. in collaboration with the NWB:N community. +Documentation for Version 2 of the NWB format and later have been created by +Oliver Ruebel and Andrew Tritt et al. in collaboration with the NWB community. NWB:N: Version 1.0.x and earlier -------------------------------- diff --git a/docs/format/source/format_release_notes.rst b/docs/format/source/format_release_notes.rst index ab211127..e20c8a9b 100644 --- a/docs/format/source/format_release_notes.rst +++ b/docs/format/source/format_release_notes.rst @@ -4,14 +4,14 @@ Release Notes ============= -2.6.0-alpha (Upcoming) ----------------------- +2.6.0 (January 17, 2023) +----------------------- Minor changes ^^^^^^^^^^^^^ - Added OnePhotonSeries. (#523) - ``Subject.age`` has a new optional attribute, ``reference``, which can take a value of "birth" (default) or "gestational". (#525) -- Add "in seconds" to the doc of Units.spike_times. (#530) +- Added "in seconds" to the doc of Units.spike_times. (#530) 2.5.0 (June 14, 2022) @@ -34,7 +34,7 @@ Minor changes - Fixed dtype of ``data`` dataset of ``IndexSeries`` (int32 -> uint32). - Updated ``unit`` attribute of ``data`` to have fixed value "N/A". - Updated docstrings for the ``conversion``, ``resolution``, and ``offset`` attributes of ``data`` to indicate that - these fields are not used. + these fields are not used. - Added link to an ``Images`` object, which contains an ordered collection of images. - Discouraged use of the ``indexed_timeseries`` link to an ``ImageSeries``. - Updated ``TimeIntervals`` to use the new ``TimeSeriesReferenceVectorData`` type. This does not alter the overall structure diff --git a/docs/storage/source/credits.rst b/docs/storage/source/credits.rst index 2db81dcb..2da5cba1 100644 --- a/docs/storage/source/credits.rst +++ b/docs/storage/source/credits.rst @@ -5,16 +5,16 @@ Credits Authors ======= -NWB:N: Version 2.0.0 and later +NWB: Version 2.0.0 and later ------------------------------ -Documentation for storage of Version 2 of the NWB:N format and later have been created by -Oliver Ruebel and Andrew Tritt et al. in collaboration with the NWB:N community. +Documentation for storage of Version 2 of the NWB format and later have been created by +Oliver Ruebel and Andrew Tritt et al. in collaboration with the NWB community. Acknowledgments =============== -For details on the partners, funders, and supporters of NWB:N please the http://www.nwb.org/ project website. +For details on the partners, funders, and supporters of NWB please the http://www.nwb.org/ project website. 
For specific contributions to the format specification and this document see the change logs of the Git repository at https://github.com/NeurodataWithoutBorders/nwb-schema . diff --git a/docs/storage/source/storage_description.rst b/docs/storage/source/storage_description.rst index e4aec1af..b58d8a4e 100644 --- a/docs/storage/source/storage_description.rst +++ b/docs/storage/source/storage_description.rst @@ -1,31 +1,31 @@ .. _storage: ============= -NWB:N Storage +NWB Storage ============= What is the role of data storage? ================================= -The `NWB:N format specification `_ -defined using the `NWB:N specification language `_ +The `NWB format specification `_ +defined using the `NWB specification language `_ describes how to organize large collections of neuroscience data using basic primitives, e.g., Files, Groups, Datasets, Attributes, and Links to describe and hierarchically group data. The role of the data storage then is to store large collections of neuroscience data. In other words, -the role of the storage is to map NWB:N primitives (and types, i.e., neurodata_types) to persistent storage. -For an overview of the various components of the NWB:N project +the role of the storage is to map NWB primitives (and types, i.e., neurodata_types) to persistent storage. +For an overview of the various components of the NWB project see `here `_ . -How are NWB:N files stored? +How are NWB files stored? =========================== -The NWB:N format currently uses HDF5 as primary storage mechanism. The mapping of -the NWB:N format to HDF5 files is described in more detail in :numref:`sec-hdf5`. +The NWB format currently uses HDF5 as primary storage mechanism. The mapping of +the NWB format to HDF5 files is described in more detail in :numref:`sec-hdf5`. Are backends other than HDF5 supported? ======================================= -NWB:N currently only officially supports HDF5 as main storage backend. However, the PyNWB API has been +NWB currently only officially supports HDF5 as main storage backend. However, the PyNWB API has been designed to enable the design of custom read/write backends for the API, enabling other storage backends -to be mapped to NWB:N. +to be mapped to NWB. diff --git a/license.txt b/license.txt index fe86e283..ba6ea4d5 100644 --- a/license.txt +++ b/license.txt @@ -1,4 +1,4 @@ -“nwb-schema” Copyright (c) 2017-2021, The Regents of the University of California, through Lawrence Berkeley National Laboratory (subject to receipt of any required approvals from the U.S. Dept. of Energy). All rights reserved. +“nwb-schema” Copyright (c) 2017-2023, The Regents of the University of California, through Lawrence Berkeley National Laboratory (subject to receipt of any required approvals from the U.S. Dept. of Energy). All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: diff --git a/orig/schema.json b/orig/schema.json deleted file mode 100644 index 0aeac9d4..00000000 --- a/orig/schema.json +++ /dev/null @@ -1,2692 +0,0 @@ -{ - "fs": { - "core": { - "info": { - "description": "Specification for the core NWB (Neurodata Withoug Borders) format.", - "author": "Keith Godfrey. Converted to format specification by Jeff Teeters.", - "contact": "jteeters@berkeley.edu, keithg@alleninstitute.org", - "version": "1.0.4_beta", - "date": "June 8, 2016", - "name": "NWB file format specification" - }, - "doc": [ - { - "content": "\n

Neurodata Without Borders:\n Neurophysiology is a project to develop a unified data format for\n cellular-based neurophysiology data, focused on the dynamics of\n groups of neurons measured under a large range of experimental\n conditions. Participating labs provided use cases and critical\n feedback to the effort. The design goals for the NWB format included:

Compatibility
  • Cross-platform
  • Support for tool makers
Usability
  • Quickly develop a basic understanding of an experiment and its data
  • Review an experiment's details without programming knowledge
Flexibility
  • Accommodate an experiment's raw and processed data
  • Encapsulate all of an experiment's data, or link to external data source when necessary
Extensibility
  • Accommodate future experimental paradigms without sacrificing backwards compatibility.
  • Support custom extensions when the standard is lacking
Longevity
  • Data published in the format should be accessible for decades

Hierarchical\n Data Format (HDF) was selected for the NWB format\n because it met several of the project's requirements. First, it is\n a mature data format standard with libraries available in multiple\n programming languages. Second, the format's hierarchical structure\n allows data to be grouped into logical self-documenting sections. Its\n structure is analogous to a file system in which its \"groups\" and\n \"datasets\" correspond to directories and files. Groups and\n datasets can have attributes that provide additional details, such as\n authorities' identifiers. Third, its linking feature enables data\n stored in one location to be transparently accessed from multiple\n locations in the hierarchy. The linked data can be external to the\n file. Fourth,\n HDFView,\n a free, cross-platform application, can be used to open a file and\n browse data. Finally, ensuring the ongoing accessibility of\n HDF-stored data is the mission of The HDF Group, the nonprofit that\n is the steward of the technology.

The NWB format standard is codified in\n a schema file written in a specification language created for this\n project. The specification language describes the schema, including\n data types and associations. A new schema file will be published for\n each revision of the NWB format standard. Data publishers can use the\n specification language to extend the format in order to store types\n of data not managed by the base format.\n

\n ", - "level": 0, - "location": { - "position": "after", - "id": "_toc_top" - }, - "id": "Introduction", - "description": "Introduction section." - }, - { - "content": "\n

\n In this document (and in the specification language used to define\n the format) an identifier enclosed in angle brackets (e.g.\n \"<ElectricalSeries>\")\n denotes a group or dataset with a \"variable\" name. That is, the name\n within the HDF5 file is set by the application\n creating the file and multiple instances may be created within the same\n group (each having a unique name).\n Identifiers that are not enclosed in angle brackets\n (e.g. \"CompassDirection\") are\n the actual name of the group or dataset within the HDF5 file.\n There can only be one instance within a given group since the name is fixed.\n

\n ", - "level": 1, - "id": "Naming conventions", - "location": { - "position": "after", - "id": "Introduction" - } - }, - { - "content": "\n

\n In some instances, the specification refers to HDF5 links. When links are\n made within the file, HDF5 soft-links (and not hard-links) should be used. This\n is because soft-links distinguish between the link and the target of the link,\n whereas hard-links cause multiple names (paths) to be created for the\n target, and there is no way to determine which of these names are preferable\n in a given situation. If the target of a soft link\n is removed (or moved to another location in the HDF5 file)—both of\n which can be done using the HDF5 API—then the soft link will \"dangle,\" that\n is point to a target that no longer exists. For this reason, moving or removing\n targets of soft links should be avoided unless the links are updated to point\n to the new location.\n ", - "level": 1, - "id": "Link types", - "location": { - "position": "after", - "id": "Naming conventions" - } - }, - { - "content": "\n
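For illustration, a soft link can be created with h5py roughly as follows (a sketch only; the file name and paths are hypothetical, not part of the specification):
```
import h5py

# Sketch: create a soft link so the same data is reachable from two paths.
# A soft link stores only the target's path, so link and target stay distinct;
# if the target is later moved or removed, the link simply dangles.
with h5py.File("example.nwb", "w") as f:
    acquisition = f.create_group("acquisition")
    raw = acquisition.create_group("raw_data")
    analysis = f.create_group("analysis")
    analysis["raw_data"] = h5py.SoftLink("/acquisition/raw_data")
```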

\n In the format, the value of some datasets and attributes can usually be determined\n automatically from other parts of the HDF5 file. For example, a dataset that\n has as value the target of a link can be determined automatically from a list\n of links in the HDF5 file. When possible, the NWB API will automatically create\n such components and required groups. The components (datasets, attributes and\n required groups) that are automatically created by the API are\n indicated by the phrase (Automatically\n created) in the description or comment. The creation of these components is\n specified by the \"autogen\" option in the format specification language. This is\n not a part of the format (different API's may create the data files in\n different ways). The information is included for the convenience of those using\n the NWB API and also for developers of other APIs who may wish to also auto-generate\n these components.

\n ", - "level": 1, - "id": "Automatically created components", - "location": { - "position": "after", - "id": "Link types" - } - }, - { - "content": "\n
\n

The content of these organizational\n groups is more fully described in the section titled,\n File organization.\n The NWB format is\n based on TimeSeries\n and Modules and these are defined first.

NWB stores general optical and\n electrical physiology data in a way that should be understandable to\n a naive user after a few minutes using looking at the file in an HDF5\n browser, such as HDFView. The format is designed to be friendly to\n and usable by software tools and analysis scripts, and to impose few\n a priori assumptions about data representation and analysis. Metadata\n required to understand the data itself (core metadata) is generally\n stored with the data. Information required to interpret the\n experiment (general metadata) is stored in the group 'general'. Most\n general metadata is stored in free-form text fields. Machine-readable\n metadata is stored as attributes on these free-form text fields.

The only API assumed necessary to read\n a NWB file is an HDF5 library (e.g., h5py in python, libhdf5 in C,\n JHI5 in Java).

\n ", - "location": { - "position": "post", - "id": "Top level groups" - }, - "id": "top_level_groups_post", - "description": "Content under table of top level groups" - }, - { - "content": "\n

Top-level\n datasets are for file identification and version information.

\n ", - "location": { - "position": "mid", - "id": "Top level datasets" - }, - "id": "top_level_datasets_mid", - "description": "Text place after header for top-level datasets but before table" - }, - { - "content": "\n


\n

\n All times are stored in seconds using double precision (64 bit) floating\n point values. A smaller floating point value, e.g. 32 bit, is not permitted\n for storing times.\n This is because significant errors for time can result from using smaller data sizes.\n Throughout this document, sizes (number of bits) are provided\n for many datatypes (e.g. float32).\n If the size is followed by \"!\" then the size is the\n minimum size, otherwise it is the recommended size. For fields with a\n recommended size, larger or smaller sizes can be used (and for integer types both signed and\n unsigned), so long as the selected size encompasses the full range of\n data, and for floats, without loss of significant precision. Fields that have\n a minimum size can use larger, but not smaller sizes.\n
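As a minimal sketch (assuming h5py and NumPy; the file and dataset names are hypothetical), storing times this way looks like:
```
import numpy as np
import h5py

# Sketch: timestamps in seconds, stored as 64-bit floats as described above;
# a 32-bit float would lose precision over long recordings.
timestamps = np.arange(0.0, 10.0, 0.001, dtype=np.float64)
with h5py.File("example.nwb", "w") as f:
    f.create_dataset("timestamps", data=timestamps, dtype="float64")
```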

\n ", - "location": { - "position": "post", - "id": "Top level datasets" - }, - "id": "top_level_datasets_post", - "description": "Text place after table for top-level datasets" - }, - { - "content": "\n

The file\n format is designed around a data structure called a TimeSeries\n which stores time-varying data. A TimeSeries is a superset of\n several neurodata_types, including signal events, image stacks and\n experimental events. To account for different storage requirements\n and different modalities, a TimeSeries is defined in a minimal\n form and it can be extended, or subclassed, to account for different\n modalities and data storage requirements. When a TimeSeries is\n extended, it means that the 'subclassed' instance maintains\n or changes each of\n the components (eg, groups and datasets) of its parent and may have\n new groups and/or datasets of its own. The TimeSeries makes\n this process of defining such pairs more hierarchical.

Each TimeSeries has its own HDF5\n group, and all datasets belonging to a TimeSeries are in that\n group. The group contains time and data components and users are free\n to add additional fields as necessary. There are two time objects\n represented. The first, timestamps, stores time information\n that is corrected to the experiment's time base (i.e., aligned to a\n master clock, with time-zero aligned to the starting time of the\n experiment). This field is used for data processing and subsequent\n scientific analysis. The second, sync, is an optional group\n that can be used to store the sample times as reported by the\n acquisition/stimulus hardware, before samples are converted to a\n common timebase and corrected relative to the master clock. This\n approach allows the NWB format to support streaming of data directly\n from hardware sources.

\n ", - "level": 0, - "location": { - "position": "before", - "id": "" - }, - "id": "TimeSeries", - "description": "Text place before /, included in table of contents" - }, - { - "content": "\n

When data is streamed from experiment\n hardware it should be stored in an HDF5 dataset having the same\n attributes as data, with time information stored as necessary.\n This allows the raw data files to be separate file-system objects\n that can be set as read-only once the experiment is complete.\n TimeSeries objects in /acquisition will link to the data\n field in the raw time series. Hardware-recorded time data must be\n corrected to a common time base (e.g., timestamps from all hardware\n sources aligned) before it can be included in timestamps. The\n uncorrected time can be stored in the sync group.
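A hedged sketch of that layout, assuming h5py (file names and paths are hypothetical): the raw samples live in a separate, read-only file and the main file links to them.
```
import h5py

# Sketch: link to a "data" dataset kept in a separate raw-acquisition file.
# The external link is resolved only when the data is accessed.
with h5py.File("example.nwb", "a") as f:
    acquisition = f.require_group("acquisition")
    acquisition["raw_stream"] = h5py.ExternalLink("raw_stream.h5", "/data")
```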

The group holding the TimeSeries\n can be used to store additional information (HDF5 datasets) beyond\n what is required by the specification. I.e., an end user is free to\n add additional key/value pairs as necessary for their needs. It\n should be noted that such lab-specific extensions may not be\n recognized by analysis tools/scripts existing outside the lab.\n Extensions are described in section\n Extending the format).

The data element in the\n TimeSeries will typically be an array of any valid HDF5 data\n type (e.g., a multi-dimentsional floating point array). The data\n stored can be in any unit. The attributes of the data field must\n indicate the SI unit that the data relates to (or appropriate\n counterpart, such as color-space) and the multiplier necessary to\n convert stored values to the specified SI unit.
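For example (illustrative only, assuming h5py; the values and names are hypothetical), data recorded in millivolts could be stored with attributes giving the SI unit and the conversion multiplier:
```
import numpy as np
import h5py

# Sketch: raw values in millivolts; attributes describe the SI unit and the
# multiplier needed to convert the stored values to that unit.
raw_mv = np.array([1.5, 2.0, 2.5], dtype=np.float32)
with h5py.File("example.nwb", "w") as f:
    data = f.create_dataset("data", data=raw_mv)
    data.attrs["unit"] = "volt"
    data.attrs["conversion"] = 0.001  # stored value * conversion = volts
```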

\n ", - "location": { - "position": "post", - "id": "" - }, - "id": "_post", - "description": "Text place after /, not included in table of contents" - }, - { - "content": "\n

The TimeSeries\n is a data structure/object. It can be \"subclassed\" (or extended)\n to represent\n more narrowly focused modalities (e.g., electrical versus optical\n physiology) as well as new modalities (eg, video tracking of whisker\n positions). When it a TimeSeries is subclassed, new datasets\n can be added while all datasets of parent classes are\n either preserved as specified in the parent class\n or replaced by a new definition (changed). In the tables\n that follow, identifiers in the \"Id\" column that change the definition in\n the parent\n class are underlined. An\n initial set of subclasses are described here. Users are free to\n define subclasses for their particular requirements. This can be done\n by creating an extension to the format defining a\n new TimeSeries subclass (see\n Extending the format).

\n

All datasets that are defined to be\n part of TimeSeries have the text attribute 'unit' that stores the\n unit specified in the documentation.

\n ", - "level": 0, - "location": { - "position": "before", - "id": "" - }, - "id": "TimeSeries Class Hierarchy", - "description": "Text placed before /, included in table of contents" - }, - { - "content": "\n

\n NWB uses modules to store data for—and represent the results of—common\n data processing steps, such as spike sorting and image segmentation, that occur\n before scientific analysis of the data. Modules store the data\n used by software tools to calculate these intermediate results. Each\n module provides a list of the data it makes available, and it is free\n to provide whatever additional data that the module generates.\n Additional documentation is required for data that goes beyond\n standard definitions. All modules are stored directly under\n group /processing. The name of each module\n is chosen by the data provider (i.e. modules have a \"variable\" name).\n The particular data within each module is specified by one or more\n interfaces, which are groups residing directly within a module.\n Each interface extends (contains the attributes in) group\n \"><Interface> and has a fixed\n name (e.g. ImageSegmentation) that suggests the\n type of data it contains. The names of the interfaces within a given\n module are listed in the \"interfaces\" attribute for the module.\n The different types of Interfaces are described below.\n
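As a rough sketch (assuming h5py; the module and interface names are only examples), a module with its 'interfaces' attribute might be laid out like this:
```
import h5py

# Sketch: a processing module whose "interfaces" attribute lists the
# interface groups it contains.
with h5py.File("example.nwb", "a") as f:
    processing = f.require_group("processing")
    module = processing.create_group("ophys_module")
    module.create_group("ImageSegmentation")
    module.create_group("DfOverF")
    module.attrs["interfaces"] = ["ImageSegmentation", "DfOverF"]
```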


\n ", - "level": 0, - "location": { - "position": "before", - "id": "" - }, - "id": "Modules", - "description": "Text placed before /, included in table of contents" - }, - { - "content": "\n

When converting data from another\n format into NWB, there will be times that some data, particularly the\n raw data in acquisition and stimulus, is not included\n as part of the conversion. In such cases, a TimeSeries should\n be created that represents the missing data, even if the contents of\n that TimeSeries are empty. This helps to interpret the data in\n the file.

\n ", - "location": { - "position": "post", - "id": "/acquisition" - }, - "id": "/acquisition_post", - "description": "Text placed after acquisition table" - }, - { - "content": "\n\n

\n The data organization presented in this document constitutes the core NWB\n format. Extensibility is handled by allowing users to store additional\n data as necessary using new datasets, attributes or groups. There are\n two ways to document these additions. The first is to add an attribute\n \"schema_id\" with value the string \"Custom\" to the additional groups\n or datasets, and provide documentation to describe the extra data if\n it is not clear from the context what the data represent. This method\n is simple but does not include a consistant way to describe\n the additions. The second method is to write an\n extension to the format. With this method, the additions are\n describe by the extension and attribute \"schema_id\" is set to\n the string \"namespace:id\" where namespace\n is the namespace of the extension, and id\n is the identifier of the structure within the namespace.\n Extensions to the format are written\n using the same specification language that is used to define the\n core format. Creating an extension allows adding the new data to the file\n through the API, validating files containing extra data, and also\n generating documentation for the additions.\n Popular extensions can be proposed\n and added to the official format specification.\n Writing and using extensions are described in the API documentation.\n Both methods allow extensibility without breaking backward compatibility.\n
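A minimal sketch of the first method, assuming h5py (the group name and attribute values below are illustrative):
```
import h5py

# Sketch: mark a lab-specific addition with the "schema_id" attribute.
with h5py.File("example.nwb", "a") as f:
    extras = f.create_group("my_lab_extras")     # hypothetical extra group
    extras.attrs["schema_id"] = "Custom"         # ad-hoc, undocumented addition
    # With a written extension, point at the defining namespace instead:
    # extras.attrs["schema_id"] = "mylab:LabMetaData"   # "namespace:id"
```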

\n ", - "level": 0, - "location": { - "position": "after", - "id": "_toc_bottom" - }, - "id": "Extending the format", - "description": "Describes how to extend the format." - }, - { - "content": "\n

The Neurodata Without Borders:\nNeurophysiology Initiative is funded by GE, the Allen Institute for\nBrain Science, the Howard Hughes Medical Institute (HHMI), The Kavli\nFoundation and the International Neuroinformatics Coordinating\nFacility. Our founding scientific partners are the Allen Institute,\nthe Svoboda Lab at the Janelia Research Campus of HHMI, the Meister\nLab at the California Institute of Technology, the Buzsaki Lab at\nNew York University School of Medicine, and the University of\nCalifornia, Berkeley. Ovation.io is our founding development partner.\nKen Harris at University College London provided invaluable input and\nadvice.

\n


\n ", - "level": 0, - "id": "Acknowledgements", - "location": { - "position": "after", - "id": "Extending the format" - } - }, - { - "content": "\n

1.0.3-Beta June 2016

\n

Generate documentation directly from the format specification file.
\nChange ImageSeries external_file to an array. Added attribute\nstarting_frame.
\nMade TimeSeries description and comments recommended.
\n

Added IZeroClampSeries.

\n


\n\n\n

1.0.3 April, 2016

\n

Renamed \"ISI_Retinotopy\" to \"ISIRetinotopy\"
\nChange ImageSeries external_file to an array. Added attribute\nstarting_frame.
\n

Added IZeroClampSeries.

\n


\n\n

\n

1.0.2 February, 2016

\n

Fixed documentation error, updating\n'neurodata_version' to 'nwb_version'

\n

Created ISI_Retinotopy interface

\n

In ImageSegmentation module, moved\npix_mask::weight attribute to be its own dataset, named\npix_mask_weight. Attribute proved inadequate for storing sufficiently\nlarge array data for some segments

\n

Moved 'gain' field from\nCurrent/VoltageClampSeries to parent PatchClampSeries, due to the need for\nstimuli to sometimes store gain

\n

Added Ken Harris to the\nAcknowledgements section

\n


\n\n

\n

1.0.1 October 7th, 2015

\n

Added 'required' field to tables in the\ndocumentation, to indicate if group/dataset/attribute is required,\nstandard or optional

\n

Obsoleted the 'modification_time' attribute of\n'file_create_date' and made 'file_create_date' a text array

\n

Removed 'resistance_compensation' from\nCurrentClampSeries due to being a duplicate of another field

\n

Upgraded TwoPhotonSeries::imaging_plane\nto be a required value

\n

Removed the 'tags' attribute from group\n'epochs' as it was fully redundant with the 'epoch/tags' dataset

\n

Added text to the documentation stating\nthat specified sizes for integer values are recommended sizes, while\nsizes for floats are minimum sizes

\n

Added text to the documentation stating\nthat, if the TimeSeries::data::resolution attribute value is unknown,\nthen a NaN should be stored

\n


\n\n

\n

1.0.0 September 28th, 2015

\n

Convert document to .html

\n


\n\n

\n

Design\nnotes

\n

1.0.1

\n

Declaring the following groups as\nrequired (this was implicit before)\n

\n


\n\n

\n

acquisition/

\n

_ images/

\n

_ timeseries/

\n

analysis/

\n

epochs/

\n

general/

\n

processing/

\n

stimulus/

\n

_ presentation/

\n

_ templates/

\n


\n\n

\n

This is to ensure consistency between\n.nwb files, to provide a minimum expected structure, and to avoid\nconfusion by having someone expect time series to be in places\nthey're not. I.e., if 'acquisition/timeseries' is not present,\nsomeone might reasonably expect that acquisition time series might\nreside in 'acquisition/'. It is also a subtle reminder about what the\nfile is designed to store, a sort of built-in documentation.\nSubfolders in 'general/' are only to be included as needed. Scanning\n'general/' should provide the user a quick idea what the experiment\nis about, so only domain-relevant subfolders should be present (e.g.,\n'optogenetics' and 'optophysiology'). There should always be a\n'general/devices', but it doesn't seem worth making it mandatory\nwithout making all subfolders mandatory here.\n
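A small h5py check of the required groups listed above (illustrative only; the file name is hypothetical):

    import h5py

    REQUIRED_GROUPS = [
        "acquisition/images", "acquisition/timeseries", "analysis", "epochs",
        "general", "processing", "stimulus/presentation", "stimulus/templates",
    ]

    with h5py.File("example.nwb", "r") as f:
        missing = [name for name in REQUIRED_GROUPS if name not in f]
        if missing:
            print("Missing required groups:", missing)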

\n


\n\n

\n

TwoPhotonSeries::imaging_plane was\nupgraded to mandatory to help enforce inclusion of important metadata\nin the file.

\n


\n\n

\n

The listed size of integers is the\nsuggested size. What's important for integers is simply that the\ninteger is large enough to store the required data, and preferably\nnot larger. For floating point, double precision is required for timestamps,\nwhile single-precision floating point is largely sufficient for other uses. This is\nwhy doubles (float64) are stated in some places. Because floating\npoint sizes are provided, integer sizes are provided as well.
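For example, under this guidance timestamps are written as float64 while a modest integer signal can use a smaller width; a hedged h5py/numpy sketch with hypothetical names:

    import h5py
    import numpy as np

    with h5py.File("example.nwb", "a") as f:
        ts = f.require_group("/acquisition/timeseries/lick_trace")  # hypothetical series
        # Timestamps are stored as double precision (float64).
        ts.create_dataset("timestamps", data=np.arange(100, dtype=np.float64) / 1000.0)
        # Integer widths are only suggestions; any width that holds the values is acceptable.
        ts.create_dataset("data", data=np.zeros(100, dtype=np.int16))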

\n


\n\n

\n

Why do timestamps_link and data_link\nrecord linking between datasets, but links between epochs and\ntimeseries are not recorded?

\n

\nEpochs\nhave a hard link to the entire time series (i.e., the HDF5 group). If 100\nepochs link to a time series, there is only one time series. The data\nand timestamps within it are not shared anywhere (at least from the\nepoch linking). An\nepoch is an entity that is put in for convenience and annotation, so\nthere isn't necessarily an important association between which epochs\nlink to which time series (all epochs could link to all time series).

\n

\n
\n\n

\n

\nThe\ntimestamps_link and data_link fields refer to links made between time\nseries, such as when time series A and time series B each have\ndifferent data (or timestamps) but share timestamps (or data). This is much more\nimportant information, as it shows structural associations in the\ndata.
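A sketch of the kind of sharing these fields describe, assuming h5py: two series hard-link the same timestamps dataset, and the timestamp_link attribute would then list both paths (series names are hypothetical; in practice the API maintains these attributes):

    import h5py
    import numpy as np

    with h5py.File("example.nwb", "a") as f:
        a = f.require_group("/stimulus/presentation/Sweep_0")
        b = f.require_group("/stimulus/presentation/Sweep_1")
        a.create_dataset("timestamps", data=np.arange(10, dtype=np.float64))
        # Hard-link Sweep_1's timestamps to Sweep_0's: one dataset reachable by two paths.
        b["timestamps"] = a["timestamps"]
        shared = ["/stimulus/presentation/Sweep_0", "/stimulus/presentation/Sweep_1"]
        for series in (a, b):
            # The timestamp_link attribute on each series would list both paths.
            series.attrs["timestamp_link"] = shared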

\n ", - "level": 0, - "id": "Change history", - "location": { - "position": "after", - "id": "Acknowledgements" - } - } - ], - "schema": { - "LFP/": { - "merge": [ - "/" - ], - "include": { - "/+": {} - }, - "description": "LFP data from one or more channels. The electrode map in each published ElectricalSeries will identify which channels are providing LFP data. Filter properties should be noted in the ElectricalSeries description or comments field.", - "attributes": { - "help?": { - "const": true, - "value": "LFP data from one or more channels. Filter properties should be noted in the ElectricalSeries", - "data_type": "text" - } - } - }, - "BehavioralTimeSeries/": { - "merge": [ - "/" - ], - "include": { - "/*": {} - }, - "description": "TimeSeries for storing Behavoioral time series data.See description of BehavioralEpochs for more details.", - "attributes": { - "help?": { - "const": true, - "value": "General container for storing continuously sampled behavioral data.", - "data_type": "text" - } - } - }, - "UnitTimes/": { - "unit_list": { - "references": "/", - "autogen": { - "trim": true, - "type": "names", - "target": "", - "qty": "*" - }, - "description": "List of units present.", - "data_type": "text", - "dimensions": [ - "num_units" - ] - }, - "merge": [ - "/" - ], - "/+": { - "unit_description": { - "description": "Description of the unit (eg, cell type).", - "data_type": "text" - }, - "source^": { - "description": "Name, path or description of where unit times originated. This is necessary only if the info here differs from or is more fine-grained than the interface's source field", - "data_type": "text" - }, - "description": "Group storing times for <unit_N>.", - "times": { - "description": "Spike time for the units (exact or estimated)", - "data_type": "float64!", - "dimensions": [ - "num_events" - ] - } - }, - "description": "Event times of observed units (e.g. cell, synapse, etc.). The UnitTimes group contains a group for each unit. The name of the group should match the value in the source module, if that is possible/relevant (e.g., name of ROIs from Segmentation module).", - "attributes": { - "help?": { - "const": true, - "value": "Estimated spike times from a single unit", - "data_type": "text" - } - } - }, - "/general/intracellular_ephys/?": { - "filtering?": { - "description": "Description of filtering used. COMMENT: Includes filtering type and parameters, frequency fall- off, etc. If this changes between TimeSeries, filter description should be stored as a text attribute for each TimeSeries.", - "data_type": "text" - }, - "description": "Metadata related to intracellular electrophysiology", - "/": { - "initial_access_resistance?": { - "description": "Initial access resistance", - "data_type": "text" - }, - "location?": { - "description": "Area, layer, comments on estimation, stereotaxis coordinates (if in vivo, etc)", - "data_type": "text" - }, - "device?": { - "description": "Name(s) of devices in general/devices", - "data_type": "text" - }, - "seal?": { - "description": "Information about seal used for recording", - "data_type": "text" - }, - "description": { - "description": "Recording description, description of electrode (e.g., whole-cell, sharp, etc)COMMENT: Free-form text (can be from Methods)", - "data_type": "text" - }, - "_description": "One of possibly many. 
COMMENT: Name should be informative.", - "filtering?": { - "description": "Electrode specific filtering.", - "data_type": "text" - }, - "slice?": { - "description": "Information about slice used for recording", - "data_type": "text" - }, - "resistance?": { - "description": "Electrode resistance COMMENT: unit: Ohm", - "data_type": "text" - } - } - }, - "/processing/": { - "/*": { - "attributes": { - "description?": { - "description": "Description of Module", - "data_type": "text" - }, - "interfaces": { - "autogen": { - "trim": true, - "qty": "*", - "type": "names", - "target": "<*>/", - "tsig": { - "type": "group", - "attrs": { - "neurodata_type": "Interface" - } - } - }, - "description": "Names of the data interfaces offered by this module. COMMENT: E.g., [0]=\"EventDetection\", [1]=\"Clustering\", [2]=\"FeatureExtraction\"", - "data_type": "text", - "dimensions": [ - "num_interfaces" - ] - }, - "neurodata_type": { - "value": "Module", - "const": true, - "description": "The string \"Module\"", - "data_type": "text" - } - }, - "include": { - "/*": { - "_options": { - "subclasses": true - } - } - }, - "description": "Module. Name should be descriptive. Stores a collection of related data organized by contained interfaces. Each interface is a contract specifying content related to a particular type of data." - }, - "description": "The home for processing Modules. These modules perform intermediate analysis of data that is necessary to perform before scientific analysis. Examples include spike clustering, extracting position from tracking data, stitching together image slices. COMMENT: Modules are defined below. They can be large and express many data sets from relatively complex analysis (e.g., spike detection and clustering) or small, representing extraction of position information from tracking video, or even binary lick/no-lick decisions. Common software tools (e.g., klustakwik, MClust) are expected to read/write data here. MORE_INFO: 'Processing' refers to intermediate analysis of the acquired data to make it more amenable to scientific analysis. These are performed using Modules, as defined above. All modules reside in the processing group.", - "autogen": { - "type": "create" - } - }, - "/": { - "merge": [ - "/" - ], - "description": "General image data that is common between acquisition and stimulus time series. Sometimes the image data is stored in the HDF5 file in a raw format while other times it will be stored as an external image file in the host file system. The data field will either be binary data or empty. TimeSeries::data array structure: [frame] [y][x] or [frame][z][y][x].", - "_required": { - "ext_data": [ - "external_file XOR data", - "Either 'external_file' or 'data' must be specified, but not both" - ] - }, - "dimension^": { - "description": "Number of pixels on x, y, (and z) axes.", - "data_type": "int32", - "dimensions": [ - "rank" - ] - }, - "attributes": { - "help?": { - "const": true, - "value": "Storage object for time-series 2-D image data", - "data_type": "text" - }, - "ancestry": { - "value": [ - "TimeSeries", - "ImageSeries" - ], - "const": true, - "dimensions": [ - "2" - ], - "data_type": "text" - } - }, - "format^": { - "description": "Format of image. If this is 'external' then the field external_file contains the path or URL information to that file. For tiff, png, jpg, etc, the binary representation of the image is stored in data. If the format is raw then the fields bit_per_pixel and dimension are used. 
For raw images, only a single channel is stored (eg, red).", - "data_type": "text" - }, - "data": { - "description": "Either binary data containing image or empty.", - "data_type": "number", - "dimensions": [ - [ - "x", - "y" - ], - [ - "frame", - "y", - "x" - ], - [ - "frame", - "z", - "y", - "x" - ] - ] - }, - "bits_per_pixel^": { - "description": "Number of bit per image pixel.", - "data_type": "int32" - }, - "external_file?": { - "attributes": { - "starting_frame": { - "dimensions": [ - "num_files" - ], - "data_type": "int", - "description": "Each entry is the frame number (within the full ImageSeries) of the first frame in the corresponding external_file entry. This serves as an index to what frames each file contains, allowing random access.Zero-based indexing is used. (The first element will always be zero)." - } - }, - "description": "Path or URL to one or more external file(s). Field only present if format=external. NOTE: this is only relevant if the image is stored in the file system as one or more image file(s). This field should NOT be used if the image is stored in another HDF5 file and that file is HDF5 linked to this file.", - "data_type": "text", - "dimensions": [ - "num_files" - ] - } - }, - "/": { - "merge": [ - "/" - ], - "description": "Stores recorded voltage data from intracellular recordings when all current and amplifier settings are off (i.e., CurrentClampSeries fields will be zero). There is no CurrentClampStimulusSeries associated with an IZero series because the amplifier is disconnected and no stimulus can reach the cell.", - "attributes": { - "help?": { - "const": true, - "value": "Voltage from intracellular recordings when all current and amplifier settings are off", - "data_type": "text" - }, - "ancestry": { - "value": [ - "TimeSeries", - "PatchClampSeries", - "CurrentClampSeries", - "IZeroClampSeries" - ], - "const": true, - "dimensions": [ - "4" - ], - "data_type": "text" - } - } - }, - "FilteredEphys/": { - "merge": [ - "/" - ], - "include": { - "/+": {} - }, - "description": "Ephys data from one or more channels that has been subjected to filtering. Examples of filtered data include Theta and Gamma (LFP has its own interface). FilteredEphys modules publish an ElectricalSeries for each filtered channel or set of channels. The name of each ElectricalSeries is arbitrary but should be informative. The source of the filtered data, whether this is from analysis of another time series or as acquired by hardware, should be noted in each's TimeSeries::description field. There is no assumed 1::1 correspondence between filtered ephys signals and electrodes, as a single signal can apply to many nearby electrodes, and one electrode may have different filtered (e.g., theta and/or gamma) signals represented.", - "attributes": { - "help?": { - "const": true, - "value": "Ephys data from one or more channels that is subjected to filtering, such as for gamma or theta oscillations (LFP has its own interface). Filter properties should be noted in the ElectricalSeries", - "data_type": "text" - } - } - }, - "EventDetection/": { - "source_idx": { - "references": "source_electrical_series/data.num_times", - "description": "Indices (zero-based) into source ElectricalSeries::data array corresponding to time of event. Module description should define what is meant by time of event (e.g., .25msec before action potential peak, zero-crossing time, etc). 
The index points to each event from the raw data", - "data_type": "int32", - "dimensions": [ - "num_events" - ] - }, - "description": "Detected spike events from voltage trace(s).", - "detection_method": { - "description": "Description of how events were detected, such as voltage threshold, or dV/dT threshold, as well as relevant values.", - "data_type": "text" - }, - "source_electricalseries/": { - "link": { - "allow_subclasses": false, - "target_type": "/" - }, - "description": "HDF5 link to ElectricalSeries that this data was calculated from. Metadata about electrodes and their position can be read from that ElectricalSeries so it's not necessary to mandate that information be stored here" - }, - "times": { - "description": "Timestamps of events, in Seconds", - "data_type": "float64!", - "dimensions": [ - "num_events" - ], - "attributes": { - "unit": { - "value": "Seconds", - "description": "The string \"Seconds\"", - "data_type": "text" - } - } - }, - "merge": [ - "/" - ], - "source_electricalseries_path": { - "autogen": { - "trim": false, - "format": "path is $t", - "type": "link_path", - "target": "source_electricalseries/", - "qty": "!" - }, - "description": "Path to linked ElectricalSeries.", - "data_type": "text" - } - }, - "/": { - "description": "Stores stimulus or response current or voltage. Superclass definition for patch-clamp data (this class should not be instantiated directly).", - "gain^": { - "description": "Units: Volt/Amp (v-clamp) or Volt/Volt (c-clamp)", - "data_type": "float" - }, - "electrode_name": { - "references": "/general/intracellular_ephys//", - "description": "Name of electrode entry in /general/intracellular_ephys.", - "data_type": "text" - }, - "merge": [ - "/" - ], - "attributes": { - "help?": { - "const": true, - "value": "Superclass definition for patch-clamp data", - "data_type": "text" - }, - "ancestry": { - "value": [ - "TimeSeries", - "PatchClampSeries" - ], - "const": true, - "dimensions": [ - "2" - ], - "data_type": "text" - } - }, - "data": { - "description": "Recorded voltage or current.", - "data_type": "number", - "dimensions": [ - "num_times" - ] - } - }, - "BehavioralEvents/": { - "merge": [ - "/" - ], - "include": { - "/*": {} - }, - "description": "TimeSeries for storing behavioral events. See description of BehavioralEpochs for more details.", - "attributes": { - "help?": { - "const": true, - "value": "Position data, whether along the x, xy or xyz axis", - "data_type": "text" - } - } - }, - "EyeTracking/": { - "merge": [ - "/" - ], - "include": { - "/*": {} - }, - "description": "Eye-tracking data, representing direction of gaze.", - "attributes": { - "help?": { - "const": true, - "value": "Eye-tracking data, representing direction of gaze", - "data_type": "text" - } - } - }, - "/": { - "merge": [ - "/" - ], - "data": { - "attributes": { - "conversion": { - "const": true, - "value": "float('NaN')" - }, - "resolution": { - "const": true, - "value": "float('NaN')" - }, - "unit": { - "const": true, - "value": "n/a" - } - }, - "description": ">0 if interval started, <0 if interval ended.", - "data_type": "int8", - "dimensions": [ - "num_times" - ] - }, - "description": "Stores intervals of data. The timestamps field stores the beginning and end of intervals. The data field stores whether the interval just started (>0 value) or ended (<0 value). Different interval types can be represented in the same series by using multiple key values (eg, 1 for feature A, 2 for feature B, 3 for feature C, etc). The field data stores an 8-bit integer. 
This is largely an alias of a standard TimeSeries but that is identifiable as representing time intervals in a machine-readable way.", - "attributes": { - "help?": { - "const": true, - "value": "Stores the start and stop times for events", - "data_type": "text" - }, - "ancestry": { - "value": [ - "TimeSeries", - "IntervalSeries" - ], - "const": true, - "dimensions": [ - "2" - ], - "data_type": "text" - } - } - }, - "/": { - "site": { - "references": "/general/optogenetics//", - "description": "Name of site description in general/optogentics.", - "data_type": "text" - }, - "merge": [ - "/" - ], - "data": { - "attributes": { - "unit": { - "value": "watt", - "data_type": "text" - } - }, - "description": "Applied power for optogenetic stimulus.", - "data_type": "float32", - "dimensions": [ - "num_times" - ] - }, - "description": "Optogenetic stimulus. The data[] field is in unit of watts.", - "attributes": { - "help?": { - "const": true, - "value": "Optogenetic stimulus", - "data_type": "text" - }, - "ancestry": { - "value": [ - "TimeSeries", - "OptogeneticSeries" - ], - "const": true, - "dimensions": [ - "2" - ], - "data_type": "text" - } - } - }, - "ImagingRetinotopy/": { - "description": "Intrinsic signal optical imaging or widefield imaging for measuring retinotopy. Stores orthogonal maps (e.g., altitude/azimuth; radius/theta) of responses to specific stimuli and a combined polarity map from which to identify visual areas.
Note: for data consistency, all images and arrays are stored in the format [row][column] and [row, col], which equates to [y][x]. Field of view and dimension arrays may appear backward (i.e., y before x).", - "focal_depth_image": { - "attributes": { - "focal_depth^": { - "description": "Focal depth offset, in meters", - "data_type": "float" - }, - "bits_per_pixel": { - "description": "Number of bits used to represent each value. This is necessary to determine maximum (white) pixel value", - "data_type": "int32" - }, - "field_of_view^": { - "dimensions": [ - "row_col" - ], - "data_type": "float", - "description": "Size of viewing area, in meters" - }, - "dimension": { - "dimensions": [ - "row_col" - ], - "data_type": "int32", - "description": "Number of rows and columns in the image. NOTE: row, column representation is equivalent to height,width." - }, - "format": { - "description": "Format of image. Right now only 'raw' supported", - "data_type": "text" - } - }, - "description": "Gray-scale image taken with same settings/parameters (e.g., focal depth, wavelength) as data collection. Array format: [rows][columns]", - "data_type": "uint16", - "dimensions": [ - "num_rows", - "num_cols" - ] - }, - "sign_map": { - "attributes": { - "field_of_view^": { - "dimensions": [ - "row_col" - ], - "data_type": "float", - "description": "Size of viewing area, in meters." - }, - "dimension": { - "dimensions": [ - "row_col" - ], - "data_type": "int32", - "description": "Number of rows and columns in the image. NOTE: row, column representation is equivalent to height,width." - } - }, - "description": "Sine of the angle between the direction of the gradient in axis_1 and axis_2", - "data_type": "float32", - "dimensions": [ - "num_rows", - "num_cols" - ] - }, - "vasculature_image": { - "attributes": { - "bits_per_pixel": { - "description": "Number of bits used to represent each value. This is necessary to determine maximum (white) pixel value", - "data_type": "int32" - }, - "field_of_view^": { - "dimensions": [ - "row_col" - ], - "data_type": "float", - "description": "Size of viewing area, in meters" - }, - "dimension": { - "dimensions": [ - "row_col" - ], - "data_type": "int32", - "description": "Number of rows and columns in the image. NOTE: row, column representation is equivalent to height,width." - }, - "format": { - "description": "Format of image. Right now only 'raw' supported", - "data_type": "text" - } - }, - "description": "Gray-scale anatomical image of cortical surface. Array structure: [rows][columns]", - "data_type": "uint16", - "dimensions": [ - "num_rows", - "num_cols" - ] - }, - "attributes": { - "help?": { - "const": true, - "value": "Intrinsic signal optical imaging or Widefield imaging for measuring retinotopy", - "data_type": "text" - } - }, - "axis_1_phase_map": { - "attributes": { - "field_of_view": { - "row_col": { - "type": "structure", - "components": [ - { - "alias": "row", - "unit": "meter" - }, - { - "alias": "column", - "unit": "meter" - } - ] - }, - "dimensions": [ - "row_col" - ], - "data_type": "float", - "description": "Size of viewing area, in meters" - }, - "dimension": { - "dimensions": [ - "row_col" - ], - "data_type": "int32", - "description": "Number of rows and columns in the image. NOTE: row, column representation is equivalent to height,width." 
- }, - "unit": { - "description": "Unit that axis data is stored in (e.g., degrees)", - "data_type": "text" - } - }, - "description": "Phase response to stimulus on the first measured axis", - "data_type": "float32", - "dimensions": [ - "num_rows", - "num_cols" - ] - }, - "axis_2_phase_map": { - "attributes": { - "field_of_view": { - "dimensions": [ - "row_col" - ], - "data_type": "float", - "description": "Size of viewing area, in meters" - }, - "dimension": { - "dimensions": [ - "row_col" - ], - "data_type": "int32", - "description": "Number of rows and columns in the image. NOTE: row, column representation is equivalent to height,width." - }, - "unit": { - "description": "Unit that axis data is stored in (e.g., degrees)", - "data_type": "text" - } - }, - "description": "Phase response to stimulus on the second measured axis", - "data_type": "float32", - "dimensions": [ - "num_rows", - "num_cols" - ] - }, - "axis_descriptions": { - "description": "Two-element array describing the contents of the two response axis fields. Description should be something like ['altitude', 'azimuth'] or '['radius', 'theta']", - "data_type": "text", - "dimensions": [ - "2" - ] - }, - "merge": [ - "/" - ], - "axis_2_power_map^": { - "attributes": { - "field_of_view^": { - "dimensions": [ - "row_col" - ], - "data_type": "float", - "description": "Size of viewing area, in meters" - }, - "dimension": { - "dimensions": [ - "row_col" - ], - "data_type": "int32", - "description": "Number of rows and columns in the image. NOTE: row, column representation is equivalent to height,width." - }, - "unit": { - "description": "Unit that axis data is stored in (e.g., degrees)", - "data_type": "text" - } - }, - "description": "Power response on the second measured axis. Response is scaled so 0.0 is no power in the response and 1.0 is maximum relative power.", - "data_type": "float32", - "dimensions": [ - "num_rows", - "num_cols" - ] - }, - "axis_1_power_map^": { - "attributes": { - "field_of_view^": { - "dimensions": [ - "row_col" - ], - "data_type": "float", - "description": "Size of viewing area, in meters" - }, - "dimension": { - "dimensions": [ - "row_col" - ], - "data_type": "int32", - "description": "Number of rows and columns in the image. NOTE: row, column representation is equivalent to height,width." - }, - "unit": { - "description": "Unit that axis data is stored in (e.g., degrees)", - "data_type": "text" - } - }, - "description": "Power response on the first measured axis. Response is scaled so 0.0 is no power in the response and 1.0 is maximum relative power.", - "data_type": "float32", - "dimensions": [ - "num_rows", - "num_cols" - ] - } - }, - "CompassDirection/": { - "merge": [ - "/" - ], - "include": { - "/*": {} - }, - "description": "With a CompassDirection interface, a module publishes a SpatialSeries object representing a floating point value for theta. The SpatialSeries::reference_frame field should indicate what direction corresponds to 0 and which is the direction of rotation (this should be clockwise). The si_unit for the SpatialSeries should be radians or degrees.", - "attributes": { - "help?": { - "const": true, - "value": "Direction as measured radially. Spatial series reference frame should indicate which direction corresponds to zero and what is the direction of positive rotation", - "data_type": "text" - } - } - }, - "BehavioralEpochs/": { - "merge": [ - "/" - ], - "include": { - "/*": {} - }, - "description": "TimeSeries for storing behavoioral epochs. 
The objective of this and the other two Behavioral interfaces (e.g. BehavioralEvents and BehavioralTimeSeries) is to provide generic hooks for software tools/scripts. This allows a tool/script to take the output one specific interface (e.g., UnitTimes) and plot that data relative to another data modality (e.g., behavioral events) without having to define all possible modalities in advance. Declaring one of these interfaces means that one or more TimeSeries of the specified type is published. These TimeSeries should reside in a group having the same name as the interface. For example, if a BehavioralTimeSeries interface is declared, the module will have one or more TimeSeries defined in the module sub-group \"BehavioralTimeSeries\". BehavioralEpochs should use IntervalSeries. BehavioralEvents is used for irregular events. BehavioralTimeSeries is for continuous data.", - "attributes": { - "help?": { - "const": true, - "value": "General container for storing behavorial epochs", - "data_type": "text" - } - } - }, - "/": { - "pmt_gain^": { - "description": "Photomultiplier gain", - "data_type": "float32" - }, - "field_of_view^": { - "whd": { - "type": "struct", - "components": [ - { - "alias": "width", - "unit": "meter" - }, - { - "alias": "height", - "unit": "meter" - }, - { - "alias": "depth", - "unit": "meter" - } - ] - }, - "description": "Width, height and depth of image, or imaged area (meters).", - "data_type": "float32", - "dimensions": [ - "whd" - ] - }, - "description": "A special case of optical imaging.", - "merge": [ - "/" - ], - "attributes": { - "help?": { - "const": true, - "value": "Image stack recorded from 2-photon microscope", - "data_type": "text" - }, - "ancestry": { - "value": [ - "TimeSeries", - "ImageSeries", - "TwoPhotonSeries" - ], - "const": true, - "dimensions": [ - "3" - ], - "data_type": "text" - } - }, - "imaging_plane": { - "references": "/general/optophysiology/", - "description": "Name of imaging plane description in /general/optophysiology.", - "data_type": "text" - }, - "scan_line_rate^": { - "description": "Lines imaged per second. This is also stored in /general/optophysiology but is kept here as it is useful information for analysis, and so good to be stored w/ the actual data.", - "data_type": "float32" - } - }, - "Clustering/": { - "merge": [ - "/" - ], - "num": { - "description": "Cluster number of each event", - "data_type": "int32", - "dimensions": [ - "num_events" - ] - }, - "description": { - "description": "Description of clusters or clustering, (e.g. cluster 0 is noise, clusters curated using Klusters, etc)", - "data_type": "text" - }, - "_description": "Clustered spike data, whether from automatic clustering tools (e.g., klustakwik) or as a result of manual sorting.", - "attributes": { - "help?": { - "const": true, - "value": "Clustered spike data, whether from automatic clustering tools (eg, klustakwik) or as a result of manual sorting", - "data_type": "text" - } - }, - "cluster_nums": { - "autogen": { - "include_empty": true, - "type": "values", - "target": "num", - "qty": "*" - }, - "description": "List of cluster number that are a part of this set (cluster numbers can be non- continuous)", - "data_type": "int32", - "dimensions": [ - "num_clusters" - ] - }, - "peak_over_rms": { - "description": "Maximum ratio of waveform peak to RMS on any channel in the cluster (provides a basic clustering metric).", - "data_type": "float32", - "dimensions": [ - "num_clusters" - ] - }, - "times": { - "description": "Times of clustered events, in seconds. 
This may be a link to times field in associated FeatureExtraction module.", - "data_type": "float64!", - "dimensions": [ - "num_events" - ] - } - }, - "/general/": { - "surgery?": { - "description": "Narrative description about surgery/surgeries, including date(s) and who performed surgery. COMMENT: Much can be copied from Methods", - "data_type": "text" - }, - "subject/?": { - "age?": { - "description": "Age of subject", - "data_type": "text" - }, - "genotype?": { - "description": "Genetic strain COMMENT: If absent, assume Wild Type (WT)", - "data_type": "text" - }, - "subject_id?": { - "description": "ID of animal/person used/participating in experiment (lab convention)", - "data_type": "text" - }, - "_description": "Information about the animal or person from which the data was measured.", - "weight?": { - "description": "Weight at time of experiment, at time of surgery and at other important times", - "data_type": "text" - }, - "description?": { - "description": "Description of subject and where subject came from (e.g., breeder, if animal)", - "data_type": "text" - }, - "sex?": { - "description": "Gender of subject", - "data_type": "text" - }, - "species?": { - "description": "Species of subject", - "data_type": "text" - } - }, - "description": "Experimental metadata, including protocol, notes and description of hardware device(s). COMMENT: The metadata stored in this section should be used to describe the experiment. Metadata necessary for interpreting the data is stored with the data. MORE_INFO: General experimental metadata, including animal strain, experimental protocols, experimenter, devices, etc, are stored under 'general'. Core metadata (e.g., that required to interpret data fields) is stored with the data itself, and implicitly defined by the file specification (eg, time is in seconds). The strategy used here for storing non-core metadata is to use free-form text fields, such as would appear in sentences or paragraphs from a Methods section. Metadata fields are text to enable them to be more general, for example to represent ranges instead of numerical values. Machine-readable metadata is stored as attributes to these free-form datasets.

All entries in the below table are to be included when data is present. Unused groups (e.g., intracellular_ephys in an optophysiology experiment) should not be created unless there is data to store within them.", - "autogen": { - "type": "create" - }, - "source_script?": { - "attributes": { - "file_name?": { - "description": "Name of script file", - "data_type": "text" - } - }, - "description": "Script file used to create this NWB file.", - "data_type": "text" - }, - "devices/?": { - "*": { - "description": "One of possibly many. Information about device and device description. COMMENT: Name should be informative. Contents can be from Methods.", - "data_type": "text" - }, - "description": "Description of hardware devices used during experiment. COMMENT: Eg, monitors, ADC boards, microscopes, etc" - }, - "__custom?": { - "description": "Indicates that this group (general/) is the default location for custom nodes. This dataset in the format specification is just a flag. There is no actual data stored in the HDF5 file for this dataset.", - "data_type": "int" - }, - "pharmacology?": { - "description": "Description of drugs used, including how and when they were administered. COMMENT: Anesthesia(s), painkiller(s), etc., plus dosage, concentration, etc.", - "data_type": "text" - }, - "data_collection?": { - "description": "Notes about data collection and analysis.COMMENT: Can be from Methods", - "data_type": "text" - }, - "institution^": { - "description": "Institution(s) where experiment was performed", - "data_type": "text" - }, - "protocol?": { - "description": "Experimetnal protocol, if applicable.COMMENT: E.g., include IACUC protocol", - "data_type": "text" - }, - "notes?": { - "description": "Notes about the experiment. COMMENT: Things particular to this experiment", - "data_type": "text" - }, - "related_publications?": { - "description": "Publication information.COMMENT: PMID, DOI, URL, etc. If multiple, concatenate together and describe which is which. such as PMID, DOI, URL, etc", - "data_type": "text" - }, - "stimulus?": { - "description": "Notes about stimuli, such as how and where presented.COMMENT: Can be from Methods", - "data_type": "text" - }, - "specifications/?": { - "*": { - "attributes": { - "help?": { - "value": "Contents of format specification file.", - "data_type": "text" - }, - "namespaces": { - "description": "Namespaces defined in the file", - "data_type": "text", - "dimensions": [ - "num_namespaces" - ] - } - }, - "description": "Dataset for storing contents of a specification file for either the core format or an extension. Name should match name of file.`", - "data_type": "text" - }, - "description": "Group for storing format specification files." - }, - "session_id^": { - "description": "Lab-specific ID for the session.COMMENT: Only 1 session_id per file, with all time aligned to experiment start time.", - "data_type": "text" - }, - "slices?": { - "description": "Description of slices, including information about preparation thickness, orientation, temperature and bath solution", - "data_type": "text" - }, - "experimenter^": { - "description": "Name of person who performed the experiment.COMMENT: More than one person OK. 
Can specify roles of different people involved.", - "data_type": "text" - }, - "virus?": { - "description": "Information about virus(es) used in experiments, including virus ID, source, date made, injection location, volume, etc", - "data_type": "text" - }, - "lab^": { - "description": "Lab where experiment was performed", - "data_type": "text" - }, - "experiment_description^": { - "description": "General description of the experiment.COMMENT: Can be from Methods", - "data_type": "text" - } - }, - "/": { - "merge": [ - "/" - ], - "description": "Aliases to standard PatchClampSeries. Its functionality is to better tag PatchClampSeries for machine (and human) readability of the file.", - "attributes": { - "help?": { - "const": true, - "value": "Stimulus voltage applied during voltage clamp recording", - "data_type": "text" - }, - "ancestry": { - "value": [ - "TimeSeries", - "PatchClampSeries", - "VoltageClampStimulusSeries" - ], - "const": true, - "dimensions": [ - "3" - ], - "data_type": "text" - } - } - }, - "/stimulus/": { - "presentation/": { - "include": { - "/*": { - "_options": { - "subclasses": true - } - } - }, - "description": "Stimuli presented during the experiment.", - "autogen": { - "type": "create" - } - }, - "description": "Data pushed into the system (eg, video stimulus, sound, voltage, etc) and secondary representations of that data (eg, measurements of something used as a stimulus) COMMENT: This group is read-only after experiment complete and timestamps are corrected to common timebase. Stores both presented stimuli and stimulus templates, the latter in case the same stimulus is presented multiple times, or is pulled from an external stimulus library.MORE_INFO: Stimuli are here defined as any signal that is pushed into the system as part of the experiment (eg, sound, video, voltage, etc). Many different experiments can use the same stimuli, and stimuli can be re-used during an experiment. The stimulus group is organized so that one version of template stimuli can be stored and these be used multiple times. These templates can exist in the present file or can be HDF5-linked to a remote library file.", - "autogen": { - "type": "create" - }, - "templates/": { - "include": { - "/*": { - "_options": { - "subclasses": true - } - } - }, - "description": "Template stimuli. COMMENT: Time stamps in templates are based on stimulus design and are relative to the beginning of the stimulus. When templates are used, the stimulus instances must convert presentation times to the experiment's time reference frame.", - "autogen": { - "type": "create" - } - } - }, - "ImageSegmentation/": { - "merge": [ - "/" - ], - "description": "Stores pixels in an image that represent different regions of interest (ROIs) or masks. All segmentation for a given imaging plane is stored together, with storage for multiple imaging planes (masks) supported. Each ROI is stored in its own subgroup, with the ROI group containing both a 2D mask and a list of pixels that make up this mask. Segments can also be used for masking neuropil. 
If segmentation is allowed to change with time, a new imaging plane (or module) is required and ROI names should remain consistent between them.", - "/*": { - "imaging_plane_name": { - "description": "Name of imaging plane under general/optophysiology", - "data_type": "text" - }, - "roi_list": { - "autogen": { - "trim": true, - "type": "names", - "target": "", - "qty": "*" - }, - "references": "/", - "description": "List of ROIs in this imaging plane", - "data_type": "text", - "dimensions": [ - "num_rois" - ] - }, - "_description": "Group name is human-readable description of imaging plane", - "/*": { - "img_mask": { - "description": "ROI mask, represented in 2D ([y][x]) intensity image", - "data_type": "float32", - "dimensions": [ - "num_x", - "num_y" - ] - }, - "pix_mask_weight": { - "description": "Weight of each pixel listed in pix_mask", - "data_type": "float32", - "dimensions": [ - "num_pixels" - ] - }, - "roi_description": { - "description": "Description of this ROI.", - "data_type": "text" - }, - "description": "Name of ROI", - "pix_mask": { - "description": "List of pixels (x,y) that compose the mask", - "data_type": "uint16", - "dimensions": [ - "num_pixels", - "2" - ] - } - }, - "reference_images/": { - "/+": { - "merge": [ - "/" - ], - "description": "One or more image stacks that the masks apply to (can be one-element stack)" - }, - "description": "Stores image stacks segmentation mask apply to." - }, - "description^": { - "description": "Description of image plane, recording wavelength, depth, etc", - "data_type": "text" - } - }, - "attributes": { - "help?": { - "const": true, - "value": [ - "Stores groups of pixels that define regions of interest from one or more imaging planes" - ], - "data_type": "text" - } - } - }, - "/": { - "merge": [ - "/" - ], - "data": { - "attributes": { - "unit": { - "value": "volt", - "data_type": "text" - } - }, - "description": "Spike waveforms.", - "data_type": "float32", - "dimensions": [ - [ - "num_events", - "num_samples" - ], - [ - "num_events", - "num_channels", - "num_samples" - ] - ] - }, - "description": "Stores \"snapshots\" of spike events (i.e., threshold crossings) in data. This may also be raw data, as reported by ephys hardware. If so, the TimeSeries::description field should describing how events were detected. All SpikeEventSeries should reside in a module (under EventWaveform interface) even if the spikes were reported and stored by hardware. All events span the same recording channels and store snapshots of equal duration. TimeSeries::data array structure: :blue:`[num events] [num channels] [num samples] (or [num events] [num samples] for single electrode)`.", - "attributes": { - "help?": { - "const": true, - "value": "Snapshots of spike events from data.", - "data_type": "text" - }, - "ancestry": { - "value": [ - "TimeSeries", - "ElectricalSeries", - "SpikeEventSeries" - ], - "const": true, - "dimensions": [ - "3" - ], - "data_type": "text" - } - } - }, - "MotionCorrection/": { - "merge": [ - "/" - ], - "/+": { - "original_path": { - "autogen": { - "trim": false, - "format": "$t", - "type": "link_path", - "target": "original/", - "qty": "!" - }, - "description": "Path to linked original timeseries", - "data_type": "text" - }, - "original/": { - "link": { - "allow_subclasses": true, - "target_type": "/" - }, - "description": "HDF5 Link to image series that is being registered." - }, - "corrected/": { - "merge": [ - "/" - ], - "description": "Image stack with frames shifted to the common coordinates." 
- }, - "description": "One of possibly many. Name should be informative.", - "xy_translation/": { - "merge": [ - "/" - ], - "data": { - "xy": { - "type": "structure", - "components": [ - { - "alias": "x", - "unit": "pixels" - }, - { - "alias": "y", - "unit": "pixels" - } - ] - }, - "description": "TimeSeries for storing x,y offset for each image frame.", - "data_type": "float", - "dimensions": [ - "num_times", - "xy" - ] - }, - "description": "Stores the x,y delta necessary to align each frame to the common coordinates, for example, to align each frame to a reference image." - } - }, - "description": "An image stack where all frames are shifted (registered) to a common coordinate system, to account for movement and drift between frames. Note: each frame at each point in time is assumed to be 2-D (has only x & y dimensions).", - "attributes": { - "help?": { - "const": true, - "value": "Image stacks whose frames have been shifted (registered) to account for motion", - "data_type": "text" - } - } - }, - "/": { - "indexed_timeseries_path": { - "autogen": { - "trim": false, - "format": "path is $t", - "type": "link_path", - "target": "indexed_timeseries/", - "qty": "!" - }, - "description": "Path to linked TimeSeries", - "data_type": "text" - }, - "description": "Stores indices to image frames stored in an ImageSeries. The purpose of the ImageIndexSeries is to allow a static image stack to be stored somewhere, and the images in the stack to be referenced out-of-order. This can be for the display of individual images, or of movie segments (as a movie is simply a series of images). The data field stores the index of the frame in the referenced ImageSeries, and the timestamps array indicates when that image was displayed.", - "indexed_timeseries/": { - "link": { - "allow_subclasses": false, - "target_type": "/" - }, - "description": "HDF5 link to TimeSeries containing images that are indexed." - }, - "merge": [ - "/" - ], - "attributes": { - "help?": { - "const": true, - "value": "A sequence that is generated from an existing image stack. Frames can be presented in an arbitrary order. The data[] field stores frame number in reference stack", - "data_type": "text" - }, - "ancestry": { - "value": [ - "TimeSeries", - "IndexSeries" - ], - "const": true, - "dimensions": [ - "2" - ], - "data_type": "text" - } - }, - "data": { - "references": "indexed_timeseries/data.num_times", - "dimensions": [ - "num_times" - ], - "data_type": "int", - "description": "Index of the frame in the referenced ImageSeries." - } - }, - "Fluorescence/": { - "merge": [ - "/" - ], - "include": { - "/+": {} - }, - "description": "Fluorescence information about a region of interest (ROI). Storage hierarchy of fluorescence should be the same as for segmentation (ie, same names for ROIs and for image planes).", - "attributes": { - "help?": { - "const": true, - "value": "Fluorescence over time of one or more ROIs. TimeSeries names should correspond to imaging plane names", - "data_type": "text" - } - } - }, - "Position/": { - "merge": [ - "/" - ], - "include": { - "/+": {} - }, - "description": "Position data, whether along the x, x/y or x/y/z axis.", - "attributes": { - "help?": { - "const": true, - "value": "Position data, whether along the x, xy or xyz axis", - "data_type": "text" - } - } - }, - "/": { - "starting_time?": { - "attributes": { - "rate": { - "description": "Sampling rate, in Hz COMMENT: Rate information is stored in Hz", - "data_type": "float32!" 
- }, - "unit": { - "value": "Seconds", - "description": "The string \"Seconds\"COMMENT: All timestamps in the file are stored in seconds. Specifically, this is the number of seconds since the start of the experiment (i.e., since session_start_time)", - "data_type": "text", - "const": true - } - }, - "description": "The timestamp of the first sample. COMMENT: When timestamps are uniformly spaced, the timestamp of the first sample can be specified and all subsequent ones calculated from the sampling rate.", - "data_type": "float64!" - }, - "_required": { - "control": [ - "(control AND control_description) OR (NOT control AND NOT control_description)", - "If either control or control_description are present, then both must be present." - ], - "start_time": [ - "starting_time XOR timestamps", - "Either starting_time or timestamps must be present, but not both." - ] - }, - "control?": { - "references": "control_description.num_control_values", - "description": "Numerical labels that apply to each element in data[]. COMMENT: Optional field. If present, the control array should have the same number of elements as data[].", - "data_type": "uint8", - "dimensions": [ - "num_times" - ] - }, - "control_description?": { - "dimensions": [ - "num_control_values" - ], - "data_type": "text", - "description": "Description of each control value. COMMENT: Array length should be as long as the highest number in control minus one, generating an zero-based indexed array for control values." - }, - "description": "General purpose time series.", - "timestamps": { - "attributes": { - "interval": { - "value": 1, - "const": true, - "description": "The number of samples between each timestamp. COMMENT: Presently this value is restricted to 1 (ie, a timestamp for each sample)", - "data_type": "int32" - }, - "unit": { - "value": "Seconds", - "description": "The string \"Seconds\" COMMENT: All timestamps in the file are stored in seconds. Specifically, this is the number of seconds since the start of the experiment (i.e., since session_start_time)", - "data_type": "text", - "const": true - } - }, - "description": "Timestamps for samples stored in data.COMMENT: Timestamps here have all been corrected to the common experiment master-clock. Time is stored as seconds and all timestamps are relative to experiment start time.", - "data_type": "float64!", - "dimensions": [ - "num_times" - ] - }, - "attributes": { - "source": { - "description": "Name of TimeSeries or Modules that serve as the source for the data contained here. It can also be the name of a device, for stimulus or acquisition data", - "data_type": "text" - }, - "neurodata_type": { - "const": true, - "value": "TimeSeries", - "data_type": "text" - }, - "ancestry": { - "const": true, - "value": [ - "TimeSeries" - ], - "data_type": "text", - "description": "The class-hierarchy of this TimeSeries, with one entry in the array for each ancestor. An alternative and equivalent description is that this TimeSeries object contains the datasets defined for all of the TimeSeries classes listed. The class hierarchy is described more fully below. COMMENT: For example: [0]=TimeSeries, [1]=ElectricalSeries [2]=PatchClampSeries. The hierarchical order should be preserved in the array -- i.e., the parent object of subclassed element N in the array should be element N-1" - }, - "comments^": { - "description": "Human-readable comments about the TimeSeries. 
This second descriptive field can be used to store additional information, or descriptive information if the primary description field is populated with a computer-readable string.", - "data_type": "text" - }, - "extern_fields^": { - "autogen": { - "type": "extern", - "qty": "*" - }, - "description": "List of fields that are HDF5 external links.COMMENT: Only present if one or more datasets is set to an HDF5 external link.", - "data_type": "text", - "dimensions": [ - "num_extern_fields" - ] - }, - "description^": { - "description": "Description of TimeSeries", - "data_type": "text" - }, - "help?": { - "value": "General time series object", - "const": true, - "description": "Short description indicating what this type of TimeSeries stores.", - "data_type": "text" - }, - "data_link": { - "autogen": { - "trim": true, - "type": "links", - "target": "data", - "qty": "*" - }, - "description": "A sorted list of the paths of all TimeSeries that share a link to the same data field. Example element of list: \"/stimulus/presentation/Sweep_0\"` COMMENT: Attribute is only present if links are present. List should include the path to this TimeSeries also.", - "data_type": "text", - "dimensions": [ - "num_dlinks" - ] - }, - "missing_fields^": { - "autogen": { - "type": "missing", - "qty": "*" - }, - "description": "List of fields that are not optional (i.e. either required or recommended parts of the TimeSeries) that are missing. COMMENT: Only present if one or more required or recommended fields are missing. Note that a missing required field (such as data or timestamps) should generate an error by the API", - "data_type": "text", - "dimensions": [ - "num_missing_fields" - ] - }, - "timestamp_link": { - "autogen": { - "trim": true, - "type": "links", - "target": "timestamps", - "qty": "*" - }, - "description": "A sorted list of the paths of all TimeSeries that share a link to the same timestamps field. Example element of list: \"/acquisition/timeseries/lick_trace\" COMMENT: Attribute is only present if links are present. List should include the path to this TimeSeries also.", - "data_type": "text", - "dimensions": [ - "num_tslinks" - ] - } - }, - "num_samples": { - "autogen": { - "type": "length", - "target": "timestamps" - }, - "description": "Number of samples in data, or number of image frames. COMMENT: This is important if the length of timestamp and data are different, such as for externally stored stimulus image stacks", - "data_type": "int32" - }, - "data": { - "attributes": { - "conversion": { - "data_type": "float32!", - "description": "Scalar to multiply each element in data to convert it to the specified unit", - "value": 1.0 - }, - "resolution": { - "data_type": "float32!", - "description": "Smallest meaningful difference between values in data, stored in the specified by unit. COMMENT: E.g., the change in value of the least significant bit, or a larger number if signal noise is known to be present. If unknown, use NaN", - "value": 0.0 - }, - "unit": { - "description": "The base unit of measure used to store data. This should be in the SI unit. COMMENT: This is the SI unit (when appropriate) of the stored data, such as Volts. If the actual data is stored in millivolts, the field 'conversion' below describes how to convert the data to the specified SI unit.", - "data_type": "text" - } - }, - "description": "Data values. 
Can also store binary data (eg, image frames) COMMENT: This field may be a link to data stored in an external file, especially in the case of raw data.", - "data_type": "any", - "dimensions": [ - "num_times" - ] - }, - "sync/?": { - "description": "Lab specific time and sync information as provided directly from hardware devices and that is necessary for aligning all acquired time information to a common timebase. The timestamp array stores time in the common timebase. COMMENT: This group will usually only be populated in TimeSeries that are stored external to the NWB file, in files storing raw data. Once timestamp data is calculated, the contents of 'sync' are mostly for archival purposes." - } - }, - "/": { - "merge": [ - "/" - ], - "data": { - "attributes": { - "conversion": { - "const": true, - "description": "Value is float('NaN') (const) since this does not apply.", - "value": "float('NaN')" - }, - "resolution": { - "const": true, - "description": "Value is float('nan') (const) since this does not apply", - "value": "float('NaN')" - }, - "unit": { - "const": true, - "description": "Value is \"n/a\" to indicate that this does not apply", - "value": "n/a" - } - }, - "description": "Annotations made during an experiment.", - "data_type": "text", - "dimensions": [ - "num_times" - ] - }, - "description": "Stores, eg, user annotations made during an experiment. The TimeSeries::data[] field stores a text array, and timestamps are stored for each annotation (ie, interval=1). This is largely an alias to a standard TimeSeries storing a text array but that is identifiable as storing annotations in a machine-readable way.", - "attributes": { - "help?": { - "const": true, - "value": "Time-stamped annotations about an experiment", - "data_type": "text" - }, - "ancestry": { - "value": [ - "TimeSeries", - "AnnotationSeries" - ], - "const": true, - "dimensions": [ - "2" - ], - "data_type": "text" - } - } - }, - "/": { - "merge": [ - "/" - ], - "masked_imageseries/": { - "link": { - "allow_subclasses": false, - "target_type": "/" - }, - "description": "Link to ImageSeries that mask is applied to." - }, - "masked_imageseries_path": { - "autogen": { - "trim": false, - "format": "path is $t", - "type": "link_path", - "target": "masked_imageseries/", - "qty": "!" - }, - "description": "Path to linked ImageSeries", - "data_type": "text" - }, - "description": "An alpha mask that is applied to a presented visual stimulus. The data[] array contains an array of mask values that are applied to the displayed image. Mask values are stored as RGBA. Mask can vary with time. 
The timestamps array indicates the starting time of a mask, and that mask pattern continues until it's explicitly changed.", - "attributes": { - "help?": { - "const": true, - "value": "An alpha mask that is applied to a presented visual stimulus", - "data_type": "text" - }, - "ancestry": { - "value": [ - "TimeSeries", - "ImageSeries", - "ImageMaskSeries" - ], - "const": true, - "dimensions": [ - "3" - ], - "data_type": "text" - } - } - }, - "EventWaveform/": { - "merge": [ - "/" - ], - "include": { - "/*": {} - }, - "description": "Represents either the waveforms of detected events, as extracted from a raw data trace in /acquisition, or the event waveforms that were stored during experiment acquisition.", - "attributes": { - "help?": { - "const": true, - "value": "Waveform of detected extracellularly recorded spike events", - "data_type": "text" - } - } - }, - "/general/optophysiology/?": { - "description": "Metadata related to optophysiology.", - "/*": { - "reference_frame": { - "description": "Describes position and reference frame of manifold based on position of first element in manifold. For example, text description of anotomical location or vectors needed to rotate to common anotomical axis (eg, AP/DV/ML). COMMENT: This field is necessary to interpret manifold. If manifold is not present then this field is not required", - "data_type": "text" - }, - "indicator": { - "description": "Calcium indicator", - "data_type": "text" - }, - "manifold": { - "attributes": { - "conversion": { - "value": 1.0, - "description": "Multiplier to get from stored values to specified unit (e.g., 1e-3 for millimeters)", - "data_type": "float" - }, - "unit": { - "value": "Meter", - "description": "Base unit that coordinates are stored in (e.g., Meters)", - "data_type": "text" - } - }, - "xyz": { - "type": "struct", - "components": [ - { - "alias": "x", - "unit": "Meter" - }, - { - "alias": "y", - "unit": "Meter" - }, - { - "alias": "z", - "unit": "Meter" - } - ] - }, - "description": "Physical position of each pixel. COMMENT: \"xyz\" represents the position of the pixel relative to the defined coordinate space", - "data_type": "float32", - "dimensions": [ - "height", - "weight", - "xyz" - ] - }, - "imaging_rate": { - "description": "Rate images are acquired, in Hz.", - "data_type": "text" - }, - "_description": "One of possibly many groups describing an imaging plane. COMMENT: Name is arbitrary but should be meaningful. 
It is referenced by TwoPhotonSeries and also ImageSegmentation and DfOverF interfaces", - "device": { - "references": "/general/devices//", - "description": "Name of device in /general/devices", - "data_type": "text" - }, - "description?": { - "description": "Description of <image_plane_X>", - "data_type": "text" - }, - "excitation_lambda": { - "description": "Excitation wavelength", - "data_type": "text" - }, - "/": { - "description": { - "description": "Any notes or comments about the channel", - "data_type": "text" - }, - "emission_lambda": { - "description": "Emission lambda for channel", - "data_type": "text" - }, - "_description": "One of possibly many groups storing channel-specific data COMMENT: Name is arbitrary but should be meaningful" - }, - "location": { - "description": "Location of image plane", - "data_type": "text" - } - } - }, - "/general/optogenetics/?": { - "description": "Metadata describing optogenetic stimulation", - "/*": { - "device": { - "references": "/general/devices//", - "description": "Name of device in /general/devices", - "data_type": "text" - }, - "excitation_lambda": { - "description": "Excitation wavelength", - "data_type": "text" - }, - "location": { - "description": "Location of stimulation site", - "data_type": "text" - }, - "description": { - "description": "Description of site", - "data_type": "text" - }, - "_description": "One of possibly many groups describing an optogenetic stimulation site. COMMENT: Name is arbitrary but should be meaningful. Name is referenced by OptogeneticSeries" - } - }, - "FeatureExtraction/": { - "description": { - "description": "Description of features (eg, \"PC1\") for each of the extracted features.", - "data_type": "text", - "dimensions": [ - "num_features" - ] - }, - "_description": "Features, such as PC1 and PC2, that are extracted from signals stored in a SpikeEvent TimeSeries or other source.", - "electrode_idx": { - "references": "/general/extracellular_ephys/electrode_map.num_electrodes", - "description": "Indices (zero-based) to electrodes described in the experiment's electrode map array (under /general/extracellular_ephys).", - "data_type": "int32", - "dimensions": [ - "num_channels" - ] - }, - "times": { - "description": "Times of events that features correspond to (can be a link).", - "data_type": "float64!", - "dimensions": [ - "num_events" - ] - }, - "merge": [ - "/" - ], - "attributes": { - "help?": { - "const": true, - "value": "Container for salient features of detected events", - "data_type": "text" - } - }, - "features": { - "description": "Multi-dimensional array of features extracted from each event.", - "data_type": "float32", - "dimensions": [ - "num_events", - "num_channels", - "num_features" - ] - } - }, - "/": { - "description": "Top level of NWB file.", - "file_create_date": { - "description": "Time file was created, UTC, and subsequent modifications to file. COMMENT: Date + time, Use ISO format (eg, ISO 8601) or a format that is easy to read and unambiguous. File can be created after the experiment was run, so this may differ from experiment start time. Each modification to file adds new entry to array. ", - "data_type": "text", - "dimensions": [ - "*unlimited*" - ] - }, - "identifier": { - "description": "A unique text identifier for the file. COMMENT: Eg, concatenated lab name, file creation date/time and experimentalist, or a hash of these and/or other values. 
The goal is that the string should be unique to all other files.", - "data_type": "text" - }, - "session_description": { - "description": "One or two sentences describing the experiment and data in the file.", - "data_type": "text" - }, - "session_start_time": { - "description": "Time of experiment/session start, UTC. COMMENT: Date + time, Use ISO format (eg, ISO 8601) or an easy-to-read and unambiguous format. All times stored in the file use this time as reference (ie, time zero)", - "data_type": "text" - }, - "nwb_version": { - "description": "File version string. COMMENT: Eg, NWB-1.0.0. This will be the name of the format with trailing major, minor and patch numbers.", - "data_type": "text" - } - }, - "PupilTracking/": { - "merge": [ - "/" - ], - "include": { - "/+": {} - }, - "description": "Eye-tracking data, representing pupil size.", - "attributes": { - "help?": { - "const": true, - "value": "Eye-tracking data, representing pupil size", - "data_type": "text" - } - } - }, - "/": { - "features": { - "description": "Description of the features represented in TimeSeries::data.", - "data_type": "text", - "dimensions": [ - "num_features" - ] - }, - "feature_units^": { - "description": "Units of each feature.", - "data_type": "text", - "dimensions": [ - "num_features" - ] - }, - "merge": [ - "/" - ], - "attributes": { - "help?": { - "const": true, - "value": "Features of an applied stimulus. This is useful when storing the raw stimulus is impractical", - "data_type": "text" - }, - "ancestry": { - "value": [ - "TimeSeries", - "AbstractFeatureSeries" - ], - "const": true, - "dimensions": [ - "2" - ], - "data_type": "text" - } - }, - "data": { - "attributes": { - "unit": { - "value": "see 'feature_units'" - } - }, - "description": "Values of each feature at each time.", - "data_type": "float32", - "dimensions": [ - "num_times", - "num_features" - ] - }, - "description": "Abstract features, such as quantitative descriptions of sensory stimuli. The TimeSeries::data field is a 2D array, storing those features (e.g., for visual grating stimulus this might be orientation, spatial frequency and contrast). Null stimuli (eg, uniform gray) can be marked as being an independent feature (eg, 1.0 for gray, 0.0 for actual stimulus) or by storing NaNs for feature values, or through use of the TimeSeries::control fields. A set of features is considered to persist until the next set of features is defined. The final set of features stored should be the null set." - }, - "/": { - "description": "ROI responses over an imaging plane. Each row in data[] should correspond to the signal from one ROI.", - "segmentation_interface/": { - "link": { - "allow_subclasses": false, - "target_type": "ImageSegmentation/" - }, - "description": "HDF5 link to image segmentation module defining ROIs." - }, - "segmentation_interface_path": { - "autogen": { - "trim": false, - "format": "$t", - "type": "link_path", - "target": "segmentation_interface/", - "qty": "!" - }, - "description": "Path to segmentation module.", - "data_type": "text" - }, - "roi_names": { - "description": "List of ROIs represented, one name for each row of data[].", - "data_type": "text", - "dimensions": [ - "num_ROIs" - ] - }, - "merge": [ - "/" - ], - "attributes": { - "help?": { - "const": true, - "value": "ROI responses over an imaging plane. 
Each row in data[] should correspond to the signal from one ROI", - "data_type": "text" - }, - "ancestry": { - "value": [ - "TimeSeries", - "RoiResponseSeries" - ], - "const": true, - "dimensions": [ - "2" - ], - "data_type": "text" - } - }, - "data": { - "description": "Signals from ROIs", - "data_type": "float32", - "dimensions": [ - "num_times", - "num_ROIs" - ] - } - }, - "/": { - "merge": [ - "/" - ], - "reference_frame^": { - "description": "Description defining what exactly 'straight-ahead' means.", - "data_type": "text" - }, - "data": { - "attributes": { - "unit": { - "value": "meter", - "data_type": "text" - } - }, - "description": "2-D array storing position or direction relative to some reference frame.", - "data_type": "number", - "dimensions": [ - "num_times", - "num_features" - ] - }, - "description": "Direction, e.g., of gaze or travel, or position. The TimeSeries::data field is a 2D array storing position or direction relative to some reference frame. Array structure: [num measurements] [num dimensions]. Each SpatialSeries has a text dataset reference_frame that indicates the zero-position, or the zero-axes for direction. For example, if representing gaze direction, \"straight-ahead\" might be a specific pixel on the monitor, or some other point in space. For position data, the 0,0 point might be the top-left corner of an enclosure, as viewed from the tracking camera. The unit of data will indicate how to interpret SpatialSeries values.", - "attributes": { - "help?": { - "const": true, - "value": "Stores points in space over time. The data[] array structure is [num samples][num spatial dimensions]", - "data_type": "text" - }, - "ancestry": { - "value": [ - "TimeSeries", - "SpatialSeries" - ], - "const": true, - "dimensions": [ - "2" - ], - "data_type": "text" - } - } - }, - "/": { - "merge": [ - "/" - ], - "capacitance_fast^": { - "attributes": { - "unit": { - "value": "Farad", - "data_type": "text" - } - }, - "description": "Unit: Farad", - "data_type": "float32" - }, - "resistance_comp_bandwidth^": { - "attributes": { - "unit": { - "value": "Hz", - "data_type": "text" - } - }, - "description": "Unit: Hz", - "data_type": "float32" - }, - "whole_cell_series_resistance_comp^": { - "attributes": { - "unit": { - "value": "Ohm", - "data_type": "text" - } - }, - "description": "Unit: Ohm", - "data_type": "float32" - }, - "description": "Stores current data recorded from intracellular voltage-clamp recordings. 
A corresponding VoltageClampStimulusSeries (stored separately as a stimulus) is used to store the voltage injected.", - "resistance_comp_correction^": { - "attributes": { - "unit": { - "value": "percent", - "data_type": "text" - } - }, - "description": "Unit: %", - "data_type": "float32" - }, - "attributes": { - "help?": { - "const": true, - "value": "Current recorded from cell during voltage-clamp recording", - "data_type": "text" - }, - "ancestry": { - "value": [ - "TimeSeries", - "PatchClampSeries", - "VoltageClampSeries" - ], - "const": true, - "dimensions": [ - "3" - ], - "data_type": "text" - } - }, - "capacitance_slow^": { - "attributes": { - "unit": { - "value": "Farad", - "data_type": "text" - } - }, - "description": "Unit: Farad", - "data_type": "float32" - }, - "resistance_comp_prediction^": { - "attributes": { - "unit": { - "value": "percent", - "data_type": "text" - } - }, - "description": "Unit: %", - "data_type": "float32" - }, - "whole_cell_capacitance_comp^": { - "attributes": { - "unit": { - "value": "Farad", - "data_type": "text" - } - }, - "description": "Unit: Farad", - "data_type": "float32" - } - }, - "/acquisition/": { - "timeseries/": { - "include": { - "/*": { - "_options": { - "subclasses": true - } - } - }, - "description": "Acquired TimeSeries. COMMENT: When importing acquisition data to an NWB file, all acquisition/tracking/stimulus data must already be aligned to a common time frame. It is assumed that this task has already been performed.", - "autogen": { - "type": "create" - } - }, - "images/": { - "*": { - "attributes": { - "description^": { - "description": "Human description of image. COMMENT: If image is of slice data, include slice thickness and orientation, and reference to appropriate entry in /general/slices", - "data_type": "text" - }, - "format": { - "description": "Format of the image. COMMENT: eg, jpg, png, mpeg", - "data_type": "text" - } - }, - "description": "Photograph of experiment or experimental setup (video also OK). COMMENT: Name is arbitrary. Data is stored as a single binary object (HDF5 opaque type).", - "data_type": "binary" - }, - "description": "Acquired images", - "autogen": { - "type": "create" - } - }, - "description": "Data streams recorded from the system, including ephys, ophys, tracking, etc. COMMENT: This group is read-only after the experiment is completed and timestamps are corrected to a common timebase. The data stored here may be links to raw data stored in external HDF5 files. This will allow keeping bulky raw data out of the file while preserving the option of keeping some/all in the file. MORE_INFO: Acquired data includes tracking and experimental data streams (ie, everything measured from the system). If bulky data is stored in the /acquisition group, the data can exist in a separate HDF5 file that is linked to by the file being used for processing and analysis.", - "autogen": { - "type": "create" - } - }, - "ClusterWaveforms/": { - "merge": [ - "/" - ], - "waveform_sd": { - "description": "Stdev of waveforms for each cluster, using the same indices as in mean", - "data_type": "float32", - "dimensions": [ - "num_clusters", - "num_samples" - ] - }, - "waveform_filtering": { - "description": "Filtering applied to data before generating mean/sd", - "data_type": "text" - }, - "description": "The mean waveform shape, including standard deviation, of the different clusters. Ideally, the waveform analysis should be performed on data that is only high-pass filtered. 
This is a separate module because it is expected to require updating. For example, IMEC probes may require different storage requirements to store/display mean waveforms, requiring a new interface or an extension of this one.", - "clustering_interface_path": { - "autogen": { - "trim": false, - "format": "path is $t", - "type": "link_path", - "target": "clustering_interface/", - "qty": "!" - }, - "description": "Path to linked clustering interface", - "data_type": "text" - }, - "clustering_interface/": { - "link": { - "allow_subclasses": false, - "target_type": "Clustering/" - }, - "description": "HDF5 link to Clustering interface that was the source of the clustered data" - }, - "attributes": { - "help?": { - "const": true, - "value": "Mean waveform shape of clusters. Waveforms should be high-pass filtered (ie, not the same bandpass filter used for waveform analysis and clustering)", - "data_type": "text" - } - }, - "waveform_mean": { - "description": "The mean waveform for each cluster, using the same indices for each wave as cluster numbers in the associated Clustering module (i.e., cluster 3 is in array slot [3]). Waveforms corresponding to gaps in cluster sequence should be empty (e.g., zero-filled)", - "data_type": "float32", - "dimensions": [ - "num_clusters", - "num_samples" - ] - } - }, - "/general/extracellular_ephys/?": { - "/": { - "device": { - "references": "/general/devices//", - "description": "Name of device(s) in /general/devices", - "data_type": "text" - }, - "location": { - "description": "Description of probe location COMMENT: E.g., stereotaxic coordinates and other data, e.g., drive placement, angle and orientation and tetrode location in drive and tetrode depth", - "data_type": "text" - }, - "description": { - "description": "Description of probe or shank", - "data_type": "text" - }, - "_description": "One of possibly many groups, one for each electrode group. If the groups have a hierarchy, such as multiple probes each having multiple shanks, that hierarchy can be mirrored here, using groups for electrode_probe_X and subgroups for electrode_group_X. COMMENT: Name is arbitrary but should be meaningful." - }, - "filtering": { - "description": "Description of filtering used. COMMENT: Includes filtering type and parameters, frequency fall-off, etc. If this changes between TimeSeries, filter description should be stored as a text attribute for each TimeSeries.", - "data_type": "text" - }, - "description": "Metadata related to extracellular electrophysiology.", - "electrode_group": { - "references": "/", - "description": "Identification string for probe, shank or tetrode each electrode resides on. Name should correspond to one of electrode_group_X groups below. COMMENT: There's one entry here for each element in electrode_map. All elements in an electrode group should have a functional association, for example all being on the same planar electrode array, or on the same shank.", - "data_type": "text", - "dimensions": [ - "num_electrodes" - ] - }, - "electrode_map": { - "xyz": { - "type": "struct", - "components": [ - { - "alias": "x", - "unit": "meter" - }, - { - "alias": "y", - "unit": "meter" - }, - { - "alias": "z", - "unit": "meter" - } - ] - }, - "description": "Physical location of electrode (x,y,z in meters) COMMENT: Location of electrodes relative to one another. This records the points in space. 
If an electrode is moved, it needs a new entry in the electrode map for its new location. Otherwise the format doesn't support using the same electrode in a new location, or processing spikes pre/post drift.", - "data_type": "number", - "dimensions": [ - "num_electrodes", - "xyz" - ] - }, - "impedance": { - "description": "Impedance of electrodes listed in electrode_map. COMMENT: Text, in the event that impedance is stored as range and not a fixed value", - "data_type": "text", - "dimensions": [ - "num_electrodes" - ] - } - }, - "/": { - "merge": [ - "/" - ], - "description": "Alias to standard PatchClampSeries. Its functionality is to better tag PatchClampSeries for machine (and human) readability of the file.", - "attributes": { - "help?": { - "const": true, - "value": "Stimulus current applied during current clamp recording", - "data_type": "text" - }, - "ancestry": { - "value": [ - "TimeSeries", - "PatchClampSeries", - "CurrentClampStimulusSeries" - ], - "const": true, - "dimensions": [ - "3" - ], - "data_type": "text" - } - } - }, - "DfOverF/": { - "merge": [ - "/" - ], - "include": { - "/*": {} - }, - "description": "dF/F information about a region of interest (ROI). Storage hierarchy of dF/F should be the same as for segmentation (ie, same names for ROIs and for image planes).", - "attributes": { - "help?": { - "const": true, - "value": "Df/f over time of one or more ROIs. TimeSeries names should correspond to imaging plane names", - "data_type": "text" - } - } - }, - "/analysis/": { - "description": "Lab-specific and custom scientific analysis of data. There is no defined format for the content of this group - the format is up to the individual user/lab. COMMENT: To facilitate sharing analysis data between labs, the contents here should be stored in standard types (eg, INCF types) and appropriately documented. MORE_INFO: The file can store lab-specific and custom data analysis without restriction on its form or schema, reducing data formatting restrictions on end users. Such data should be placed in the analysis group. The analysis data should be documented so that it could be shared with other labs", - "autogen": { - "type": "create" - } - }, - "/": { - "electrode_idx": { - "references": "/general/extracellular_ephys/electrode_map.num_electrodes", - "description": "Indices (zero-based) to electrodes in general/extracellular_ephys/electrode_map.", - "data_type": "int32", - "dimensions": [ - "num_channels" - ] - }, - "merge": [ - "/" - ], - "data": { - "attributes": { - "unit": { - "value": "volt", - "data_type": "text" - } - }, - "description": "Recorded voltage data.", - "data_type": "number", - "dimensions": [ - [ - "num_times" - ], - [ - "num_times", - "num_channels" - ] - ] - }, - "description": "Stores acquired voltage data from extracellular recordings. The data field of an ElectricalSeries is an int or float array storing data in Volts. TimeSeries::data array structure: :blue:`[num times] [num channels] (or [num_times] for single electrode).`", - "attributes": { - "help?": { - "const": true, - "value": "Stores acquired voltage data from extracellular recordings", - "data_type": "text" - }, - "ancestry": { - "value": [ - "TimeSeries", - "ElectricalSeries" - ], - "const": true, - "dimensions": [ - "2" - ], - "data_type": "text" - } - } - }, - "/epochs/": { - "attributes": { - "tags": { - "autogen": { - "trim": true, - "include_empty": true, - "type": "values", - "target": "/tags", - "qty": "*" - }, - "description": "A sorted list of the different tags used by epochs. 
COMMENT: This is a sorted list of all tags that are in any of the <epoch_X>/tags datasets.", - "data_type": "text", - "dimensions": [ - "num_tags" - ] - } - }, - "/*": { - "_description": "One of possibly many different experimental epochs. COMMENT: Name is arbitrary but must be unique within the experiment.", - "start_time": { - "unit": "second", - "description": "Start time of epoch, in seconds", - "data_type": "float64!" - }, - "tags?": { - "description": "User-defined tags used throughout the epochs. Tags are to help identify or categorize epochs. COMMENT: E.g., can describe stimulus (if template) or behavioral characteristic (e.g., \"lick left\")", - "data_type": "text", - "dimensions": [ - "num_tags" - ] - }, - "stop_time": { - "unit": "second", - "description": "Stop time of epoch, in seconds", - "data_type": "float64!" - }, - "description?": { - "description": "Description of this epoch (<epoch_X>).", - "data_type": "text" - }, - "attributes": { - "neurodata_type": { - "value": "Epoch", - "const": true, - "description": "The string \"Epoch\"", - "data_type": "text" - }, - "links": { - "autogen": { - "trim": true, - "target": "/timeseries", - "format": "'$s' is '$t'", - "qty": "*", - "include_empty": true, - "type": "link_path" - }, - "description": "A sorted list mapping TimeSeries entries in the epoch to the path of the TimeSeries within the file. Each entry in the list has the following format: \"'<TimeSeries_X>' is 'path_to_TimeSeries'\", where <TimeSeries_X> is the name assigned to group <TimeSeries_X> (below). Note that the name and path are both enclosed in single quotes and the word \"is\" (with a single space before and after) separates them. Example list element: \"'auditory_cue' is '/stimulus/presentation/auditory_cue'\".", - "data_type": "text", - "dimensions": [ "num_links" ] - } - }, - "/*": { - "count": { - "description": "Number of data samples available in this time series, during this epoch.", - "data_type": "int32" - }, - "timeseries/": { - "link": { - "allow_subclasses": true, - "target_type": "/" - }, - "description": "Link to TimeSeries. An HDF5 soft-link should be used." - }, - "description": "One of possibly many input or output streams recorded during epoch. COMMENT: Name is arbitrary and does not have to match the TimeSeries that it refers to.", - "idx_start": { - "description": "Epoch's start index in TimeSeries data[] field. COMMENT: This can be used to calculate location in TimeSeries timestamp[] field", - "data_type": "int32" - } - } - }, - "description": "Experimental intervals, whether that be logically distinct sub-experiments having a particular scientific goal, trials during an experiment, or epochs deriving from analysis of data. COMMENT: Epochs provide pointers to time series that are relevant to the epoch, and windows into the data in those time series (i.e., the start and end indices of TimeSeries::data[] that overlap with the epoch). This allows easy access to a range of data in specific experimental intervals. MORE_INFO: An experiment can be separated into one or many logical intervals, with the order and duration of these intervals often definable before the experiment starts. In this document, and in the context of NWB, these intervals are called 'epochs'. Epochs have acquisition and stimulus data associated with them, and different epochs can overlap. 
Examples of epochs are the time when a rat runs around an enclosure or maze as well as intervening sleep sessions; the presentation of a set of visual stimuli to a mouse running on a wheel; or the uninterrupted presentation of current to a patch-clamped cell. Epochs can be limited to the interval of a particular stimulus, or they can span multiple stimuli. Different windows into the same time series can be achieved by including multiple instances of that time series, each with different start/stop times.", - "autogen": { - "type": "create" - } - }, - "/": { - "description": "Stores voltage data recorded from intracellular current-clamp recordings. A corresponding CurrentClampStimulusSeries (stored separately as a stimulus) is used to store the current injected.", - "merge": [ - "/" - ], - "bridge_balance^": { - "description": "Unit: Ohm", - "data_type": "float32" - }, - "bias_current^": { - "description": "Unit: Amp", - "data_type": "float32" - }, - "attributes": { - "help?": { - "const": true, - "value": "Voltage recorded from cell during current-clamp recording", - "data_type": "text" - }, - "ancestry": { - "value": [ - "TimeSeries", - "PatchClampSeries", - "CurrentClampSeries" - ], - "const": true, - "dimensions": [ - "3" - ], - "data_type": "text" - } - }, - "capacitance_compensation^": { - "description": "Unit: Farad", - "data_type": "float32" - } - }, - "/": { - "field_of_view^": { - "fov": { - "type": "structure", - "components": [ - [ - { - "alias": "width", - "unit": "meter" - }, - { - "alias": "height", - "unit": "meter" - } - ], - [ - { - "alias": "width", - "unit": "meter" - }, - { - "alias": "height", - "unit": "meter" - }, - { - "alias": "depth", - "unit": "meter" - } - ] - ] - }, - "description": "Width, height and depth of image, or imaged area (meters).", - "data_type": "float32", - "dimensions": [ - "fov" - ] - }, - "description": "Image data that is presented or recorded. A stimulus template movie will be stored only as an image. When the image is presented as stimulus, additional data is required, such as field of view (eg, how much of the visual field the image covers, or what the area of the target being imaged is). If the OpticalSeries represents acquired imaging data, orientation is also important.", - "distance^": { - "description": "Distance from camera/monitor to target/eye.", - "data_type": "float32" - }, - "merge": [ - "/" - ], - "orientation^": { - "description": "Description of image relative to some reference frame (e.g., which way is up). Must also specify frame of reference.", - "data_type": "text" - }, - "attributes": { - "help?": { - "const": true, - "value": "Time-series image stack for optical recording or stimulus", - "data_type": "text" - }, - "ancestry": { - "value": [ - "TimeSeries", - "ImageSeries", - "OpticalSeries" - ], - "const": true, - "dimensions": [ - "3" - ], - "data_type": "text" - } - } - }, - "/": { - "attributes": { - "source": { - "references": "/", - "description": "Path to the origin of the data represented in this interface.", - "data_type": "text" - }, - "neurodata_type": { - "value": "Interface", - "data_type": "text" - }, - "help?": { - "description": "Short description of what this type of Interface contains.", - "data_type": "text" - } - }, - "description": "The attributes specified here are included in all interfaces.", - "_properties": { - "abstract": true - } - } - } - } - } -}