diff --git a/CHANGELOG.md b/CHANGELOG.md
index 9e173d0e4..68f5c9868 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,6 +4,8 @@
 * Removed use of `jsonschema.RefResolver` as it will be deprecated from the jsonschema library [PR #1133](https://github.com/catalystneuro/neuroconv/pull/1133)
 * Completely removed compression settings from most places [PR #1126](https://github.com/catalystneuro/neuroconv/pull/1126)
 * Soft deprecation for `file_path` as an argument of `SpikeGLXNIDQInterface` and `SpikeGLXRecordingInterface` [PR #1155](https://github.com/catalystneuro/neuroconv/pull/1155)
+* `starting_time` in RecordingInterfaces has been given a soft deprecation in favor of time alignment methods [PR #1158](https://github.com/catalystneuro/neuroconv/pull/1158)
+
 ## Bug Fixes
 * datetime objects now can be validated as conversion options [#1139](https://github.com/catalystneuro/neuroconv/pull/1126)

@@ -26,6 +28,7 @@
 * Use mixing tests for ecephy's mocks [PR #1136](https://github.com/catalystneuro/neuroconv/pull/1136)
 * Use pytest format for dandi tests to avoid window permission error on teardown [PR #1151](https://github.com/catalystneuro/neuroconv/pull/1151)
 * Added many docstrings for public functions [PR #1063](https://github.com/catalystneuro/neuroconv/pull/1063)
+* Cleaned up warnings and deprecations in the testing framework [PR #1158](https://github.com/catalystneuro/neuroconv/pull/1158)
 
 # v0.6.5 (November 1, 2024)
 
diff --git a/docs/conversion_examples_gallery/sorting/blackrock.rst b/docs/conversion_examples_gallery/sorting/blackrock.rst
index 859a2867e..fdbf299f8 100644
--- a/docs/conversion_examples_gallery/sorting/blackrock.rst
+++ b/docs/conversion_examples_gallery/sorting/blackrock.rst
@@ -19,7 +19,7 @@ Convert Blackrock sorting data to NWB using
     >>>
     >>> file_path = f"{ECEPHY_DATA_PATH}/blackrock/FileSpec2.3001.nev"
     >>> # Change the file_path to the location of the file in your system
-    >>> interface = BlackrockSortingInterface(file_path=file_path, verbose=False)
+    >>> interface = BlackrockSortingInterface(file_path=file_path, sampling_frequency=30000.0, verbose=False)
     >>>
     >>> # Extract what metadata we can from the source files
     >>> metadata = interface.get_metadata()
diff --git a/docs/conversion_examples_gallery/sorting/neuralynx.rst b/docs/conversion_examples_gallery/sorting/neuralynx.rst
index 96c3f4f21..c1015667f 100644
--- a/docs/conversion_examples_gallery/sorting/neuralynx.rst
+++ b/docs/conversion_examples_gallery/sorting/neuralynx.rst
@@ -20,7 +20,8 @@ Convert Neuralynx data to NWB using
     >>>
     >>> folder_path = f"{ECEPHY_DATA_PATH}/neuralynx/Cheetah_v5.5.1/original_data"
     >>> # Change the folder_path to the location of the data in your system
-    >>> interface = NeuralynxSortingInterface(folder_path=folder_path, verbose=False)
+    >>> # The stream_id is optional; it is used to determine the sampling frequency of the data
+    >>> interface = NeuralynxSortingInterface(folder_path=folder_path, verbose=False, stream_id="0")
     >>>
     >>> metadata = interface.get_metadata()
     >>> session_start_time = datetime(2020, 1, 1, 12, 30, 0, tzinfo=ZoneInfo("US/Pacific")).isoformat()
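The comment added to the gallery above ties `stream_id` to the sampling frequency; as the interface change further below shows, it also selects the stream used to compute `t_start`. A minimal usage sketch, with a hypothetical folder path and the same `stream_id="0"` as the gallery example:

    from neuroconv.datainterfaces import NeuralynxSortingInterface

    # If the folder holds a single stream, stream_id may be omitted and is chosen automatically;
    # with several streams, name the one the spike times should be aligned to.
    interface = NeuralynxSortingInterface(folder_path="Cheetah_v5.5.1/original_data", stream_id="0")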
diff --git a/src/neuroconv/datainterfaces/ecephys/blackrock/blackrockdatainterface.py b/src/neuroconv/datainterfaces/ecephys/blackrock/blackrockdatainterface.py
index 7e6e499d1..bc9f9b51b 100644
--- a/src/neuroconv/datainterfaces/ecephys/blackrock/blackrockdatainterface.py
+++ b/src/neuroconv/datainterfaces/ecephys/blackrock/blackrockdatainterface.py
@@ -98,8 +98,8 @@ def __init__(self, file_path: FilePath, sampling_frequency: Optional[float] = No
         The file path to the ``.nev`` data
     sampling_frequency: float, optional
         The sampling frequency for the sorting extractor. When the signal data is available (.ncs) those files will be
-        used to extract the frequency automatically. Otherwise, the sampling frequency needs to be specified for
-        this extractor to be initialized.
+        used to extract the frequency automatically. Otherwise, the sampling frequency needs to be specified for
+        this extractor to be initialized.
     verbose : bool, default: True
         Enables verbosity
     """
diff --git a/src/neuroconv/datainterfaces/ecephys/neuralynx/neuralynxdatainterface.py b/src/neuroconv/datainterfaces/ecephys/neuralynx/neuralynxdatainterface.py
index 446c06302..d87c8f0bd 100644
--- a/src/neuroconv/datainterfaces/ecephys/neuralynx/neuralynxdatainterface.py
+++ b/src/neuroconv/datainterfaces/ecephys/neuralynx/neuralynxdatainterface.py
@@ -112,7 +112,13 @@ class NeuralynxSortingInterface(BaseSortingExtractorInterface):
     associated_suffixes = (".nse", ".ntt", ".nse", ".nev")
     info = "Interface for Neuralynx sorting data."
 
-    def __init__(self, folder_path: DirectoryPath, sampling_frequency: Optional[float] = None, verbose: bool = True):
+    def __init__(
+        self,
+        folder_path: DirectoryPath,
+        sampling_frequency: Optional[float] = None,
+        verbose: bool = True,
+        stream_id: Optional[str] = None,
+    ):
         """_summary_
 
         Parameters
@@ -123,9 +129,14 @@ def __init__(self, folder_path: DirectoryPath, sampling_frequency: Optional[floa
             If a specific sampling_frequency is desired it can be set with this argument.
         verbose : bool, default: True
             Enables verbosity
+        stream_id: str, optional
+            Used by SpikeInterface and Neo to calculate t_start; if not provided and the stream is unique,
+            it will be chosen automatically.
         """
-        super().__init__(folder_path=folder_path, sampling_frequency=sampling_frequency, verbose=verbose)
+        super().__init__(
+            folder_path=folder_path, sampling_frequency=sampling_frequency, stream_id=stream_id, verbose=verbose
+        )
 
 
 def extract_neo_header_metadata(neo_reader) -> dict:
diff --git a/src/neuroconv/tools/spikeinterface/spikeinterface.py b/src/neuroconv/tools/spikeinterface/spikeinterface.py
index fa00d58ed..d31e6032c 100644
--- a/src/neuroconv/tools/spikeinterface/spikeinterface.py
+++ b/src/neuroconv/tools/spikeinterface/spikeinterface.py
@@ -862,6 +862,16 @@ def add_electrical_series_to_nwbfile(
         whenever possible.
     """
 
+    if starting_time is not None:
+        warnings.warn(
+            "The 'starting_time' parameter is deprecated and will be removed in June 2025. "
+            "Use the time alignment methods or set the recording times directly for modifying the starting time or timestamps "
+            "of the data if needed: "
+            "https://neuroconv.readthedocs.io/en/main/user_guide/temporal_alignment.html",
+            DeprecationWarning,
+            stacklevel=2,
+        )
+
     assert write_as in [
         "raw",
         "processed",
diff --git a/src/neuroconv/tools/testing/data_interface_mixins.py b/src/neuroconv/tools/testing/data_interface_mixins.py
index dc45cec53..d8586f44f 100644
--- a/src/neuroconv/tools/testing/data_interface_mixins.py
+++ b/src/neuroconv/tools/testing/data_interface_mixins.py
@@ -1,4 +1,3 @@
-import inspect
 import json
 import tempfile
 from abc import abstractmethod
@@ -407,7 +406,7 @@ def check_read_nwb(self, nwbfile_path: str):
         # Spikeinterface behavior is to load the electrode table channel_name property as a channel_id
         self.nwb_recording = NwbRecordingExtractor(
             file_path=nwbfile_path,
-            electrical_series_name=electrical_series_name,
+            electrical_series_path=f"acquisition/{electrical_series_name}",
             use_pynwb=True,
         )
 
@@ -439,7 +438,7 @@ def check_read_nwb(self, nwbfile_path: str):
                 assert_array_equal(
                     recording.get_property(property_name), self.nwb_recording.get_property(property_name)
                 )
-            if recording.has_scaled_traces() and self.nwb_recording.has_scaled_traces():
+            if recording.has_scaleable_traces() and self.nwb_recording.has_scaleable_traces():
                 check_recordings_equal(RX1=recording, RX2=self.nwb_recording, return_scaled=True)
 
         # Compare channel groups
@@ -625,29 +624,22 @@ def check_read_nwb(self, nwbfile_path: str):
         # NWBSortingExtractor on spikeinterface does not yet support loading data written from multiple segment.
         if sorting.get_num_segments() == 1:
-            # TODO after 0.100 release remove this if
-            signature = inspect.signature(NwbSortingExtractor)
-            if "t_start" in signature.parameters:
-                nwb_sorting = NwbSortingExtractor(file_path=nwbfile_path, sampling_frequency=sf, t_start=0.0)
-            else:
-                nwb_sorting = NwbSortingExtractor(file_path=nwbfile_path, sampling_frequency=sf)
+            nwb_sorting = NwbSortingExtractor(file_path=nwbfile_path, sampling_frequency=sf, t_start=0.0)
+
             # In the NWBSortingExtractor, since unit_names could be not unique,
             # table "ids" are loaded as unit_ids. Here we rename the original sorting accordingly
             if "unit_name" in sorting.get_property_keys():
                 renamed_unit_ids = sorting.get_property("unit_name")
-                # sorting_renamed = sorting.rename_units(new_unit_ids=renamed_unit_ids) #TODO after 0.100 release use this
-                sorting_renamed = sorting.select_units(unit_ids=sorting.unit_ids, renamed_unit_ids=renamed_unit_ids)
+                sorting_renamed = sorting.rename_units(new_unit_ids=renamed_unit_ids)
             else:
                 nwb_has_ids_as_strings = all(isinstance(id, str) for id in nwb_sorting.unit_ids)
                 if nwb_has_ids_as_strings:
-                    renamed_unit_ids = sorting.get_unit_ids()
-                    renamed_unit_ids = [str(id) for id in renamed_unit_ids]
+                    renamed_unit_ids = [str(id) for id in sorting.get_unit_ids()]
                 else:
                     renamed_unit_ids = np.arange(len(sorting.unit_ids))
 
-                # sorting_renamed = sorting.rename_units(new_unit_ids=sorting.unit_ids) #TODO after 0.100 release use this
-                sorting_renamed = sorting.select_units(unit_ids=sorting.unit_ids, renamed_unit_ids=renamed_unit_ids)
+                sorting_renamed = sorting.rename_units(new_unit_ids=renamed_unit_ids)
 
             check_sortings_equal(SX1=sorting_renamed, SX2=nwb_sorting)
 
     def check_interface_set_aligned_segment_timestamps(self):
diff --git a/src/neuroconv/tools/testing/mock_interfaces.py b/src/neuroconv/tools/testing/mock_interfaces.py
index 0350806bb..38cc750ab 100644
--- a/src/neuroconv/tools/testing/mock_interfaces.py
+++ b/src/neuroconv/tools/testing/mock_interfaces.py
@@ -271,6 +271,10 @@ def __init__(
             verbose=verbose,
         )
 
+        # Make the sorting extractor use string unit ids until this is changed in SpikeInterface
+        string_unit_ids = [str(id) for id in self.sorting_extractor.unit_ids]
+        self.sorting_extractor = self.sorting_extractor.rename_units(new_unit_ids=string_unit_ids)
+
     def get_metadata(self) -> dict:
         metadata = super().get_metadata()
         session_start_time = datetime.now().astimezone()
diff --git a/tests/test_ecephys/test_ecephys_interfaces.py b/tests/test_ecephys/test_ecephys_interfaces.py
index e036ccb81..168ce5068 100644
--- a/tests/test_ecephys/test_ecephys_interfaces.py
+++ b/tests/test_ecephys/test_ecephys_interfaces.py
@@ -1,24 +1,12 @@
-import shutil
-import unittest
-from datetime import datetime
-from pathlib import Path
 from platform import python_version as get_python_version
-from tempfile import mkdtemp
-from warnings import warn
 
 import jsonschema
 import numpy as np
 import pytest
 from hdmf.testing import TestCase
 from packaging.version import Version
-from pynwb import NWBHDF5IO
-from spikeinterface.extractors import NumpySorting
 
-from neuroconv import NWBConverter
 from neuroconv.datainterfaces import Spike2RecordingInterface
-from neuroconv.datainterfaces.ecephys.basesortingextractorinterface import (
-    BaseSortingExtractorInterface,
-)
 from neuroconv.tools.nwb_helpers import get_module
 from neuroconv.tools.testing.mock_interfaces import (
     MockRecordingInterface,
@@ -54,6 +42,45 @@ def test_propagate_conversion_options(self, setup_interface):
         assert nwbfile.units is None
         assert "processed_units" in ecephys.data_interfaces
 
+    def test_stub(self):
+
+        interface = MockSortingInterface(num_units=4, durations=[1.0])
+        sorting_extractor = interface.sorting_extractor
+        unit_ids = sorting_extractor.unit_ids
+        first_unit_spike = {
+            unit_id: sorting_extractor.get_unit_spike_train(unit_id=unit_id, return_times=True)[0]
+            for unit_id in unit_ids
+        }
+
+        nwbfile = interface.create_nwbfile(stub_test=True)
+        units_table = nwbfile.units.to_dataframe()
+
+        for unit_id, first_spike_time in first_unit_spike.items():
+            unit_row = units_table[units_table["unit_name"] == unit_id]
+            unit_spike_times = unit_row["spike_times"].values[0]
+            np.testing.assert_almost_equal(unit_spike_times[0], first_spike_time, decimal=6)
+
+    def test_stub_with_recording(self):
+        interface = MockSortingInterface(num_units=4, durations=[1.0])
+
+        recording_interface = MockRecordingInterface(num_channels=4, durations=[2.0])
+        interface.register_recording(recording_interface)
+
+        sorting_extractor = interface.sorting_extractor
+        unit_ids = sorting_extractor.unit_ids
+        first_unit_spike = {
+            unit_id: sorting_extractor.get_unit_spike_train(unit_id=unit_id, return_times=True)[0]
+            for unit_id in unit_ids
+        }
+
+        nwbfile = interface.create_nwbfile(stub_test=True)
+        units_table = nwbfile.units.to_dataframe()
+
+        for unit_id, first_spike_time in first_unit_spike.items():
+            unit_row = units_table[units_table["unit_name"] == unit_id]
+            unit_spike_times = unit_row["spike_times"].values[0]
+            np.testing.assert_almost_equal(unit_spike_times[0], first_spike_time, decimal=6)
+
     def test_electrode_indices(self, setup_interface):
 
         recording_interface = MockRecordingInterface(num_channels=4, durations=[0.100])
@@ -136,91 +163,3 @@ def test_spike2_import_assertions_3_11(self):
             exc_msg="\nThe package 'sonpy' is not available for Python version 3.11!",
         ):
             Spike2RecordingInterface.get_all_channels_info(file_path="does_not_matter.smrx")
-
-
-class TestSortingInterfaceOld(unittest.TestCase):
-    """Old-style tests for the SortingInterface. Remove once we we are sure all the behaviors are covered by the mock."""
-
-    @classmethod
-    def setUpClass(cls) -> None:
-        cls.test_dir = Path(mkdtemp())
-        cls.sorting_start_frames = [100, 200, 300]
-        cls.num_frames = 1000
-        cls.sampling_frequency = 3000.0
-        times = np.array([], dtype="int")
-        labels = np.array([], dtype="int")
-        for i, start_frame in enumerate(cls.sorting_start_frames):
-            times_i = np.arange(start_frame, cls.num_frames, dtype="int")
-            labels_i = (i + 1) * np.ones_like(times_i, dtype="int")
-            times = np.concatenate((times, times_i))
-            labels = np.concatenate((labels, labels_i))
-        sorting = NumpySorting.from_times_labels(times, labels, sampling_frequency=cls.sampling_frequency)
-
-        class TestSortingInterface(BaseSortingExtractorInterface):
-            ExtractorName = "NumpySorting"
-
-            def __init__(self, verbose: bool = True):
-                self.sorting_extractor = sorting
-                self.source_data = dict()
-                self.verbose = verbose
-
-        class TempConverter(NWBConverter):
-            data_interface_classes = dict(TestSortingInterface=TestSortingInterface)
-
-        source_data = dict(TestSortingInterface=dict())
-        cls.test_sorting_interface = TempConverter(source_data)
-
-    @classmethod
-    def tearDownClass(cls):
-        try:
-            shutil.rmtree(cls.test_dir)
-        except PermissionError:  # Windows CI bug
-            warn(f"Unable to fully clean the temporary directory: {cls.test_dir}\n\nPlease remove it manually.")
-
-    def test_sorting_stub(self):
-        minimal_nwbfile = self.test_dir / "stub_temp.nwb"
-        conversion_options = dict(TestSortingInterface=dict(stub_test=True))
-        metadata = self.test_sorting_interface.get_metadata()
-        metadata["NWBFile"]["session_start_time"] = datetime.now().astimezone()
-        self.test_sorting_interface.run_conversion(
-            nwbfile_path=minimal_nwbfile, metadata=metadata, conversion_options=conversion_options
-        )
-        with NWBHDF5IO(minimal_nwbfile, "r") as io:
-            nwbfile = io.read()
-            start_frame_max = np.max(self.sorting_start_frames)
-            for i, start_times in enumerate(self.sorting_start_frames):
-                assert len(nwbfile.units["spike_times"][i]) == (start_frame_max * 1.1) - start_times
-
-    def test_sorting_stub_with_recording(self):
-        subset_end_frame = int(np.max(self.sorting_start_frames) * 1.1 - 1)
-        sorting_interface = self.test_sorting_interface.data_interface_objects["TestSortingInterface"]
-        sorting_interface.sorting_extractor = sorting_interface.sorting_extractor.frame_slice(
-            start_frame=0, end_frame=subset_end_frame
-        )
-        recording_interface = MockRecordingInterface(
-            durations=[subset_end_frame / self.sampling_frequency],
-            sampling_frequency=self.sampling_frequency,
-        )
-        sorting_interface.register_recording(recording_interface)
-
-        minimal_nwbfile = self.test_dir / "stub_temp_recording.nwb"
-        conversion_options = dict(TestSortingInterface=dict(stub_test=True))
-        metadata = self.test_sorting_interface.get_metadata()
-        metadata["NWBFile"]["session_start_time"] = datetime.now().astimezone()
-        self.test_sorting_interface.run_conversion(
-            nwbfile_path=minimal_nwbfile, metadata=metadata, conversion_options=conversion_options
-        )
-        with NWBHDF5IO(minimal_nwbfile, "r") as io:
-            nwbfile = io.read()
-            for i, start_times in enumerate(self.sorting_start_frames):
-                assert len(nwbfile.units["spike_times"][i]) == subset_end_frame - start_times
-
-    def test_sorting_full(self):
-        minimal_nwbfile = self.test_dir / "temp.nwb"
-        metadata = self.test_sorting_interface.get_metadata()
-        metadata["NWBFile"]["session_start_time"] = datetime.now().astimezone()
-        self.test_sorting_interface.run_conversion(nwbfile_path=minimal_nwbfile, metadata=metadata)
-        with NWBHDF5IO(minimal_nwbfile, "r") as io:
-            nwbfile = io.read()
-            for i, start_times in enumerate(self.sorting_start_frames):
-                assert len(nwbfile.units["spike_times"][i]) == self.num_frames - start_times
diff --git a/tests/test_minimal/test_tools/test_backend_and_dataset_configuration/test_helpers/test_configure_backend_defaults.py b/tests/test_minimal/test_tools/test_backend_and_dataset_configuration/test_helpers/test_configure_backend_defaults.py
index 693d8e4d0..a2f267c5d 100644
--- a/tests/test_minimal/test_tools/test_backend_and_dataset_configuration/test_helpers/test_configure_backend_defaults.py
+++ b/tests/test_minimal/test_tools/test_backend_and_dataset_configuration/test_helpers/test_configure_backend_defaults.py
@@ -66,7 +66,7 @@ def test_simple_time_series(
     dataset_configuration = backend_configuration.dataset_configurations["acquisition/TestTimeSeries/data"]
     configure_backend(nwbfile=nwbfile, backend_configuration=backend_configuration)
 
-    nwbfile_path = str(tmpdir / f"test_configure_defaults_{case_name}_time_series.nwb.{backend}")
+    nwbfile_path = str(tmpdir / f"test_configure_defaults_{case_name}_time_series.nwb")
     with BACKEND_NWB_IO[backend](path=nwbfile_path, mode="w") as io:
         io.write(nwbfile)
 
@@ -98,7 +98,7 @@ def test_simple_dynamic_table(tmpdir: Path, integer_array: np.ndarray, backend:
     dataset_configuration = backend_configuration.dataset_configurations["acquisition/TestDynamicTable/TestColumn/data"]
     configure_backend(nwbfile=nwbfile, backend_configuration=backend_configuration)
 
-    nwbfile_path = str(tmpdir / f"test_configure_defaults_dynamic_table.nwb.{backend}")
+    nwbfile_path = str(tmpdir / "test_configure_defaults_dynamic_table.nwb")
     NWB_IO = BACKEND_NWB_IO[backend]
     with NWB_IO(path=nwbfile_path, mode="w") as io:
         io.write(nwbfile)
@@ -164,7 +164,7 @@ def test_time_series_timestamps_linkage(
     assert nwbfile.acquisition["TestTimeSeries1"].timestamps
     assert nwbfile.acquisition["TestTimeSeries2"].timestamps
 
-    nwbfile_path = str(tmpdir / f"test_time_series_timestamps_linkage_{case_name}_data.nwb.{backend}")
+    nwbfile_path = str(tmpdir / f"test_time_series_timestamps_linkage_{case_name}_data.nwb")
     with BACKEND_NWB_IO[backend](path=nwbfile_path, mode="w") as io:
         io.write(nwbfile)
 
diff --git a/tests/test_minimal/test_tools/test_backend_and_dataset_configuration/test_helpers/test_configure_backend_equivalency.py b/tests/test_minimal/test_tools/test_backend_and_dataset_configuration/test_helpers/test_configure_backend_equivalency.py
index dd938721f..225e0af66 100644
--- a/tests/test_minimal/test_tools/test_backend_and_dataset_configuration/test_helpers/test_configure_backend_equivalency.py
+++ b/tests/test_minimal/test_tools/test_backend_and_dataset_configuration/test_helpers/test_configure_backend_equivalency.py
@@ -69,7 +69,7 @@ def test_configure_backend_equivalency(
     dataset_configuration.compression_options = {"level": 2}
     configure_backend(nwbfile=nwbfile_1, backend_configuration=backend_configuration_2)
 
-    nwbfile_path = str(tmpdir / f"test_configure_backend_equivalency.nwb.{backend}")
+    nwbfile_path = str(tmpdir / "test_configure_backend_equivalency.nwb")
     with BACKEND_NWB_IO[backend](path=nwbfile_path, mode="w") as io:
         io.write(nwbfile_1)
 
diff --git a/tests/test_minimal/test_tools/test_backend_and_dataset_configuration/test_helpers/test_configure_backend_overrides.py b/tests/test_minimal/test_tools/test_backend_and_dataset_configuration/test_helpers/test_configure_backend_overrides.py
index dba8bb891..15cc9ee48 100644
--- a/tests/test_minimal/test_tools/test_backend_and_dataset_configuration/test_helpers/test_configure_backend_overrides.py
+++ b/tests/test_minimal/test_tools/test_backend_and_dataset_configuration/test_helpers/test_configure_backend_overrides.py
@@ -58,7 +58,7 @@ def test_simple_time_series_override(
     if case_name != "unwrapped":  # TODO: eventually, even this case will be buffered automatically
         assert nwbfile.acquisition["TestTimeSeries"].data
 
-    nwbfile_path = str(tmpdir / f"test_configure_defaults_{case_name}_data.nwb.{backend}")
+    nwbfile_path = str(tmpdir / f"test_configure_defaults_{case_name}_data.nwb")
     with BACKEND_NWB_IO[backend](path=nwbfile_path, mode="w") as io:
         io.write(nwbfile)
 
@@ -99,7 +99,7 @@ def test_simple_dynamic_table_override(tmpdir: Path, backend: Literal["hdf5", "z
 
     configure_backend(nwbfile=nwbfile, backend_configuration=backend_configuration)
 
-    nwbfile_path = str(tmpdir / f"test_configure_defaults_dynamic_table.nwb.{backend}")
+    nwbfile_path = str(tmpdir / "test_configure_defaults_dynamic_table.nwb")
     NWB_IO = BACKEND_NWB_IO[backend]
     with NWB_IO(path=nwbfile_path, mode="w") as io:
         io.write(nwbfile)
diff --git a/tests/test_minimal/test_tools/test_backend_and_dataset_configuration/test_helpers/test_configure_backend_zero_length_axes.py b/tests/test_minimal/test_tools/test_backend_and_dataset_configuration/test_helpers/test_configure_backend_zero_length_axes.py
index 39980f22e..c0c63d6c7 100644
--- a/tests/test_minimal/test_tools/test_backend_and_dataset_configuration/test_helpers/test_configure_backend_zero_length_axes.py
+++ b/tests/test_minimal/test_tools/test_backend_and_dataset_configuration/test_helpers/test_configure_backend_zero_length_axes.py
@@ -114,7 +114,7 @@ def test_dynamic_table_skip_zero_length_axis(
     dataset_configuration = backend_configuration.dataset_configurations["acquisition/TestDynamicTable/TestColumn/data"]
     configure_backend(nwbfile=nwbfile, backend_configuration=backend_configuration)
 
-    nwbfile_path = str(tmpdir / f"test_configure_defaults_dynamic_table.nwb.{backend}")
+    nwbfile_path = str(tmpdir / "test_configure_defaults_dynamic_table.nwb")
     NWB_IO = BACKEND_NWB_IO[backend]
     with NWB_IO(path=nwbfile_path, mode="w") as io:
         io.write(nwbfile)
diff --git a/tests/test_minimal/test_tools/test_backend_and_dataset_configuration/test_helpers/test_get_default_dataset_io_configurations_appended_files.py b/tests/test_minimal/test_tools/test_backend_and_dataset_configuration/test_helpers/test_get_default_dataset_io_configurations_appended_files.py
index dca727f03..0dfd4650f 100644
--- a/tests/test_minimal/test_tools/test_backend_and_dataset_configuration/test_helpers/test_get_default_dataset_io_configurations_appended_files.py
+++ b/tests/test_minimal/test_tools/test_backend_and_dataset_configuration/test_helpers/test_get_default_dataset_io_configurations_appended_files.py
@@ -31,7 +31,7 @@ def generate_nwbfile_with_existing_time_series() -> NWBFile:
 
 @pytest.fixture(scope="session")
 def hdf5_nwbfile_path(tmpdir_factory):
-    nwbfile_path = tmpdir_factory.mktemp("data").join("test_default_dataset_configurations_hdf5_nwbfile_.nwb.h5")
+    nwbfile_path = tmpdir_factory.mktemp("data").join("test_default_dataset_configurations_hdf5_nwbfile_.nwb")
     if not Path(nwbfile_path).exists():
         nwbfile = generate_nwbfile_with_existing_time_series()
         with NWBHDF5IO(path=str(nwbfile_path), mode="w") as io:
diff --git a/tests/test_on_data/ecephys/test_lfp.py b/tests/test_on_data/ecephys/test_lfp.py
index 010516d86..1c537238f 100644
--- a/tests/test_on_data/ecephys/test_lfp.py
+++ b/tests/test_on_data/ecephys/test_lfp.py
@@ -99,7 +99,7 @@ class TestConverter(NWBConverter):
             npt.assert_array_equal(x=recording.get_traces(return_scaled=False), y=nwb_lfp_unscaled)
 
             # This can only be tested if both gain and offset are present
-            if recording.has_scaled_traces():
+            if recording.has_scaleable_traces():
                 channel_conversion = nwb_lfp_electrical_series.channel_conversion
                 nwb_lfp_conversion_vector = (
                     channel_conversion[:]
diff --git a/tests/test_on_data/ecephys/test_raw_recordings.py b/tests/test_on_data/ecephys/test_raw_recordings.py
index f892d6457..08826e3ba 100644
--- a/tests/test_on_data/ecephys/test_raw_recordings.py
+++ b/tests/test_on_data/ecephys/test_raw_recordings.py
@@ -97,16 +97,14 @@ class TestConverter(NWBConverter):
                 renamed_channel_ids = recording.get_property("channel_name")
             else:
                 renamed_channel_ids = recording.get_channel_ids().astype("str")
-            recording = recording.channel_slice(
-                channel_ids=recording.get_channel_ids(), renamed_channel_ids=renamed_channel_ids
-            )
+            recording = recording.rename_channels(new_channel_ids=renamed_channel_ids)
 
             # Edge case that only occurs in testing, but should eventually be fixed nonetheless
             # The NwbRecordingExtractor on spikeinterface experiences an issue when duplicated channel_ids
             # are specified, which occurs during check_recordings_equal when there is only one channel
             if nwb_recording.get_channel_ids()[0] != nwb_recording.get_channel_ids()[-1]:
                 check_recordings_equal(RX1=recording, RX2=nwb_recording, return_scaled=False)
-            if recording.has_scaled_traces() and nwb_recording.has_scaled_traces():
+            if recording.has_scaleable_traces() and nwb_recording.has_scaleable_traces():
                 check_recordings_equal(RX1=recording, RX2=nwb_recording, return_scaled=True)
 
diff --git a/tests/test_on_data/ecephys/test_sorting_interfaces.py b/tests/test_on_data/ecephys/test_sorting_interfaces.py
index 492c4ad9f..054f08391 100644
--- a/tests/test_on_data/ecephys/test_sorting_interfaces.py
+++ b/tests/test_on_data/ecephys/test_sorting_interfaces.py
@@ -27,7 +27,7 @@ class TestBlackrockSortingInterface(SortingExtractorInterfaceTestMixin):
     data_interface_cls = BlackrockSortingInterface
-    interface_kwargs = dict(file_path=str(DATA_PATH / "blackrock" / "FileSpec2.3001.nev"))
+    interface_kwargs = dict(file_path=str(DATA_PATH / "blackrock" / "FileSpec2.3001.nev"), sampling_frequency=30_000.0)
 
     associated_recording_cls = BlackrockRecordingInterface
     associated_recording_kwargs = dict(file_path=str(DATA_PATH / "blackrock" / "FileSpec2.3001.ns5"))
@@ -154,13 +154,17 @@ def test_writing_channel_metadata(self, setup_interface):
 
 class TestNeuralynxSortingInterfaceCheetahV551(SortingExtractorInterfaceTestMixin):
     data_interface_cls = NeuralynxSortingInterface
-    interface_kwargs = dict(folder_path=str(DATA_PATH / "neuralynx" / "Cheetah_v5.5.1" / "original_data"))
+    interface_kwargs = dict(
+        folder_path=str(DATA_PATH / "neuralynx" / "Cheetah_v5.5.1" / "original_data"), stream_id="0"
+    )
 
     save_directory = OUTPUT_PATH
 
 
 class TestNeuralynxSortingInterfaceCheetah563(SortingExtractorInterfaceTestMixin):
     data_interface_cls = NeuralynxSortingInterface
-    interface_kwargs = dict(folder_path=str(DATA_PATH / "neuralynx" / "Cheetah_v5.6.3" / "original_data"))
+    interface_kwargs = dict(
+        folder_path=str(DATA_PATH / "neuralynx" / "Cheetah_v5.6.3" / "original_data"), stream_id="0"
+    )
 
     save_directory = OUTPUT_PATH
 
diff --git a/tests/test_on_data/ecephys/test_spikeglx_converter.py b/tests/test_on_data/ecephys/test_spikeglx_converter.py
index 93b228053..27a8ed0c5 100644
--- a/tests/test_on_data/ecephys/test_spikeglx_converter.py
+++ b/tests/test_on_data/ecephys/test_spikeglx_converter.py
@@ -212,7 +212,7 @@ def test_electrode_table_writing(tmp_path):
     # Test round trip with spikeinterface
     recording_extractor_ap = NwbRecordingExtractor(
         file_path=nwbfile_path,
-        electrical_series_name="ElectricalSeriesAP",
+        electrical_series_path="acquisition/ElectricalSeriesAP",
     )
 
     channel_ids = recording_extractor_ap.get_channel_ids()
@@ -220,7 +220,7 @@ def test_electrode_table_writing(tmp_path):
 
     recording_extractor_lf = NwbRecordingExtractor(
         file_path=nwbfile_path,
-        electrical_series_name="ElectricalSeriesLF",
+        electrical_series_path="acquisition/ElectricalSeriesLF",
    )
 
     channel_ids = recording_extractor_lf.get_channel_ids()
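The replacements above follow SpikeInterface's renamed argument: `electrical_series_name` became `electrical_series_path`, which takes the full path of the series inside the NWB file rather than its bare name. A minimal read-back sketch, with a hypothetical file path:

    from spikeinterface.extractors import NwbRecordingExtractor

    recording = NwbRecordingExtractor(
        file_path="conversion.nwb",
        electrical_series_path="acquisition/ElectricalSeriesAP",  # formerly electrical_series_name="ElectricalSeriesAP"
    )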
diff --git a/tests/test_on_data/ecephys/test_spikeglx_metadata.py b/tests/test_on_data/ecephys/test_spikeglx_metadata.py
index 29d4cb5ad..9d57f049d 100644
--- a/tests/test_on_data/ecephys/test_spikeglx_metadata.py
+++ b/tests/test_on_data/ecephys/test_spikeglx_metadata.py
@@ -1,7 +1,6 @@
 import datetime
 
 import probeinterface as pi
-import pytest
 from numpy.testing import assert_array_equal
 from spikeinterface.extractors import SpikeGLXRecordingExtractor
 
@@ -38,7 +37,8 @@ def test_spikelgx_recording_property_addition():
     expected_contact_ids = probe.contact_ids
 
     # Initialize the interface and get the added properties
-    interface = SpikeGLXRecordingInterface(file_path=ap_file_path)
+    folder_path = ap_file_path.parent
+    interface = SpikeGLXRecordingInterface(folder_path=folder_path, stream_id="imec0.ap")
     group_name = interface.recording_extractor.get_property("group_name")
     contact_shapes = interface.recording_extractor.get_property("contact_shapes")
     shank_ids = interface.recording_extractor.get_property("shank_ids")
@@ -48,21 +48,3 @@ def test_spikelgx_recording_property_addition():
     assert_array_equal(contact_shapes, expected_contact_shapes)
     assert_array_equal(shank_ids, expected_shank_ids)
     assert_array_equal(contact_ids, expected_contact_ids)
-
-
-@pytest.mark.skip(reason="Legacy spikeextractors cannot read new GIN file.")
-def test_matching_recording_property_addition_between_backends():
-    """Test that the extracted properties match with both backends"""
-    folder_path = SPIKEGLX_PATH / "Noise4Sam_g0" / "Noise4Sam_g0_imec0"
-    ap_file_path = folder_path / "Noise4Sam_g0_t0.imec0.ap.bin"
-
-    interface_new = SpikeGLXRecordingInterface(file_path=ap_file_path)
-    shank_electrode_number_new = interface_new.recording_extractor.get_property("shank_electrode_number")
-    group_name_new = interface_new.recording_extractor.get_property("group_name")
-
-    interface_old = SpikeGLXRecordingInterface(file_path=ap_file_path, spikeextractors_backend=True)
-    shank_electrode_number_old = interface_old.recording_extractor.get_property("shank_electrode_number")
-    group_name_old = interface_old.recording_extractor.get_property("group_name")
-
-    assert_array_equal(shank_electrode_number_new, shank_electrode_number_old)
-    assert_array_equal(group_name_new, group_name_old)
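One recurring rename in the tests above deserves a note: SpikeInterface's `has_scaled_traces()` is now `has_scaleable_traces()`, and it still guards whether traces can be returned in scaled units. A short sketch, assuming `recording` is any extractor with gain and offset set:

    # Guard scaled-trace access exactly as the updated tests do:
    if recording.has_scaleable_traces():  # formerly recording.has_scaled_traces()
        traces = recording.get_traces(return_scaled=True)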