diff --git a/src/schneider_lab_to_nwb/schneider_2024/__init__.py b/src/schneider_lab_to_nwb/schneider_2024/__init__.py deleted file mode 100644 index 11f8f68..0000000 --- a/src/schneider_lab_to_nwb/schneider_2024/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from .schneider_2024_behaviorinterface import Schneider2024BehaviorInterface -from .schneider_2024_optogeneticinterface import Schneider2024OptogeneticInterface -from .schneider_2024_intrinsic_signal_imaging_interface import Schneider2024IntrinsicSignalOpticalImagingInterface -from .schneider_2024_nwbconverter import Schneider2024NWBConverter diff --git a/src/schneider_lab_to_nwb/schneider_2024/schneider_2024_convert_all_sessions.py b/src/schneider_lab_to_nwb/schneider_2024/schneider_2024_convert_all_sessions.py deleted file mode 100644 index 8cb5833..0000000 --- a/src/schneider_lab_to_nwb/schneider_2024/schneider_2024_convert_all_sessions.py +++ /dev/null @@ -1,108 +0,0 @@ -"""Primary script to run to convert all sessions in a dataset using session_to_nwb.""" -from pathlib import Path -from concurrent.futures import ProcessPoolExecutor, as_completed -from pprint import pformat -import traceback -from tqdm import tqdm - -from .schneider_2024_convert_session import session_to_nwb - - -def dataset_to_nwb( - *, - data_dir_path: str | Path, - output_dir_path: str | Path, - max_workers: int = 1, - verbose: bool = True, -): - """Convert the entire dataset to NWB. - - Parameters - ---------- - data_dir_path : str | Path - The path to the directory containing the raw data. - output_dir_path : str | Path - The path to the directory where the NWB files will be saved. - max_workers : int, optional - The number of workers to use for parallel processing, by default 1 - verbose : bool, optional - Whether to print verbose output, by default True - """ - data_dir_path = Path(data_dir_path) - session_to_nwb_kwargs_per_session = get_session_to_nwb_kwargs_per_session( - data_dir_path=data_dir_path, - ) - - futures = [] - with ProcessPoolExecutor(max_workers=max_workers) as executor: - for session_to_nwb_kwargs in session_to_nwb_kwargs_per_session: - session_to_nwb_kwargs["output_dir_path"] = output_dir_path - session_to_nwb_kwargs["verbose"] = verbose - exception_file_path = data_dir_path / f"ERROR_.txt" # Add error file path here - futures.append( - executor.submit( - safe_session_to_nwb, - session_to_nwb_kwargs=session_to_nwb_kwargs, - exception_file_path=exception_file_path, - ) - ) - for _ in tqdm(as_completed(futures), total=len(futures)): - pass - - -def safe_session_to_nwb(*, session_to_nwb_kwargs: dict, exception_file_path: str | Path): - """Convert a session to NWB while handling any errors by recording error messages to the exception_file_path. - - Parameters - ---------- - session_to_nwb_kwargs : dict - The arguments for session_to_nwb. - exception_file_path : Path - The path to the file where the exception messages will be saved. - """ - exception_file_path = Path(exception_file_path) - try: - session_to_nwb(**session_to_nwb_kwargs) - except Exception as e: - with open(exception_file_path, mode="w") as f: - f.write(f"session_to_nwb_kwargs: \n {pformat(session_to_nwb_kwargs)}\n\n") - f.write(traceback.format_exc()) - - -def get_session_to_nwb_kwargs_per_session( - *, - data_dir_path: str | Path, -): - """Get the kwargs for session_to_nwb for each session in the dataset. - - Parameters - ---------- - data_dir_path : str | Path - The path to the directory containing the raw data. 
- - Returns - ------- - list[dict[str, Any]] - A list of dictionaries containing the kwargs for session_to_nwb for each session. - """ - ##### - # # Implement this function to return the kwargs for session_to_nwb for each session - # This can be a specific list with hard-coded sessions, a path expansion or any conversion specific logic that you might need - ##### - raise NotImplementedError - - -if __name__ == "__main__": - - # Parameters for conversion - data_dir_path = Path("/Directory/With/Raw/Formats/") - output_dir_path = Path("~/conversion_nwb/") - max_workers = 1 - verbose = False - - dataset_to_nwb( - data_dir_path=data_dir_path, - output_dir_path=output_dir_path, - max_workers=max_workers, - verbose=False, - ) diff --git a/src/schneider_lab_to_nwb/schneider_2024/schneider_2024_convert_session.py b/src/schneider_lab_to_nwb/schneider_2024/schneider_2024_convert_session.py deleted file mode 100644 index b4e7e7d..0000000 --- a/src/schneider_lab_to_nwb/schneider_2024/schneider_2024_convert_session.py +++ /dev/null @@ -1,140 +0,0 @@ -"""Primary script to run to convert an entire session for of data using the NWBConverter.""" -from pathlib import Path -import datetime -import pytz -from zoneinfo import ZoneInfo -import shutil -from pprint import pprint -import numpy as np - -from neuroconv.utils import load_dict_from_file, dict_deep_update -from schneider_lab_to_nwb.schneider_2024 import Schneider2024NWBConverter - - -def session_to_nwb( - recording_folder_path: str | Path, - sorting_folder_path: str | Path, - behavior_file_path: str | Path, - video_folder_path: str | Path, - intrinsic_signal_optical_imaging_folder_path: str | Path, - output_dir_path: str | Path, - stub_test: bool = False, -): - recording_folder_path = Path(recording_folder_path) - sorting_folder_path = Path(sorting_folder_path) - behavior_file_path = Path(behavior_file_path) - video_folder_path = Path(video_folder_path) - intrinsic_signal_optical_imaging_folder_path = Path(intrinsic_signal_optical_imaging_folder_path) - output_dir_path = Path(output_dir_path) - video_file_paths = [ - file_path for file_path in video_folder_path.glob("*.mp4") if not file_path.name.startswith("._") - ] - video_file_paths = sorted(video_file_paths) - if stub_test: - output_dir_path = output_dir_path / "nwb_stub" - recording_folder_path = recording_folder_path.with_name(recording_folder_path.name + "_stubbed") - output_dir_path.mkdir(parents=True, exist_ok=True) - - session_id = "sample_session" - nwbfile_path = output_dir_path / f"{session_id}.nwb" - - source_data = dict() - conversion_options = dict() - - # Add Recording - stream_name = "Signals CH" # stream_names = ["Signals CH", "Signals AUX"] - source_data.update(dict(Recording=dict(folder_path=recording_folder_path, stream_name=stream_name))) - conversion_options.update(dict(Recording=dict(stub_test=stub_test))) - - # Add Sorting - source_data.update(dict(Sorting=dict(folder_path=sorting_folder_path))) - conversion_options.update(dict(Sorting=dict())) - - # Add Behavior - source_data.update(dict(Behavior=dict(file_path=behavior_file_path))) - conversion_options.update(dict(Behavior=dict())) - - # Add Video(s) - for i, video_file_path in enumerate(video_file_paths): - metadata_key_name = f"VideoCamera{i+1}" - source_data.update({metadata_key_name: dict(file_paths=[video_file_path], metadata_key_name=metadata_key_name)}) - conversion_options.update({metadata_key_name: dict()}) - - # Add Optogenetic - source_data.update(dict(Optogenetic=dict(file_path=behavior_file_path))) - 
conversion_options.update(dict(Optogenetic=dict())) - - # Add Intrinsic Signal Optical Imaging - source_data.update(dict(ISOI=dict(folder_path=intrinsic_signal_optical_imaging_folder_path))) - conversion_options.update(dict(ISOI=dict())) - - converter = Schneider2024NWBConverter(source_data=source_data) - - # Add datetime to conversion - metadata = converter.get_metadata() - EST = ZoneInfo("US/Eastern") - metadata["NWBFile"]["session_start_time"] = metadata["NWBFile"]["session_start_time"].replace(tzinfo=EST) - - # Update default metadata with the editable in the corresponding yaml file - editable_metadata_path = Path(__file__).parent / "schneider_2024_metadata.yaml" - editable_metadata = load_dict_from_file(editable_metadata_path) - metadata = dict_deep_update(metadata, editable_metadata) - - metadata["Subject"]["subject_id"] = "a_subject_id" # Modify here or in the yaml file - conversion_options["Sorting"]["units_description"] = metadata["Sorting"]["units_description"] - - # Add electrode metadata - channel_positions = np.load(sorting_folder_path / "channel_positions.npy") - if stub_test: - channel_positions = channel_positions[:1, :] - location = metadata["Ecephys"]["ElectrodeGroup"][0]["location"] - channel_ids = converter.data_interface_objects["Recording"].recording_extractor.get_channel_ids() - converter.data_interface_objects["Recording"].recording_extractor.set_channel_locations( - channel_ids=channel_ids, locations=channel_positions - ) - converter.data_interface_objects["Recording"].recording_extractor.set_property( - key="brain_area", - ids=channel_ids, - values=[location] * len(channel_ids), - ) - metadata["Ecephys"]["Device"] = editable_metadata["Ecephys"]["Device"] - - # Overwrite video metadata - for i, video_file_path in enumerate(video_file_paths): - metadata_key_name = f"VideoCamera{i+1}" - metadata["Behavior"][metadata_key_name] = editable_metadata["Behavior"][metadata_key_name] - - # Run conversion - converter.run_conversion(metadata=metadata, nwbfile_path=nwbfile_path, conversion_options=conversion_options) - - -def main(): - # Parameters for conversion - data_dir_path = Path("/Volumes/T7/CatalystNeuro/Schneider") - output_dir_path = Path("/Volumes/T7/CatalystNeuro/Schneider/conversion_nwb") - stub_test = True - - if output_dir_path.exists(): - shutil.rmtree(output_dir_path, ignore_errors=True) - - # Example Session w/ old ephys + new behavior - recording_folder_path = data_dir_path / "Schneider sample Data" / "Raw Ephys" / "m69_2023-10-31_17-24-15_Day1_A1" - sorting_folder_path = ( - data_dir_path / "Schneider sample Data" / "Processed Ephys" / "m69_2023-10-31_17-24-15_Day1_A1" - ) - behavior_file_path = data_dir_path / "NWB_Share" / "Sample behavior data" / "m74_optoSample.mat" - video_folder_path = data_dir_path / "Schneider sample Data" / "Video" / "m69_231031" - intrinsic_signal_optical_imaging_folder_path = data_dir_path / "NWB_Share" / "Sample Intrinsic imaging data" - session_to_nwb( - recording_folder_path=recording_folder_path, - sorting_folder_path=sorting_folder_path, - behavior_file_path=behavior_file_path, - video_folder_path=video_folder_path, - intrinsic_signal_optical_imaging_folder_path=intrinsic_signal_optical_imaging_folder_path, - output_dir_path=output_dir_path, - stub_test=stub_test, - ) - - -if __name__ == "__main__": - main() diff --git a/src/schneider_lab_to_nwb/schneider_2024/schneider_2024_notes.md b/src/schneider_lab_to_nwb/schneider_2024/schneider_2024_notes.md deleted file mode 100644 index 1a2030e..0000000 --- 
a/src/schneider_lab_to_nwb/schneider_2024/schneider_2024_notes.md +++ /dev/null @@ -1,23 +0,0 @@ -# Notes concerning the schneider_2024 conversion - -## Behavior - -## Video - -## Optogenetics -- is_opto_trial in the trials table is `np.logical_not(np.isnan(onset_times))` rather than reading from the .mat file - to ensure consistency with the onset/offset times. - -## Intrinsic Signal Optical Imaging -- Just including raw blood vessel image and processed overlay + pixel locations bc including the isoi roi response series would really require an extension for context, but seems like it has limited reuse potential. -- Used the Audette paper for description of overlay image. -- Need pixel locs for ephys -- Need device info for 2p microscope and red light laser -- Why is the overlaid image flipped left/right compared to the original? - - -## Data Requests -- Mice sexes -- Remaining data for Grant's project -- More detailed position info for recording probe -- Detailed description of temporal alignment procedure. diff --git a/src/schneider_lab_to_nwb/schneider_2024/schneider_2024_nwbconverter.py b/src/schneider_lab_to_nwb/schneider_2024/schneider_2024_nwbconverter.py deleted file mode 100644 index e51d6e5..0000000 --- a/src/schneider_lab_to_nwb/schneider_2024/schneider_2024_nwbconverter.py +++ /dev/null @@ -1,28 +0,0 @@ -"""Primary NWBConverter class for this dataset.""" -from neuroconv import NWBConverter -from neuroconv.datainterfaces import ( - OpenEphysRecordingInterface, - PhySortingInterface, - VideoInterface, -) -from neuroconv.basedatainterface import BaseDataInterface - -from schneider_lab_to_nwb.schneider_2024 import ( - Schneider2024BehaviorInterface, - Schneider2024OptogeneticInterface, - Schneider2024IntrinsicSignalOpticalImagingInterface, -) - - -class Schneider2024NWBConverter(NWBConverter): - """Primary conversion class for my extracellular electrophysiology dataset.""" - - data_interface_classes = dict( - Recording=OpenEphysRecordingInterface, - Sorting=PhySortingInterface, - Behavior=Schneider2024BehaviorInterface, - VideoCamera1=VideoInterface, - VideoCamera2=VideoInterface, - Optogenetic=Schneider2024OptogeneticInterface, - ISOI=Schneider2024IntrinsicSignalOpticalImagingInterface, - ) diff --git a/src/schneider_lab_to_nwb/schneider_2024/schneider_2024_requirements.txt b/src/schneider_lab_to_nwb/schneider_2024/schneider_2024_requirements.txt deleted file mode 100644 index 458b8a2..0000000 --- a/src/schneider_lab_to_nwb/schneider_2024/schneider_2024_requirements.txt +++ /dev/null @@ -1,3 +0,0 @@ -nwb-conversion-tools==0.11.1 # Example of specific pinned dependecy -some-extra-package==1.11.3 # Example of another extra package that's necessary for the current conversion -roiextractors @ git+https://github.com/catalystneuro/roiextractors.git@8db5f9cb3a7ee5efee49b7fd0b694c7a8105519a # Github pinned dependency diff --git a/src/schneider_lab_to_nwb/zempolich_2024/__init__.py b/src/schneider_lab_to_nwb/zempolich_2024/__init__.py new file mode 100644 index 0000000..441f49d --- /dev/null +++ b/src/schneider_lab_to_nwb/zempolich_2024/__init__.py @@ -0,0 +1,5 @@ +from .zempolich_2024_behaviorinterface import Zempolich2024BehaviorInterface +from .zempolich_2024_optogeneticinterface import Zempolich2024OptogeneticInterface +from .zempolich_2024_intrinsic_signal_imaging_interface import Zempolich2024IntrinsicSignalOpticalImagingInterface +from .zempolich_2024_open_ephys_recording_interface import Zempolich2024OpenEphysRecordingInterface +from .zempolich_2024_nwbconverter import 
Zempolich2024NWBConverter diff --git a/src/schneider_lab_to_nwb/schneider_2024/schneider_2024_behaviorinterface.py b/src/schneider_lab_to_nwb/zempolich_2024/zempolich_2024_behaviorinterface.py similarity index 76% rename from src/schneider_lab_to_nwb/schneider_2024/schneider_2024_behaviorinterface.py rename to src/schneider_lab_to_nwb/zempolich_2024/zempolich_2024_behaviorinterface.py index 63b7b97..0c6a65f 100644 --- a/src/schneider_lab_to_nwb/schneider_2024/schneider_2024_behaviorinterface.py +++ b/src/schneider_lab_to_nwb/zempolich_2024/zempolich_2024_behaviorinterface.py @@ -13,7 +13,7 @@ from neuroconv.tools import nwb_helpers -class Schneider2024BehaviorInterface(BaseDataInterface): +class Zempolich2024BehaviorInterface(BaseDataInterface): """Behavior interface for schneider_2024 conversion""" keywords = ("behavior",) @@ -89,14 +89,19 @@ def get_metadata_schema(self) -> dict: } return metadata_schema - def add_to_nwbfile(self, nwbfile: NWBFile, metadata: dict): + def add_to_nwbfile( + self, nwbfile: NWBFile, metadata: dict, normalize_timestamps: bool = False, verbose: bool = False + ): # Read Data file_path = self.source_data["file_path"] file = read_mat(file_path) behavioral_time_series, name_to_times, name_to_values, name_to_trial_array = [], dict(), dict(), dict() + starting_timestamp = file["continuous"][metadata["Behavior"]["TimeSeries"][0]["name"]]["time"][0] for time_series_dict in metadata["Behavior"]["TimeSeries"]: name = time_series_dict["name"] timestamps = np.array(file["continuous"][name]["time"]).squeeze() + if normalize_timestamps: + timestamps = timestamps - starting_timestamp data = np.array(file["continuous"][name]["value"]).squeeze() time_series = TimeSeries( name=name, @@ -109,20 +114,34 @@ def add_to_nwbfile(self, nwbfile: NWBFile, metadata: dict): for event_dict in metadata["Behavior"]["Events"]: name = event_dict["name"] times = np.array(file["events"][name]["time"]).squeeze() + if normalize_timestamps: + times = times - starting_timestamp name_to_times[name] = times for event_dict in metadata["Behavior"]["ValuedEvents"]: name = event_dict["name"] times = np.array(file["events"][name]["time"]).squeeze() + if normalize_timestamps: + times = times - starting_timestamp values = np.array(file["events"][name]["value"]).squeeze() name_to_times[name] = times name_to_values[name] = values trial_start_times = np.array(file["events"]["push"]["time"]).squeeze() trial_stop_times = np.array(file["events"]["push"]["time_end"]).squeeze() + trial_is_nan = np.isnan(trial_start_times) | np.isnan(trial_stop_times) + trial_start_times = trial_start_times[~trial_is_nan] + trial_stop_times = trial_stop_times[~trial_is_nan] + if normalize_timestamps: + trial_start_times = trial_start_times - starting_timestamp + trial_stop_times = trial_stop_times - starting_timestamp for trials_dict in metadata["Behavior"]["Trials"]: name = trials_dict["name"] + dtype = trials_dict["dtype"] trial_array = np.array(file["events"]["push"][name]).squeeze() - name_to_trial_array[name] = trial_array + if dtype == "bool": + trial_array[np.isnan(trial_array)] = False + trial_array = np.asarray(trial_array, dtype=dtype) # Can't cast to dtype right away bc bool(nan) = True + name_to_trial_array[name] = trial_array[~trial_is_nan] # Add Data to NWBFile behavior_module = nwb_helpers.get_module( @@ -163,6 +182,12 @@ def add_to_nwbfile(self, nwbfile: NWBFile, metadata: dict): ) for event_dict in metadata["Behavior"]["Events"]: event_times = name_to_times[event_dict["name"]] + if np.all(np.isnan(event_times)): + if 
verbose: + print( + f"An event provided in the metadata ({event_dict['name']}) will be skipped because no times were found." + ) + continue # Skip if all times are NaNs event_type = event_type_name_to_row[event_dict["name"]] for event_time in event_times: events_table.add_row(timestamp=event_time, event_type=event_type) @@ -174,12 +199,20 @@ def add_to_nwbfile(self, nwbfile: NWBFile, metadata: dict): valued_events_table.add_column(name="value", description="Value of the event.") for event_dict in metadata["Behavior"]["ValuedEvents"]: event_times = name_to_times[event_dict["name"]] + if np.all(np.isnan(event_times)): + if verbose: + print( + f"An event provided in the metadata ({event_dict['name']}) will be skipped because no times were found." + ) + continue # Skip if all times are NaNs event_values = name_to_values[event_dict["name"]] event_type = event_type_name_to_row[event_dict["name"]] for event_time, event_value in zip(event_times, event_values): valued_events_table.add_row(timestamp=event_time, event_type=event_type, value=event_value) - behavior_module.add(events_table) - behavior_module.add(valued_events_table) + if len(events_table) > 0: + behavior_module.add(events_table) + if len(valued_events_table) > 0: + behavior_module.add(valued_events_table) task = Task(event_types=event_types_table) nwbfile.add_lab_meta_data(task) @@ -192,6 +225,15 @@ def add_to_nwbfile(self, nwbfile: NWBFile, metadata: dict): trial_array = name_to_trial_array[name] nwbfile.add_trial_column(name=name, description=trials_dict["description"], data=trial_array) + # Add Epochs Table + nwbfile.add_epoch(start_time=trial_start_times[0], stop_time=trial_stop_times[-1], tags=["Active Behavior"]) + if len(valued_events_table) > 0: + nwbfile.add_epoch( + start_time=valued_events_table["timestamp"][0], + stop_time=valued_events_table["timestamp"][-1], + tags=["Passive Listening"], + ) + # Add Devices for device_kwargs in metadata["Behavior"]["Devices"]: device = Device(**device_kwargs) diff --git a/src/schneider_lab_to_nwb/zempolich_2024/zempolich_2024_convert_all_sessions.py b/src/schneider_lab_to_nwb/zempolich_2024/zempolich_2024_convert_all_sessions.py new file mode 100644 index 0000000..467ca54 --- /dev/null +++ b/src/schneider_lab_to_nwb/zempolich_2024/zempolich_2024_convert_all_sessions.py @@ -0,0 +1,164 @@ +"""Primary script to run to convert all sessions in a dataset using session_to_nwb.""" +from pathlib import Path +from concurrent.futures import ProcessPoolExecutor, as_completed +from pprint import pformat +import traceback +from tqdm import tqdm +import shutil + +from schneider_lab_to_nwb.zempolich_2024.zempolich_2024_convert_session import session_to_nwb + + +def dataset_to_nwb( + *, + data_dir_path: str | Path, + output_dir_path: str | Path, + max_workers: int = 1, + verbose: bool = True, +): + """Convert the entire dataset to NWB. + + Parameters + ---------- + data_dir_path : str | Path + The path to the directory containing the raw data. + output_dir_path : str | Path + The path to the directory where the NWB files will be saved. 
+ max_workers : int, optional + The number of workers to use for parallel processing, by default 1 + verbose : bool, optional + Whether to print verbose output, by default True + """ + data_dir_path = Path(data_dir_path) + output_dir_path = Path(output_dir_path) + session_to_nwb_kwargs_per_session = get_session_to_nwb_kwargs_per_session(data_dir_path=data_dir_path) + + futures = [] + with ProcessPoolExecutor(max_workers=max_workers) as executor: + for session_to_nwb_kwargs in session_to_nwb_kwargs_per_session: + session_to_nwb_kwargs["output_dir_path"] = output_dir_path + session_to_nwb_kwargs["verbose"] = verbose + nwbfile_name = get_nwbfile_name_from_kwargs(session_to_nwb_kwargs) + exception_file_path = output_dir_path / f"ERROR_{nwbfile_name}.txt" + futures.append( + executor.submit( + safe_session_to_nwb, + session_to_nwb_kwargs=session_to_nwb_kwargs, + exception_file_path=exception_file_path, + ) + ) + for _ in tqdm(as_completed(futures), total=len(futures)): + pass + + +def get_nwbfile_name_from_kwargs(session_to_nwb_kwargs): + behavior_file_path = session_to_nwb_kwargs["behavior_file_path"] + subject_id = behavior_file_path.name.split("_")[1] + session_id = behavior_file_path.name.split("_")[2] + nwbfile_name = f"sub-{subject_id}_ses-{session_id}.nwb" + return nwbfile_name + + +def safe_session_to_nwb(*, session_to_nwb_kwargs: dict, exception_file_path: str | Path): + """Convert a session to NWB while handling any errors by recording error messages to the exception_file_path. + + Parameters + ---------- + session_to_nwb_kwargs : dict + The arguments for session_to_nwb. + exception_file_path : Path + The path to the file where the exception messages will be saved. + """ + exception_file_path = Path(exception_file_path) + try: + session_to_nwb(**session_to_nwb_kwargs) + except Exception as e: + with open(exception_file_path, mode="w") as f: + f.write(f"session_to_nwb_kwargs: \n {pformat(session_to_nwb_kwargs)}\n\n") + f.write(traceback.format_exc()) + + +def get_session_to_nwb_kwargs_per_session( + *, + data_dir_path: str | Path, +): + """Get the kwargs for session_to_nwb for each session in the dataset. + + Parameters + ---------- + data_dir_path : str | Path + The path to the directory containing the raw data. + + Returns + ------- + list[dict[str, Any]] + A list of dictionaries containing the kwargs for session_to_nwb for each session. 
+ """ + a1_ephys_path = data_dir_path / "A1_EphysFiles" + a1_ephys_behavior_path = data_dir_path / "A1_EphysBehavioralFiles" + a1_opto_path = data_dir_path / "A1_OptoBehavioralFiles" + m2_ephys_path = data_dir_path / "M2_EphysFiles" + m2_ephys_behavior_path = data_dir_path / "M2_EphysBehavioralFiles" + m2_opto_path = data_dir_path / "M2_OptoBehavioralFiles" + + a1_kwargs = get_brain_region_kwargs( + ephys_path=a1_ephys_path, + ephys_behavior_path=a1_ephys_behavior_path, + opto_path=a1_opto_path, + brain_region="A1", + ) + m2_kwargs = get_brain_region_kwargs( + ephys_path=m2_ephys_path, + ephys_behavior_path=m2_ephys_behavior_path, + opto_path=m2_opto_path, + brain_region="M2", + ) + session_to_nwb_kwargs_per_session = a1_kwargs + m2_kwargs + + return session_to_nwb_kwargs_per_session + + +def get_brain_region_kwargs(ephys_path, ephys_behavior_path, opto_path, brain_region): + session_to_nwb_kwargs_per_session = [] + for subject_dir in ephys_path.iterdir(): + subject_id = subject_dir.name + matched_behavior_paths = sorted(ephys_behavior_path.glob(f"raw_{subject_id}_*.mat")) + sorted_session_dirs = sorted(subject_dir.iterdir()) + for ephys_folder_path, behavior_file_path in zip(sorted_session_dirs, matched_behavior_paths): + session_to_nwb_kwargs = dict( + ephys_folder_path=ephys_folder_path, + behavior_file_path=behavior_file_path, + brain_region=brain_region, + intrinsic_signal_optical_imaging_folder_path="", # TODO: Add intrinsic signal optical imaging folder path + video_folder_path="", # TODO: Add video folder path + ) + session_to_nwb_kwargs_per_session.append(session_to_nwb_kwargs) + for behavior_file_path in opto_path.iterdir(): + session_to_nwb_kwargs = dict( + behavior_file_path=behavior_file_path, + brain_region=brain_region, + has_opto=True, + intrinsic_signal_optical_imaging_folder_path="", # TODO: Add intrinsic signal optical imaging folder path + video_folder_path="", # TODO: Add video folder path + ) + session_to_nwb_kwargs_per_session.append(session_to_nwb_kwargs) + return session_to_nwb_kwargs_per_session + + +if __name__ == "__main__": + + # Parameters for conversion + data_dir_path = Path("/Volumes/T7/CatalystNeuro/Schneider/Grant Zempolich Project Data") + output_dir_path = Path("/Volumes/T7/CatalystNeuro/Schneider/conversion_nwb") + max_workers = 4 + if output_dir_path.exists(): + shutil.rmtree( + output_dir_path, ignore_errors=True + ) # ignore errors due to MacOS race condition (https://github.com/python/cpython/issues/81441) + + dataset_to_nwb( + data_dir_path=data_dir_path, + output_dir_path=output_dir_path, + max_workers=max_workers, + verbose=False, + ) diff --git a/src/schneider_lab_to_nwb/zempolich_2024/zempolich_2024_convert_session.py b/src/schneider_lab_to_nwb/zempolich_2024/zempolich_2024_convert_session.py new file mode 100644 index 0000000..702e4fd --- /dev/null +++ b/src/schneider_lab_to_nwb/zempolich_2024/zempolich_2024_convert_session.py @@ -0,0 +1,194 @@ +"""Primary script to run to convert an entire session for of data using the NWBConverter.""" +from pathlib import Path +from zoneinfo import ZoneInfo +import shutil +from datetime import datetime +from typing import Optional, Literal + +from neuroconv.utils import load_dict_from_file, dict_deep_update +from schneider_lab_to_nwb.zempolich_2024 import Zempolich2024NWBConverter + + +def session_to_nwb( + behavior_file_path: str | Path, + video_folder_path: str | Path, + intrinsic_signal_optical_imaging_folder_path: str | Path, + output_dir_path: str | Path, + ephys_folder_path: Optional[str | Path] = 
None, + has_opto: bool = False, + brain_region: Literal["A1", "M2"] = "A1", + stub_test: bool = False, + verbose: bool = True, +): + behavior_file_path = Path(behavior_file_path) + video_folder_path = Path(video_folder_path) + intrinsic_signal_optical_imaging_folder_path = Path(intrinsic_signal_optical_imaging_folder_path) + output_dir_path = Path(output_dir_path) + video_file_paths = [ + file_path for file_path in video_folder_path.glob("*.mp4") if not file_path.name.startswith("._") + ] + video_file_paths = sorted(video_file_paths) + if stub_test: + output_dir_path = output_dir_path / "nwb_stub" + output_dir_path.mkdir(parents=True, exist_ok=True) + if ephys_folder_path is None: + has_ephys = False + else: + has_ephys = True + ephys_folder_path = Path(ephys_folder_path) + + source_data = dict() + conversion_options = dict() + + # Add Ephys Recording and Sorting + if has_ephys: + stream_name = "Signals CH" + source_data.update( + dict(Recording=dict(folder_path=ephys_folder_path, stream_name=stream_name, verbose=verbose)) + ) + conversion_options.update(dict(Recording=dict(stub_test=stub_test, brain_region=brain_region))) + + source_data.update(dict(Sorting=dict(folder_path=ephys_folder_path, verbose=verbose))) + conversion_options.update(dict(Sorting=dict())) + + # Add Behavior + source_data.update(dict(Behavior=dict(file_path=behavior_file_path))) + conversion_options.update(dict(Behavior=dict())) + + # Add Video(s) + # for i, video_file_path in enumerate(video_file_paths): + # metadata_key_name = f"VideoCamera{i+1}" + # source_data.update({metadata_key_name: dict(file_paths=[video_file_path], metadata_key_name=metadata_key_name)}) + # conversion_options.update({metadata_key_name: dict()}) + + # Add Optogenetic + if has_opto: + source_data.update(dict(Optogenetic=dict(file_path=behavior_file_path))) + conversion_options.update(dict(Optogenetic=dict(brain_region=brain_region))) + conversion_options["Behavior"]["normalize_timestamps"] = True + + # Add Intrinsic Signal Optical Imaging + # source_data.update(dict(ISOI=dict(folder_path=intrinsic_signal_optical_imaging_folder_path))) + # conversion_options.update(dict(ISOI=dict())) + + converter = Zempolich2024NWBConverter(source_data=source_data, verbose=verbose) + metadata = converter.get_metadata() + + # Update default metadata with the editable in the corresponding yaml file + editable_metadata_path = Path(__file__).parent / "zempolich_2024_metadata.yaml" + editable_metadata = load_dict_from_file(editable_metadata_path) + metadata = dict_deep_update(metadata, editable_metadata) + + add_session_start_time_to_metadata( + behavior_file_path=behavior_file_path, ephys_folder_path=ephys_folder_path, metadata=metadata + ) + + if has_ephys: + conversion_options["Sorting"]["units_description"] = metadata["Sorting"]["units_description"] + + # # Overwrite video metadata + # for i, video_file_path in enumerate(video_file_paths): + # metadata_key_name = f"VideoCamera{i+1}" + # metadata["Behavior"][metadata_key_name] = editable_metadata["Behavior"][metadata_key_name] + + subject_id = behavior_file_path.name.split("_")[1] + session_id = behavior_file_path.name.split("_")[2] + nwbfile_path = output_dir_path / f"sub-{subject_id}_ses-{session_id}.nwb" + metadata["NWBFile"]["session_id"] = session_id + metadata["Subject"]["subject_id"] = subject_id + + # Run conversion + converter.run_conversion(metadata=metadata, nwbfile_path=nwbfile_path, conversion_options=conversion_options) + + +def add_session_start_time_to_metadata( + behavior_file_path: str | Path, 
+ ephys_folder_path: Optional[str | Path], + metadata: dict, +): + if ephys_folder_path is not None: + folder_name = ephys_folder_path.parent.name + "/" + ephys_folder_path.name + folder_name_to_start_datetime = metadata["Ecephys"].pop("folder_name_to_start_datetime") + if folder_name in folder_name_to_start_datetime.keys(): + session_start_time = datetime.fromisoformat(folder_name_to_start_datetime[folder_name]) + else: + session_start_time = metadata["NWBFile"]["session_start_time"] + else: + session_start_time = datetime.strptime(behavior_file_path.name.split("_")[2], "%y%m%d") + + EST = ZoneInfo("US/Eastern") + metadata["NWBFile"]["session_start_time"] = session_start_time.replace(tzinfo=EST) + + +def main(): + # Parameters for conversion + data_dir_path = Path("/Volumes/T7/CatalystNeuro/Schneider/Grant Zempolich Project Data") + output_dir_path = Path("/Volumes/T7/CatalystNeuro/Schneider/conversion_nwb") + stub_test = False + verbose = False + + if output_dir_path.exists(): + shutil.rmtree(output_dir_path, ignore_errors=True) + + # Example Session A1 Ephys + Behavior + ephys_folder_path = data_dir_path / "A1_EphysFiles" / "m53" / "Day1_A1" + behavior_file_path = data_dir_path / "A1_EphysBehavioralFiles" / "raw_m53_231029_001.mat" + video_folder_path = Path("") + intrinsic_signal_optical_imaging_folder_path = Path("") + session_to_nwb( + ephys_folder_path=ephys_folder_path, + behavior_file_path=behavior_file_path, + video_folder_path=video_folder_path, + intrinsic_signal_optical_imaging_folder_path=intrinsic_signal_optical_imaging_folder_path, + output_dir_path=output_dir_path, + stub_test=stub_test, + verbose=verbose, + ) + + # Example Session A1 Ogen + Behavior + behavior_file_path = data_dir_path / "A1_OptoBehavioralFiles" / "raw_m53_231013_001.mat" + video_folder_path = Path("") + intrinsic_signal_optical_imaging_folder_path = Path("") + session_to_nwb( + behavior_file_path=behavior_file_path, + video_folder_path=video_folder_path, + intrinsic_signal_optical_imaging_folder_path=intrinsic_signal_optical_imaging_folder_path, + output_dir_path=output_dir_path, + has_opto=True, + stub_test=stub_test, + verbose=verbose, + ) + + # Example Session M2 Ephys + Behavior + ephys_folder_path = data_dir_path / "M2_EphysFiles" / "m74" / "M2_Day1" + behavior_file_path = data_dir_path / "M2_EphysBehavioralFiles" / "raw_m74_240815_001.mat" + video_folder_path = Path("") + intrinsic_signal_optical_imaging_folder_path = Path("") + session_to_nwb( + ephys_folder_path=ephys_folder_path, + behavior_file_path=behavior_file_path, + video_folder_path=video_folder_path, + intrinsic_signal_optical_imaging_folder_path=intrinsic_signal_optical_imaging_folder_path, + brain_region="M2", + output_dir_path=output_dir_path, + stub_test=stub_test, + verbose=verbose, + ) + + # Example Session M2 Opto + Behavior + behavior_file_path = data_dir_path / "M2_OptoBehavioralFiles" / "raw_m74_240809_001.mat" + video_folder_path = Path("") + intrinsic_signal_optical_imaging_folder_path = Path("") + session_to_nwb( + behavior_file_path=behavior_file_path, + video_folder_path=video_folder_path, + intrinsic_signal_optical_imaging_folder_path=intrinsic_signal_optical_imaging_folder_path, + brain_region="M2", + output_dir_path=output_dir_path, + stub_test=stub_test, + verbose=verbose, + ) + + +if __name__ == "__main__": + main() diff --git a/src/schneider_lab_to_nwb/schneider_2024/schneider_2024_intrinsic_signal_imaging_interface.py b/src/schneider_lab_to_nwb/zempolich_2024/zempolich_2024_intrinsic_signal_imaging_interface.py 
similarity index 98% rename from src/schneider_lab_to_nwb/schneider_2024/schneider_2024_intrinsic_signal_imaging_interface.py rename to src/schneider_lab_to_nwb/zempolich_2024/zempolich_2024_intrinsic_signal_imaging_interface.py index ee6341f..7017f9c 100644 --- a/src/schneider_lab_to_nwb/schneider_2024/schneider_2024_intrinsic_signal_imaging_interface.py +++ b/src/schneider_lab_to_nwb/zempolich_2024/zempolich_2024_intrinsic_signal_imaging_interface.py @@ -12,7 +12,7 @@ from neuroconv.tools import nwb_helpers -class Schneider2024IntrinsicSignalOpticalImagingInterface(BaseDataInterface): +class Zempolich2024IntrinsicSignalOpticalImagingInterface(BaseDataInterface): """Intrinsic signal optical imaging interface for schneider_2024 conversion""" keywords = ("intrinsic signal optical imaging",) diff --git a/src/schneider_lab_to_nwb/schneider_2024/schneider_2024_metadata.yaml b/src/schneider_lab_to_nwb/zempolich_2024/zempolich_2024_metadata.yaml similarity index 84% rename from src/schneider_lab_to_nwb/schneider_2024/schneider_2024_metadata.yaml rename to src/schneider_lab_to_nwb/zempolich_2024/zempolich_2024_metadata.yaml index d526a33..433fada 100644 --- a/src/schneider_lab_to_nwb/schneider_2024/schneider_2024_metadata.yaml +++ b/src/schneider_lab_to_nwb/zempolich_2024/zempolich_2024_metadata.yaml @@ -17,6 +17,14 @@ Subject: description: 12-week-old C57BL/6 or VGATChR2-EYFP mice (see Aims approaches below for details) will be used for all behavioral, electrophysiology, and optogenetic experiments. In the VGAT-ChR2-EYFP mouse line, channelrhodopsin (ChR2) is coupled to the vesicular GABA transporter, inducing expression in GABAergic inhibitory neurons ubiquitously across cortex and allowing for real time optogenetic inhibition of brain regions of interest. strain: C57BL/6 +BrainRegion: + A1: + electrode_group_location: Primary Auditory Cortex (A1) + optogenetic_stimulus_site_location: Primary Auditory Cortex (-2.8 AP, 4.2 ML relative to bregma; guided by intrinsic optical imaging) + M2: + electrode_group_location: Secondary Motor Cortex (M2) + optogenetic_stimulus_site_location: Secondary Motor Cortex (1.0-1.5 AP, 0.5-0.7 ML) + Ecephys: Device: - name: MasmanidisSiliconMicroprobe128AxN @@ -25,11 +33,20 @@ Ecephys: ElectrodeGroup: - name: ElectrodeGroup description: ElectrodeGroup for all channels in the recording probe. - location: Primary Auditory Cortex (A1) device: MasmanidisSiliconMicroprobe128AxN ElectricalSeries: - name: ElectricalSeries description: Recording of AC neural responses in mice performing this behavioral task will utilize dense 128-channel recording probes (Masmanidis Lab). These recording probes span a depth ~1mm, allowing for sampling of all layers of cortex. Electrophysiology data will be recorded using OpenEphys Acquisition Board v2.4 and associated OpenEphys GUI software. + folder_name_to_start_datetime: + m53/Day1_A1: 2023-10-29T16:56:01 + m54/Day1_A1: 2023-10-29T18:18:03 + m65/Day2_A1: 2023-10-26T18:49:04 + m66/Day1_A1: 2023-10-25T18:41:03 # could be 2023-10-25T18:04:13 instead + m67/Day2_A1: 2023-10-28T15:54:01 # could be 2023-10-28T15:05:14 instead + m71/Day2_A1: 2023-11-01T18:04:30 # could be 2023-11-01T18:43:00 instead + m72/Day2_A1: 2023-10-21T20:03:27 # could be 2023-10-21T20:32:07 instead + m79/M2_Day1: 2024-08-19T15:58:07 + m81/M2_Day1: 2024-08-17T17:59:09 Behavior: Module: @@ -61,29 +78,37 @@ Behavior: manufacturer: Schneider Lab Trials: - name: rewarded - description: Indicates if trial was rewarded (NaN = trial not rewarded, 1 = trial rewarded). 
+ description: Indicates if trial was rewarded (False = trial not rewarded, True = trial rewarded). + dtype: bool - name: time_reward_s description: Time of reward if rewarded, otherwise NaN. + dtype: float64 - name: opto_trial - description: Indicates if trial was an optogenetic stimulation trial (NaN = non opto trial, 1 = opto trial). + description: Indicates if trial was an optogenetic stimulation trial (False = non opto trial, True = opto trial). + dtype: bool - name: opto_time description: Time of optogenetic stimulation if opto trial, otherwise NaN. + dtype: float64 - name: opto_time_end description: Time of start of optogenetic stimulation if it occurs, otherwise NaN. + dtype: float64 - name: ITI_respect - description: Whether or not trial start obeyed inter trial interval wait time (300ms). + description: Whether or not trial start obeyed inter trial interval wait time of 300ms (False = trial did not obey ITI, True = trial obeyed ITI). + dtype: bool - name: ThresholdVector description: Position of start of the target zone on a given trial (in raw encoder values corresponding to value read out by quadrature encoder). + dtype: float64 - name: endZone_ThresholdVector description: Position of ending/exit position of target zone on a given trial (in raw encoder values corresponding to value read out by quadrature encoder). - VideoCamera1: - - name: video_camera_1 - description: Two IR video cameras (AAK CA20 600TVL 2.8MM) are used to monitor the experiments from different angles of interest, allowing for offline analysis of body movements, pupillometry, and other behavioral data if necessary. Camera 1 is a side angle view of the mouse. - unit: Frames - VideoCamera2: - - name: video_camera_2 - description: Two IR video cameras (AAK CA20 600TVL 2.8MM) are used to monitor the experiments from different angles of interest, allowing for offline analysis of body movements, pupillometry, and other behavioral data if necessary. Camera 2 is a zoomed-in view of the pupil of the mouse. - unit: Frames + dtype: float64 + # VideoCamera1: + # - name: video_camera_1 + # description: Two IR video cameras (AAK CA20 600TVL 2.8MM) are used to monitor the experiments from different angles of interest, allowing for offline analysis of body movements, pupillometry, and other behavioral data if necessary. Camera 1 is a side angle view of the mouse. + # unit: Frames + # VideoCamera2: + # - name: video_camera_2 + # description: Two IR video cameras (AAK CA20 600TVL 2.8MM) are used to monitor the experiments from different angles of interest, allowing for offline analysis of body movements, pupillometry, and other behavioral data if necessary. Camera 2 is a zoomed-in view of the pupil of the mouse. + # unit: Frames Sorting: units_description: Neural spikes will be sorted offline using Kilosort 2.5 and Phy2 software and manually curated to ensure precise spike time acquisition. diff --git a/src/schneider_lab_to_nwb/zempolich_2024/zempolich_2024_notes.md b/src/schneider_lab_to_nwb/zempolich_2024/zempolich_2024_notes.md new file mode 100644 index 0000000..4a304a5 --- /dev/null +++ b/src/schneider_lab_to_nwb/zempolich_2024/zempolich_2024_notes.md @@ -0,0 +1,43 @@ +# Notes concerning the schneider_2024 conversion + +## Behavior + +## Video + +## Optogenetics +- is_opto_trial in the trials table is `np.logical_not(np.isnan(onset_times))` rather than reading from the .mat file + to ensure consistency with the onset/offset times. +- injection vs stimulation location(s) for A1 vs M2??? 
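+
+A minimal sketch of the is_opto_trial derivation above (illustrative only; reading `opto_time` from `events["push"]` is an assumption based on the trials metadata, and the example file name is taken from the example sessions in the conversion script):
+
+```python
+import numpy as np
+from pymatreader import read_mat
+
+events = read_mat("raw_m53_231013_001.mat")["events"]
+
+# Onset times are NaN on non-opto trials, so a finite onset time marks an opto trial;
+# deriving the flag this way keeps is_opto_trial consistent with opto_time/opto_time_end.
+onset_times = np.asarray(events["push"]["opto_time"], dtype=float).squeeze()
+is_opto_trial = np.logical_not(np.isnan(onset_times))
+```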
+ +## Intrinsic Signal Optical Imaging +- Just including the raw blood vessel image and the processed overlay + pixel locations, because including the ISOI ROI response series would really require an extension for context, but it seems to have limited reuse potential. +- Used the Audette paper for the description of the overlay image. +- Need pixel locations for ephys +- Need device info for 2p microscope and red light laser +- Why is the overlaid image flipped left/right compared to the original? + +## Temporal Alignment +- For session A1/m53/Day1 (raw_m53_231029_001.mat), + - ephys data starts at 1025s with duration 2700s + - units table runs from 0-2700 + - behavioral time series (lick and encoder) run from 1187s to 2017s + - events table (toneIN, toneOUT, targetOUT, valve) runs from 1191s to 1954s + - valued events table () runs from 2017 to 2164 + - trials table () runs from 1191 to 1993 + --> conclusion: something is wrong with the ephys start time --> ignoring it +- Want to split data into epochs: Active Behavior, Passive Listening, ??? What is happening post-2164? Before 1187s? +- For opto sessions, what is the session start time? E.g., what is file['metadata']['session_beginning'] (=129765.7728241)? +- Looks like opto sessions are not temporally aligned (no concurrent ephys, and timestamps start at large numbers (142697.1119976)) --> normalizing those sessions' times to the first encoder timestamp. + + +## Active Requests +- Mice sexes +- Video for each session and ISOI data for each animal +- Pixel locations for ephys +- ISOI device info for 2p microscope and red light laser + +## Questions for Midway Meeting +- injection vs stimulation location(s) for A1 vs M2??? +- Why is the overlaid image flipped left/right compared to the original? +- Want to split data into epochs: Active Behavior, Passive Listening, ??? What is happening post-2164? Before 1187s? +- Double check: is it OK to normalize opto sessions to the first encoder timestamp?
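+
+## Timestamp Normalization Sketch
+
+A minimal sketch of the normalization described above for opto-only sessions. It mirrors the `normalize_timestamps` option in `Zempolich2024BehaviorInterface`; the `"encoder"`/`"lick"` keys and the example file name are assumptions used only for illustration.
+
+```python
+import numpy as np
+from pymatreader import read_mat
+
+file = read_mat("raw_m53_231013_001.mat")  # example opto-only behavior file (assumed path)
+
+# Opto-only sessions have no concurrent ephys and their clocks start at large offsets,
+# so every timestamp is shifted so that the first continuous (encoder) sample lands at 0 s.
+starting_timestamp = file["continuous"]["encoder"]["time"][0]
+encoder_times = np.asarray(file["continuous"]["encoder"]["time"]).squeeze() - starting_timestamp
+lick_times = np.asarray(file["continuous"]["lick"]["time"]).squeeze() - starting_timestamp
+```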
diff --git a/src/schneider_lab_to_nwb/zempolich_2024/zempolich_2024_nwbconverter.py b/src/schneider_lab_to_nwb/zempolich_2024/zempolich_2024_nwbconverter.py new file mode 100644 index 0000000..7a67805 --- /dev/null +++ b/src/schneider_lab_to_nwb/zempolich_2024/zempolich_2024_nwbconverter.py @@ -0,0 +1,28 @@ +"""Primary NWBConverter class for this dataset.""" +from neuroconv import NWBConverter +from neuroconv.datainterfaces import ( + PhySortingInterface, + VideoInterface, +) +from neuroconv.basedatainterface import BaseDataInterface + +from schneider_lab_to_nwb.zempolich_2024 import ( + Zempolich2024OpenEphysRecordingInterface, + Zempolich2024BehaviorInterface, + Zempolich2024OptogeneticInterface, + Zempolich2024IntrinsicSignalOpticalImagingInterface, +) + + +class Zempolich2024NWBConverter(NWBConverter): + """Primary conversion class for the Zempolich 2024 extracellular electrophysiology dataset.""" + + data_interface_classes = dict( + Recording=Zempolich2024OpenEphysRecordingInterface, + Sorting=PhySortingInterface, + Behavior=Zempolich2024BehaviorInterface, + VideoCamera1=VideoInterface, + VideoCamera2=VideoInterface, + Optogenetic=Zempolich2024OptogeneticInterface, + ISOI=Zempolich2024IntrinsicSignalOpticalImagingInterface, + ) diff --git a/src/schneider_lab_to_nwb/zempolich_2024/zempolich_2024_open_ephys_recording_interface.py b/src/schneider_lab_to_nwb/zempolich_2024/zempolich_2024_open_ephys_recording_interface.py new file mode 100644 index 0000000..b36045f --- /dev/null +++ b/src/schneider_lab_to_nwb/zempolich_2024/zempolich_2024_open_ephys_recording_interface.py @@ -0,0 +1,35 @@ +"""Primary class for converting OpenEphys (legacy format) recordings.""" +from pynwb.file import NWBFile +import numpy as np +from typing import Literal + +from neuroconv.datainterfaces import OpenEphysLegacyRecordingInterface +from spikeinterface.extractors import OpenEphysLegacyRecordingExtractor + + +class Zempolich2024OpenEphysRecordingInterface(OpenEphysLegacyRecordingInterface): + """OpenEphys RecordingInterface for zempolich_2024 conversion.""" + + Extractor = OpenEphysLegacyRecordingExtractor + + def get_metadata(self) -> dict: + metadata = super().get_metadata() + metadata["Ecephys"]["Device"] = [] # remove default device + return metadata + + def add_to_nwbfile( + self, nwbfile: NWBFile, metadata: dict, brain_region: Literal["A1", "M2"] = "A1", **conversion_options + ): + folder_path = self.source_data["folder_path"] + channel_positions = np.load(folder_path / "channel_positions.npy") + if True: # TODO: Replace with `if stub_test:` once all channels are present in the data + channel_positions = channel_positions[:1, :] + location = metadata["BrainRegion"][brain_region]["electrode_group_location"] + for electrode_group in metadata["Ecephys"]["ElectrodeGroup"]: + electrode_group["location"] = location + channel_ids = self.recording_extractor.get_channel_ids() + self.recording_extractor.set_channel_locations(channel_ids=channel_ids, locations=channel_positions) + self.recording_extractor.set_property(key="brain_area", ids=channel_ids, values=[location] * len(channel_ids)) + self.recording_extractor._recording_segments[0].t_start = 0.0 + + super().add_to_nwbfile(nwbfile=nwbfile, metadata=metadata, **conversion_options) diff --git a/src/schneider_lab_to_nwb/schneider_2024/schneider_2024_optogeneticinterface.py b/src/schneider_lab_to_nwb/zempolich_2024/zempolich_2024_optogeneticinterface.py similarity index 90% rename from src/schneider_lab_to_nwb/schneider_2024/schneider_2024_optogeneticinterface.py rename to
src/schneider_lab_to_nwb/zempolich_2024/zempolich_2024_optogeneticinterface.py index 46b2930..812f24d 100644 --- a/src/schneider_lab_to_nwb/schneider_2024/schneider_2024_optogeneticinterface.py +++ b/src/schneider_lab_to_nwb/zempolich_2024/zempolich_2024_optogeneticinterface.py @@ -1,6 +1,7 @@ """Primary class for converting optogenetic stimulation.""" from pynwb.file import NWBFile from pydantic import FilePath +from typing import Literal import numpy as np from pymatreader import read_mat from pynwb.device import Device @@ -10,7 +11,7 @@ from neuroconv.utils import DeepDict -class Schneider2024OptogeneticInterface(BaseDataInterface): +class Zempolich2024OptogeneticInterface(BaseDataInterface): """Optogenetic interface for schneider_2024 conversion""" keywords = ["optogenetics"] @@ -27,7 +28,7 @@ def get_metadata_schema(self) -> dict: metadata_schema = super().get_metadata_schema() return metadata_schema - def add_to_nwbfile(self, nwbfile: NWBFile, metadata: dict): + def add_to_nwbfile(self, nwbfile: NWBFile, metadata: dict, brain_region: Literal["A1", "M2"] = "A1"): # Read Data file_path = self.source_data["file_path"] file = read_mat(file_path) @@ -56,7 +57,7 @@ def add_to_nwbfile(self, nwbfile: NWBFile, metadata: dict): # Add OptogeneticStimulusSite site_metadata = metadata["Optogenetics"]["OptogeneticStimulusSite"] - location = f"Injection location: {site_metadata['injection_location']} \n Stimulation location: {site_metadata['stimulation_location']}" + location = metadata["BrainRegion"][brain_region]["optogenetic_stimulus_site_location"] ogen_site = OptogeneticStimulusSite( name=site_metadata["name"], device=device,