From 32c05980c080ade55d83e01a6e2375587426cfbc Mon Sep 17 00:00:00 2001 From: Miles Wells Date: Tue, 20 Feb 2024 11:13:15 +0200 Subject: [PATCH 1/3] Moved task QC viewer to ibllib.qc.task_qc_viewer --- task_qc_viewer/README.md | 63 --------- task_qc_viewer/ViewEphysQC.py | 182 ------------------------- task_qc_viewer/__init__.py | 0 task_qc_viewer/requirements.txt | 8 -- task_qc_viewer/task_qc.py | 226 -------------------------------- task_qc_viewer/version.py | 1 - 6 files changed, 480 deletions(-) delete mode 100644 task_qc_viewer/README.md delete mode 100644 task_qc_viewer/ViewEphysQC.py delete mode 100644 task_qc_viewer/__init__.py delete mode 100644 task_qc_viewer/requirements.txt delete mode 100644 task_qc_viewer/task_qc.py delete mode 100644 task_qc_viewer/version.py diff --git a/task_qc_viewer/README.md b/task_qc_viewer/README.md deleted file mode 100644 index e413ef5..0000000 --- a/task_qc_viewer/README.md +++ /dev/null @@ -1,63 +0,0 @@ -# Task QC Viewer -This downloads the TTL pulses and data collected on the Bpod and/or FPGA and plots the results -alongside an interactive table. -The UUID is the session ID. - -## Setup -Requires a working iblenv installation. Follow this guide for setup: https://github.com/int-brain-lab/iblenv#iblenv - - -## Usage: command line -If on the server PC, activate the environment by typing: -``` -conda activate iblenv -``` -Otherwise, activate the iblenv as described in the guide above. - -Go into the iblapps directory that you cloned: -``` -cd /home/olivier/Documents/PYTHON/iblapps/task_qc_viewer -git checkout develop -git pull -``` -Launch the viewer by passing a session UUID, for example: -``` -python task_qc.py c9fec76e-7a20-4da4-93ad-04510a89473b -# or with ipython -ipython task_qc.py -- c9fec76e-7a20-4da4-93ad-04510a89473b -``` - -Or using a local session path (on a lab server, for example): -``` -python task_qc.py /mnt/s0/Subjects/KS022/2019-12-10/001 --local -# or with ipython -ipython task_qc.py -- /mnt/s0/Subjects/KS022/2019-12-10/001 --local -``` - -## Usage: from ipython prompt -``` python -from iblapps.task_qc_viewer.task_qc import show_session_task_qc -session_path = "/datadisk/Data/IntegrationTests/ephys/choice_world_init/KS022/2019-12-10/001" -show_session_task_qc(session_path, local=True) -``` - -## Plots -1) Sync pulse display: -- TTL sync pulses (as recorded on the Bpod or FPGA for ephys sessions) for key apparatus (i.e. frame2TTL, audio signal). TTL pulse trains are displayed in black (time on x-axis, voltage on y-axis), each offset by an increment of 1 (e.g. the audio signal is on line 3, cf. legend). -- trial event types, as vertical lines (marked in different colours) - -2) Wheel display: -- the wheel position in radians -- trial event types, as vertical lines (marked in different colours) - -3) Interactive table: -Each row is a trial entry; each column is a trial event. - -Double-clicking on any field of the table adjusts the time (x) axis of the sync pulse display to show the selected trial. - -### What to look for -Tests are defined in the SINGLE METRICS section of ibllib/qc/task_metrics.py: https://github.com/int-brain-lab/ibllib/blob/master/ibllib/qc/task_metrics.py#L148-L149 - -### Exit -Close the GUI window containing the interactive table to exit. 
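Since this first patch relocates the viewer into `ibllib.qc.task_qc_viewer`, the `iblapps.task_qc_viewer` import shown in the README above no longer applies once it lands. Below is a minimal usage sketch under the new layout, assuming the `task_qc` module and its `show_session_task_qc` entry point keep their names after the move (the patch subject states only the destination package; the rest is inferred):

```python
# Hypothetical post-move usage: the destination package comes from the patch
# subject (ibllib.qc.task_qc_viewer); the module and function names are
# assumed to be carried over unchanged from iblapps.
from ibllib.qc.task_qc_viewer.task_qc import show_session_task_qc

# Run on a local session path, mirroring the README example above
qc = show_session_task_qc('/mnt/s0/Subjects/KS022/2019-12-10/001', local=True)
```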
diff --git a/task_qc_viewer/ViewEphysQC.py b/task_qc_viewer/ViewEphysQC.py deleted file mode 100644 index 1bd7803..0000000 --- a/task_qc_viewer/ViewEphysQC.py +++ /dev/null @@ -1,182 +0,0 @@ -import logging - -from PyQt5 import QtCore, QtWidgets -from matplotlib.figure import Figure -from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg, NavigationToolbar2QT -import pandas as pd -import numpy as np - -import qt as qt - -_logger = logging.getLogger('ibllib') - - -class DataFrameModel(QtCore.QAbstractTableModel): - DtypeRole = QtCore.Qt.UserRole + 1000 - ValueRole = QtCore.Qt.UserRole + 1001 - - def __init__(self, df=pd.DataFrame(), parent=None): - super(DataFrameModel, self).__init__(parent) - self._dataframe = df - - def setDataFrame(self, dataframe): - self.beginResetModel() - self._dataframe = dataframe.copy() - self.endResetModel() - - def dataFrame(self): - return self._dataframe - - dataFrame = QtCore.pyqtProperty(pd.DataFrame, fget=dataFrame, fset=setDataFrame) - - @QtCore.pyqtSlot(int, QtCore.Qt.Orientation, result=str) - def headerData(self, section: int, orientation: QtCore.Qt.Orientation, - role: int = QtCore.Qt.DisplayRole): - if role == QtCore.Qt.DisplayRole: - if orientation == QtCore.Qt.Horizontal: - return self._dataframe.columns[section] - else: - return str(self._dataframe.index[section]) - return QtCore.QVariant() - - def rowCount(self, parent=QtCore.QModelIndex()): - if parent.isValid(): - return 0 - return len(self._dataframe.index) - - def columnCount(self, parent=QtCore.QModelIndex()): - if parent.isValid(): - return 0 - return self._dataframe.columns.size - - def data(self, index, role=QtCore.Qt.DisplayRole): - if (not index.isValid() or not (0 <= index.row() < self.rowCount() and - 0 <= index.column() < self.columnCount())): - return QtCore.QVariant() - row = self._dataframe.index[index.row()] - col = self._dataframe.columns[index.column()] - dt = self._dataframe[col].dtype - - val = self._dataframe.iloc[row][col] - if role == QtCore.Qt.DisplayRole: - return str(val) - elif role == DataFrameModel.ValueRole: - return val - if role == DataFrameModel.DtypeRole: - return dt - return QtCore.QVariant() - - def roleNames(self): - roles = { - QtCore.Qt.DisplayRole: b'display', - DataFrameModel.DtypeRole: b'dtype', - DataFrameModel.ValueRole: b'value' - } - return roles - - def sort(self, col, order): - """ - Sort table by given column number - :param col: the column number selected (between 0 and self._dataframe.columns.size) - :param order: the order to be sorted, 0 is descending; 1, ascending - :return: - """ - self.layoutAboutToBeChanged.emit() - col_name = self._dataframe.columns.values[col] - # print('sorting by ' + col_name) - self._dataframe.sort_values(by=col_name, ascending=not order, inplace=True) - self._dataframe.reset_index(inplace=True, drop=True) - self.layoutChanged.emit() - - -class PlotCanvas(FigureCanvasQTAgg): - - def __init__(self, parent=None, width=5, height=4, dpi=100, wheel=None): - fig = Figure(figsize=(width, height), dpi=dpi) - - FigureCanvasQTAgg.__init__(self, fig) - self.setParent(parent) - - FigureCanvasQTAgg.setSizePolicy( - self, - QtWidgets.QSizePolicy.Expanding, - QtWidgets.QSizePolicy.Expanding) - FigureCanvasQTAgg.updateGeometry(self) - if wheel: - self.ax, self.ax2 = fig.subplots( - 2, 1, gridspec_kw={'height_ratios': [2, 1]}, sharex=True) - else: - self.ax = fig.add_subplot(111) - self.draw() - - -class PlotWindow(QtWidgets.QWidget): - def __init__(self, parent=None, wheel=None): - QtWidgets.QWidget.__init__(self, 
parent=None) - self.canvas = PlotCanvas(wheel=wheel) - self.vbl = QtWidgets.QVBoxLayout() # Set box for plotting - self.vbl.addWidget(self.canvas) - self.setLayout(self.vbl) - self.vbl.addWidget(NavigationToolbar2QT(self.canvas, self)) - - -class GraphWindow(QtWidgets.QWidget): - def __init__(self, parent=None, wheel=None): - QtWidgets.QWidget.__init__(self, parent=None) - vLayout = QtWidgets.QVBoxLayout(self) - hLayout = QtWidgets.QHBoxLayout() - self.pathLE = QtWidgets.QLineEdit(self) - hLayout.addWidget(self.pathLE) - self.loadBtn = QtWidgets.QPushButton("Select File", self) - hLayout.addWidget(self.loadBtn) - vLayout.addLayout(hLayout) - self.pandasTv = QtWidgets.QTableView(self) - vLayout.addWidget(self.pandasTv) - self.loadBtn.clicked.connect(self.loadFile) - self.pandasTv.setSortingEnabled(True) - self.pandasTv.doubleClicked.connect(self.tv_double_clicked) - self.wplot = PlotWindow(wheel=wheel) - self.wplot.show() - self.wheel = wheel - - def loadFile(self): - fileName, _ = QtWidgets.QFileDialog.getOpenFileName(self, "Open File", "", - "CSV Files (*.csv)") - self.pathLE.setText(fileName) - df = pd.read_csv(fileName) - self.update_df(df) - - def update_df(self, df): - model = DataFrameModel(df) - self.pandasTv.setModel(model) - self.wplot.canvas.draw() - - def tv_double_clicked(self): - df = self.pandasTv.model()._dataframe - ind = self.pandasTv.currentIndex() - start = df.loc[ind.row()]['intervals_0'] - finish = df.loc[ind.row()]['intervals_1'] - dt = finish - start - if self.wheel: - idx = np.searchsorted(self.wheel['re_ts'], np.array([start - dt / 10, - finish + dt / 10])) - period = self.wheel['re_pos'][idx[0]:idx[1]] - if period.size == 0: - _logger.warning('No wheel data during trial #%i', ind.row()) - else: - min_val, max_val = np.min(period), np.max(period) - self.wplot.canvas.ax2.set_ylim(min_val - 1, max_val + 1) - self.wplot.canvas.ax2.set_xlim(start - dt / 10, finish + dt / 10) - self.wplot.canvas.ax.set_xlim(start - dt / 10, finish + dt / 10) - - self.wplot.canvas.draw() - - -def viewqc(qc=None, title=None, wheel=None): - qt.create_app() - qcw = GraphWindow(wheel=wheel) - qcw.setWindowTitle(title) - if qc is not None: - qcw.update_df(qc) - qcw.show() - return qcw diff --git a/task_qc_viewer/__init__.py b/task_qc_viewer/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/task_qc_viewer/requirements.txt b/task_qc_viewer/requirements.txt deleted file mode 100644 index de93d2b..0000000 --- a/task_qc_viewer/requirements.txt +++ /dev/null @@ -1,8 +0,0 @@ -flake8 -ibllib -matplotlib -pandas -pyqt5 -pyqtgraph -ipython - diff --git a/task_qc_viewer/task_qc.py b/task_qc_viewer/task_qc.py deleted file mode 100644 index 137f725..0000000 --- a/task_qc_viewer/task_qc.py +++ /dev/null @@ -1,226 +0,0 @@ -import logging -import argparse -from itertools import cycle -import random -from collections.abc import Sized - -import pandas as pd -import qt as qt -from matplotlib.colors import TABLEAU_COLORS - -from one.api import ONE -import ibllib.plots as plots -from ibllib.io.extractors import ephys_fpga -from ibllib.qc.task_metrics import TaskQC -from ibllib.qc.task_extractors import TaskQCExtractor - -from task_qc_viewer import ViewEphysQC - -EVENT_MAP = {'goCue_times': ['#2ca02c', 'solid'], # green - 'goCueTrigger_times': ['#2ca02c', 'dotted'], # green - 'errorCue_times': ['#d62728', 'solid'], # red - 'errorCueTrigger_times': ['#d62728', 'dotted'], # red - 'valveOpen_times': ['#17becf', 'solid'], # cyan - 'stimFreeze_times': ['#0000ff', 'solid'], # blue - 
'stimFreezeTrigger_times': ['#0000ff', 'dotted'], # blue - 'stimOff_times': ['#9400d3', 'solid'], # dark violet - 'stimOffTrigger_times': ['#9400d3', 'dotted'], # dark violet - 'stimOn_times': ['#e377c2', 'solid'], # pink - 'stimOnTrigger_times': ['#e377c2', 'dotted'], # pink - 'response_times': ['#8c564b', 'solid'], # brown - } -cm = [EVENT_MAP[k][0] for k in EVENT_MAP] -ls = [EVENT_MAP[k][1] for k in EVENT_MAP] -CRITICAL_CHECKS = ( - 'check_audio_pre_trial', - 'check_correct_trial_event_sequence', - 'check_error_trial_event_sequence', - 'check_n_trial_events', - 'check_response_feedback_delays', - 'check_response_stimFreeze_delays', - 'check_reward_volume_set', - 'check_reward_volumes', - 'check_stimOn_goCue_delays', - 'check_stimulus_move_before_goCue', - 'check_wheel_move_before_feedback', - 'check_wheel_freeze_during_quiescence' -) - - -_logger = logging.getLogger('ibllib') - - -class QcFrame(TaskQC): - - def __init__(self, session, bpod_only=False, local=False, one=None): - """ - Loads and extracts the QC data for a given session path - :param session: A str or Path to a session, or a session eid - :param bpod_only: When True all data is extracted from Bpod instead of FPGA for ephys - """ - if not isinstance(session, TaskQC): - one = one or ONE() - super().__init__(session, one=one, log=_logger) - - if local: - dsets, out_files = ephys_fpga.extract_all(session, save=True) - self.extractor = TaskQCExtractor(session, lazy=True, one=one) - # Extract extra datasets required for QC - self.extractor.data = dsets - self.extractor.extract_data() - # Aggregate and update Alyx QC fields - self.run(update=False) - else: - self.load_data(bpod_only=bpod_only) - self.compute() - else: - assert session.extractor and session.metrics, 'Please run QC before passing to QcFrame' - super().__init__(session.eid or session.session_path, one=session.one, log=session.log) - for attr in ('criteria', 'criteria', '_outcome', 'extractor', 'metrics', 'passed'): - setattr(self, attr, getattr(session, attr)) - self.n_trials = self.extractor.data['intervals'].shape[0] - self.wheel_data = {'re_pos': self.extractor.data.pop('wheel_position'), - 're_ts': self.extractor.data.pop('wheel_timestamps')} - - # Print failed - outcome, results, outcomes = self.compute_session_status() - map = {k: [] for k in set(outcomes.values())} - for k, v in outcomes.items(): - map[v].append(k[6:]) - for k, v in map.items(): - if k == 'PASS': - continue - print(f'The following checks were labelled {k}:') - print('\n'.join(v), '\n') - - print('The following *critical* checks did not pass:') - critical_checks = [f'_{x.replace("check", "task")}' for x in CRITICAL_CHECKS] - for k, v in outcomes.items(): - if v != 'PASS' and k in critical_checks: - print(k[6:]) - - # Make DataFrame from the trail level metrics - def get_trial_level_failed(d): - new_dict = {k[6:]: v for k, v in d.items() if - isinstance(v, Sized) and len(v) == self.n_trials} - return pd.DataFrame.from_dict(new_dict) - - self.frame = get_trial_level_failed(self.metrics) - self.frame['intervals_0'] = self.extractor.data['intervals'][:, 0] - self.frame['intervals_1'] = self.extractor.data['intervals'][:, 1] - self.frame.insert(loc=0, column='trial_no', value=self.frame.index) - - def create_plots(self, axes, - wheel_axes=None, trial_events=None, color_map=None, linestyle=None): - """ - Plots the data for bnc1 (sound) and bnc2 (frame2ttl) - :param axes: An axes handle on which to plot the TTL events - :param wheel_axes: An axes handle on which to plot the wheel trace - :param 
trial_events: A list of Bpod trial events to plot, e.g. ['stimFreeze_times'], - if None, valve, sound and stimulus events are plotted - :param color_map: A color map to use for the events, default is the tableau color map - linestyle: A line style map to use for the events, default is random. - :return: None - """ - color_map = color_map or TABLEAU_COLORS.keys() - if trial_events is None: - # Default trial events to plot as vertical lines - trial_events = [ - 'goCue_times', - 'goCueTrigger_times', - 'feedback_times', - 'stimFreeze_times', - 'stimOff_times', - 'stimOn_times' - ] - - plot_args = { - 'ymin': 0, - 'ymax': 4, - 'linewidth': 2, - 'ax': axes - } - - bnc1 = self.extractor.frame_ttls - bnc2 = self.extractor.audio_ttls - trial_data = self.extractor.data - - if bnc1['times'].size: - plots.squares(bnc1['times'], bnc1['polarities'] * 0.4 + 1, ax=axes, color='k') - if bnc2['times'].size: - plots.squares(bnc2['times'], bnc2['polarities'] * 0.4 + 2, ax=axes, color='k') - linestyle = linestyle or random.choices(('-', '--', '-.', ':'), k=len(trial_events)) - - if self.extractor.bpod_ttls is not None: - bpttls = self.extractor.bpod_ttls - plots.squares(bpttls['times'], bpttls['polarities'] * 0.4 + 3, ax=axes, color='k') - plot_args['ymax'] = 4 - ylabels = ['', 'frame2ttl', 'sound', 'bpod', ''] - else: - plot_args['ymax'] = 3 - ylabels = ['', 'frame2ttl', 'sound', ''] - - for event, c, l in zip(trial_events, cycle(color_map), linestyle): - plots.vertical_lines(trial_data[event], label=event, color=c, linestyle=l, **plot_args) - - axes.legend(loc='upper left', fontsize='xx-small', bbox_to_anchor=(1, 0.5)) - axes.set_yticks(list(range(plot_args['ymax'] + 1))) - axes.set_yticklabels(ylabels) - axes.set_ylim([0, plot_args['ymax']]) - - if wheel_axes: - wheel_plot_args = { - 'ax': wheel_axes, - 'ymin': self.wheel_data['re_pos'].min(), - 'ymax': self.wheel_data['re_pos'].max()} - plot_args = {**plot_args, **wheel_plot_args} - - wheel_axes.plot(self.wheel_data['re_ts'], self.wheel_data['re_pos'], 'k-x') - for event, c, ln in zip(trial_events, cycle(color_map), linestyle): - plots.vertical_lines(trial_data[event], - label=event, color=c, linestyle=ln, **plot_args) - - -def show_session_task_qc(qc_or_session=None, bpod_only=False, local=False, one=None): - """ - Displays the task QC for a given session - :param qc_or_session: session_path or TaskQC object - :param bpod_only: (no FPGA) - :param local: set True for local extraction - :return: The QC object - """ - if isinstance(qc_or_session, QcFrame): - qc = qc_or_session - elif isinstance(qc_or_session, TaskQC): - qc = QcFrame(qc_or_session, one=one) - else: - qc = QcFrame(qc_or_session, bpod_only=bpod_only, local=local, one=one) - # Run QC and plot - w = ViewEphysQC.viewqc(wheel=qc.wheel_data) - qc.create_plots(w.wplot.canvas.ax, - wheel_axes=w.wplot.canvas.ax2, - trial_events=EVENT_MAP.keys(), - color_map=cm, - linestyle=ls) - # Update table and callbacks - w.update_df(qc.frame) - qt.run_app() - return qc - - -if __name__ == "__main__": - """Run TaskQC viewer with wheel data - For information on the QC checks see the QC Flags & failures document: - https://docs.google.com/document/d/1X-ypFEIxqwX6lU9pig4V_zrcR5lITpd8UJQWzW9I9zI/edit# - ipython task_qc.py c9fec76e-7a20-4da4-93ad-04510a89473b - ipython task_qc.py ./KS022/2019-12-10/001 --local - """ - # Parse parameters - parser = argparse.ArgumentParser(description='Quick viewer to see the behaviour data from' - 'choice world sessions.') - parser.add_argument('session', help='session uuid') - 
parser.add_argument('--bpod', action='store_true', help='run QC on Bpod data only (no FPGA)') - parser.add_argument('--local', action='store_true', help='run from disk location (lab server') - args = parser.parse_args() # returns data from the options specified (echo) - - show_session_task_qc(qc_or_session=args.session, bpod_only=args.bpod, local=args.local) diff --git a/task_qc_viewer/version.py b/task_qc_viewer/version.py deleted file mode 100644 index 1f356cc..0000000 --- a/task_qc_viewer/version.py +++ /dev/null @@ -1 +0,0 @@ -__version__ = '1.0.0' From 932c1ac174e5b29636c8d3e54157d0b4e3722333 Mon Sep 17 00:00:00 2001 From: Miles Wells Date: Fri, 15 Mar 2024 14:23:20 +0200 Subject: [PATCH 2/3] project -> projects --- dlc/overview_plot_dlc.py | 800 +++++++++++++++++++-------------------- needles2/probe_model.py | 4 +- 2 files changed, 402 insertions(+), 402 deletions(-) diff --git a/dlc/overview_plot_dlc.py b/dlc/overview_plot_dlc.py index 48c4bc3..03c3407 100644 --- a/dlc/overview_plot_dlc.py +++ b/dlc/overview_plot_dlc.py @@ -24,24 +24,24 @@ import matplotlib # https://github.com/lindermanlab/ssm import os - - + + def get_all_sess_with_ME(): one = ONE() # get all bwm sessions with dlc - all_sess = one.alyx.rest('sessions', 'list', - project='ibl_neuropixel_brainwide_01', - task_protocol="ephys", + all_sess = one.alyx.rest('sessions', 'list', + projects='ibl_neuropixel_brainwide_01', + task_protocol="ephys", dataset_types='camera.ROIMotionEnergy') eids = [s['url'].split('/')[-1] for s in all_sess] - - return eids - -def get_repeated_sites(): + return eids + + +def get_repeated_sites(): one = ONE() - STR_QUERY = 'probe_insertion__session__project__name__icontains,ibl_neuropixel_brainwide_01,' \ + STR_QUERY = 'probe_insertion__session__projects__name__icontains,ibl_neuropixel_brainwide_01,' \ 'probe_insertion__session__qc__lt,50,' \ '~probe_insertion__json__qc,CRITICAL,' \ 'probe_insertion__session__n_trials__gte,400' @@ -49,14 +49,14 @@ def get_repeated_sites(): x=-2243, y=-2000, theta=15, django=STR_QUERY) eids = [s['session']['id'] for s in all_sess] - + return eids - - + + def get_bwm_sessions(): one = ONE() traj_traced = one.alyx.rest('trajectories', 'list', provenance='Planned', - django='probe_insertion__session__project__name__' + django='probe_insertion__session__projects__name__' 'icontains,ibl_neuropixel_brainwide_01,' 'probe_insertion__session__qc__lt,50,' 'probe_insertion__session__extended_qc__behavior,1,' @@ -74,117 +74,117 @@ def get_bwm_sessions(): '~probe_insertion__session__extended_qc___task_stimulus_move_before_goCue__lt,0.9,' '~probe_insertion__session__extended_qc___task_audio_pre_trial__lt,0.9') eids = [[x['session']['id'],x['probe_name']] for x in traj_traced] - return eids - - + return eids + + def check_progress(): - + one = ONE() eids = get_repeated_sites() - + s = {} comp = [] for eid in eids: - + task = one.alyx.rest('tasks', 'list', session=eid, name='EphysDLC')[0] try: s[eid] = task['version'] print(task['version']) if task['version'][:3] != '1.1': - comp.append(eid) + comp.append(eid) except: - print(eid, 'has no task version') - - return s - - + print(eid, 'has no task version') + + return s + + def download_all_dlc(): - - eids = get_repeated_sites() - one = ONE() + + eids = get_repeated_sites() + one = ONE() dataset_types = ['camera.dlc', 'camera.times'] - - for eid in eids: + + for eid in eids: try: - + a = one.list(eid,'dataset-types') # for newer iblib version do [x['dataset_type'] for x in a] if not all([x['dataset_type'] for x in a]): - print('not 
all data available') + print('not all data available') continue - - - one.load(eid, dataset_types = dataset_types) + + + one.load(eid, dataset_types = dataset_types) except: - continue - - + continue + + def constant_reaction_time(eid, rt, st,stype='stim'): ''' getting trial numbers, feedback time interval ''' - - one = ONE() + + one = ONE() if stype == 'motion': wheelMoves = one.load_object(eid, 'wheelMoves') trials = one.load_object(eid, 'trials') - d = {} # dictionary, trial number and still interval + d = {} # dictionary, trial number and still interval evts = ['goCue_times', 'feedback_times', 'probabilityLeft', 'choice', 'feedbackType'] - + for tr in range(len(trials['intervals'])): if stype == 'motion': a = wheelMoves['intervals'][:,0] - b = trials['goCue_times'][tr] + b = trials['goCue_times'][tr] c = trials['feedback_times'][tr] ch = trials['choice'][tr] pl = trials['probabilityLeft'][tr] ft = trials['feedbackType'][tr] - - + + if any(np.isnan([trials[k][tr] for k in evts])): continue - - + + if c-b>10: # discard too long trials - continue - - if stype == 'motion': + continue + + if stype == 'motion': # making sure the motion onset time is in a coupled interval ind = np.where((a > b) & (a < c), True, False) if all(~ind): #print(f'non-still start, trial {tr} and eid {eid}') continue - - a = a[ind][0] - react = np.round(a - b,3) - + + a = a[ind][0] + react = np.round(a - b,3) + if np.isnan(trials['contrastLeft'][tr]): - cont = trials['contrastRight'][tr] + cont = trials['contrastRight'][tr] side = 0 - else: - cont = trials['contrastLeft'][tr] - side = 1 - + else: + cont = trials['contrastLeft'][tr] + side = 1 + if stype == 'feedback': d[tr] = [c + st,rt, cont, side, ch, ft] - if stype == 'stim': + if stype == 'stim': d[tr] = [b + st,rt, cont, side, ch, ft] if stype == 'motion': - d[tr] = [a + st,rt, cont, side, ch, ft] - - + d[tr] = [a + st,rt, cont, side, ch, ft] + + print(f"cut {len(d)} of {len(trials['intervals'])} full trials segments") - return d + return d + - def get_dlc_XYs(eid, video_type): - #video_type = 'left' - one = ONE() - Times = one.load_dataset(eid,f'alf/_ibl_{video_type}Camera.times.npy') + #video_type = 'left' + one = ONE() + Times = one.load_dataset(eid,f'alf/_ibl_{video_type}Camera.times.npy') cam = one.load_dataset(eid,f'alf/_ibl_{video_type}Camera.dlc.pqt') points = np.unique(['_'.join(x.split('_')[:-1]) for x in cam.keys()]) @@ -198,25 +198,25 @@ def get_dlc_XYs(eid, video_type): cam[point + '_likelihood'] < 0.9, cam[point + '_y']) y = y.filled(np.nan) XYs[point] = np.array( - [x, y]) + [x, y]) - return Times, XYs + return Times, XYs # a = one.list_datasets(eid) # if not all([u in a for u in dataset_types]): -# print('not all data available') +# print('not all data available') # return - + def get_ME(eid, video_type): - #video_type = 'left' - one = ONE() - - - Times = one.load_dataset(eid,f'alf/_ibl_{video_type}Camera.times.npy') + #video_type = 'left' + one = ONE() + + + Times = one.load_dataset(eid,f'alf/_ibl_{video_type}Camera.times.npy') ME = one.load_dataset(eid,f'alf/{video_type}Camera.ROIMotionEnergy.npy') - return Times, ME + return Times, ME @@ -229,19 +229,19 @@ def get_example_images(eid): # #eids = ['15f742e1-1043-45c9-9504-f1e8a53c1744'] # eids = ['4a45c8ba-db6f-4f11-9403-56e06a33dfa4'] - frts = {'body':30, 'left':60,'right':150} + frts = {'body':30, 'left':60,'right':150} + + one=ONE() + - one=ONE() - - #for eid in eids: urls = vidio.url_from_eid(eid, one=one) for video_type in frts: - - frame_idx = [5 * 60 * frts[video_type]] + + frame_idx = [5 
* 60 * frts[video_type]] try: - - + + video_path = urls[video_type] frames = get_video_frames_preload(video_path, frame_idx, @@ -250,7 +250,7 @@ def get_example_images(eid): f'{eid}_{video_type}.npy', frames) print(eid, video_type, 'done') except: - print(eid, video_type,'error') + print(eid, video_type,'error') continue @@ -264,117 +264,117 @@ def get_mean_positions(XYs): mloc[point] = [int(np.nanmean(XYs[point][0])), int(np.nanmean(XYs[point][1]))] return mloc - - + + def plot_paw_on_image(eid, video_type='left', XYs = None): - #fig = plt.figure(figsize=(8,4)) - + #fig = plt.figure(figsize=(8,4)) + Cs = ['#636EFA', '#EF553B', '#00CC96', '#AB63FA', '#FFA15A', '#19D3F3', '#FF6692', '#B6E880', '#FF97FF', '#FECB52','cyan'] try: r = np.load(f'/home/mic/reproducible_dlc/example_images/' - f'{eid}_{video_type}.npy')[0] + f'{eid}_{video_type}.npy')[0] except: get_example_images(eid) r = np.load(f'/home/mic/reproducible_dlc/example_images/' - f'{eid}_{video_type}.npy')[0] - - try: - if XYs == None: - _, XYs = get_dlc_XYs(eid, video_type) - + f'{eid}_{video_type}.npy')[0] + + try: + if XYs == None: + _, XYs = get_dlc_XYs(eid, video_type) + Cs = dict(zip(XYs.keys(),Cs)) - + ds = {'body':3,'left':6,'right':15} - - + + for point in XYs:# ['paw_l','paw_r']: if point in ['tube_bottom', 'tube_top']: continue - + # downsample; normalise number of points to be the same # across all sessions xs = XYs[point][0][0::ds[video_type]] ys = XYs[point][1][0::ds[video_type]] - - plt.scatter(xs,ys, alpha = 0.05, s = 2, - label = point, c = Cs[point]) - + + plt.scatter(xs,ys, alpha = 0.05, s = 2, + label = point, c = Cs[point]) + plt.axis('off') plt.tight_layout() plt.imshow(r,cmap='gray') - plt.tight_layout() - - - if video_type != 'body': - + plt.tight_layout() + + + if video_type != 'body': + if video_type == 'left': fa = 1 - elif video_type == 'right': - fa = 0.5 - - # plot whisker pad rectangle + elif video_type == 'right': + fa = 0.5 + + # plot whisker pad rectangle mloc = get_mean_positions(XYs) p_nose = np.array(mloc['nose_tip']) - p_pupil = np.array(mloc['pupil_top_r']) + p_pupil = np.array(mloc['pupil_top_r']) # heuristic to find whisker area in side videos: - # square with side length half the distance + # square with side length half the distance # between nose and pupil and anchored on midpoint p_anchor = np.mean([p_nose,p_pupil],axis=0) squared_dist = np.sum((p_nose-p_pupil)**2, axis=0) dist = np.sqrt(squared_dist) - whxy = [int(dist/2), int(dist/3), - int(p_anchor[0] - dist/4), int(p_anchor[1])] - - rect = patches.Rectangle((whxy[2], whxy[3]), whxy[0], whxy[1], - linewidth=1, + whxy = [int(dist/2), int(dist/3), + int(p_anchor[0] - dist/4), int(p_anchor[1])] + + rect = patches.Rectangle((whxy[2], whxy[3]), whxy[0], whxy[1], + linewidth=1, edgecolor='lime', - facecolor='none') - ax = plt.gca() - ax.add_patch(rect) + facecolor='none') + ax = plt.gca() + ax.add_patch(rect) + - # plot eye region zoom as inset in upper right corner pivot = np.nanmean(XYs['pupil_top_r'], axis=1) x0 = int(pivot[0] - 33 * fa) x1 = int(pivot[0] + 33 * fa) y0 = int(pivot[1] - 28 * fa) - y1 = int(pivot[1] + 38 * fa) - + y1 = int(pivot[1] + 38 * fa) + if video_type == 'right': axins = ax.inset_axes([0, -0.5, 0.5, 0.5]) elif video_type == 'left': axins = ax.inset_axes([0.5, -0.5, 0.5, 0.5]) - + axins.spines['bottom'].set_color('white') - axins.spines['top'].set_color('white') + axins.spines['top'].set_color('white') axins.spines['right'].set_color('white') axins.spines['left'].set_color('white') axins.imshow(r, cmap='gray', 
origin="lower") - + for point in XYs:# ['paw_l','paw_r']: if point in ['tube_bottom', 'tube_top']: continue - + # downsample; normalise number of points to be the same # across all sessions xs = XYs[point][0][0::ds[video_type]] ys = XYs[point][1][0::ds[video_type]] - - axins.scatter(xs,ys, alpha = 1, s = 0.001, - label = point, c = Cs[point]) + + axins.scatter(xs,ys, alpha = 1, s = 0.001, + label = point, c = Cs[point]) axins.set_xlim(x0, x1) axins.set_ylim(y1, y0) axins.set_xticklabels('') axins.set_yticklabels('') #ax.indicate_inset_zoom(axins, edgecolor="white") - + # plot tongue region zoom as inset in llower right corner p1 = np.nanmean(XYs['tube_top'], axis=1) p2 = np.nanmean(XYs['tube_bottom'], axis=1) @@ -382,63 +382,63 @@ def plot_paw_on_image(eid, video_type='left', XYs = None): x0 = int(pivot[0] - 60 * fa) x1 = int(pivot[0] + 100 * fa) y0 = int(pivot[1] - 100 * fa) - y1 = int(pivot[1] + 60 * fa) - + y1 = int(pivot[1] + 60 * fa) + if video_type == 'right': axins = ax.inset_axes([0.5, -0.5, 0.5, 0.5]) elif video_type == 'left': axins = ax.inset_axes([0, -0.5, 0.5, 0.5]) - + axins.spines['bottom'].set_color('white') - axins.spines['top'].set_color('white') + axins.spines['top'].set_color('white') axins.spines['right'].set_color('white') axins.spines['left'].set_color('white') axins.imshow(r, cmap='gray', origin="upper") - + for point in XYs:# ['paw_l','paw_r']: if point in ['tube_bottom', 'tube_top']: - continue + continue # downsample; normalise number of points to be the same # across all sessions xs = XYs[point][0][0::ds[video_type]] ys = XYs[point][1][0::ds[video_type]] - - axins.scatter(xs,ys, alpha = 1, s = 0.001, - label = point, color = Cs[point]) + + axins.scatter(xs,ys, alpha = 1, s = 0.001, + label = point, color = Cs[point]) axins.set_xlim(x0, x1) axins.set_ylim(y1, y0) axins.set_xticklabels('') axins.set_yticklabels('') - #ax.indicate_inset_zoom(axins, edgecolor="white") - + #ax.indicate_inset_zoom(axins, edgecolor="white") + except: - - plt.imshow(r,cmap='gray') - ax = plt.gca() + + plt.imshow(r,cmap='gray') + ax = plt.gca() plt.text(.5, .5,'DLC is nan',color='r',fontweight='bold', bbox=dict(facecolor='white', alpha=0.5), - fontsize=10,transform=ax.transAxes) + fontsize=10,transform=ax.transAxes) plt.tight_layout() #plt.show() - plt.title(video_type) + plt.title(video_type) def plot_paw_on_imageL(eid): plot_paw_on_image(eid, video_type='left') - + def plot_paw_on_imageR(eid): - plot_paw_on_image(eid, video_type='right') - + plot_paw_on_image(eid, video_type='right') + def plot_paw_on_imageB(eid): - plot_paw_on_image(eid, video_type='body') + plot_paw_on_image(eid, video_type='body') def paw_speed_PSTH(eid): rt = 2 - st = -0.5 - fs = {'right':150,'left':60} - + st = -0.5 + fs = {'right':150,'left':60} + cols = {'left':{'1':['darkred','--'],'-1':['#1f77b4','--']}, 'right':{'1':['darkred','-'],'-1':['#1f77b4','-']}} # take speed from right paw only, i.e. 
closer to cam @@ -447,59 +447,59 @@ def paw_speed_PSTH(eid): for video_type in ['right','left']: times, XYs = get_dlc_XYs(eid, video_type) x = XYs['paw_r'][0] - y = XYs['paw_r'][1] + y = XYs['paw_r'][1] if video_type == 'left': #make resolution same x = x/2 y = y/2 - + # get speed in px/sec [half res] s = ((np.diff(x)**2 + np.diff(y)**2)**.5)*fs[video_type] - - - speeds[video_type] = [times,s] - - + + + speeds[video_type] = [times,s] + + # that's centered at feedback time - d = constant_reaction_time(eid, rt, st,stype='stim') - + d = constant_reaction_time(eid, rt, st,stype='stim') + sc = {'left':{'1':[],'-1':[]},'right':{'1':[],'-1':[]}} - for video_type in ['right','left']: - + for video_type in ['right','left']: + times,s = speeds[video_type] for i in d: start_idx = find_nearest(times,d[i][0]) - end_idx = find_nearest(times,d[i][0]+d[i][1]) + end_idx = find_nearest(times,d[i][0]+d[i][1]) sc[video_type][str(d[i][4])].append(s[start_idx:end_idx]) # trim on e frame if necessary - for video_type in ['right','left']: + for video_type in ['right','left']: for choice in ['-1','1']: m = min([len(x) for x in sc[video_type][choice]]) q = [x[:m] for x in sc[video_type][choice]] xs = np.arange(m) / fs[video_type] xs = np.concatenate([ -1*np.array(list(reversed(xs[:int(abs(st)*fs[video_type])]))), - np.array(xs[:int((rt - abs(st))*fs[video_type])])]) - m = min(len(xs),m) + np.array(xs[:int((rt - abs(st))*fs[video_type])])]) + m = min(len(xs),m) c = cols[video_type][choice][0] - ls = cols[video_type][choice][1] - - qm = np.nanmean(q,axis=0) - plt.plot(xs[:m], qm[:m], c = c,linestyle=ls, - linewidth = 1, + ls = cols[video_type][choice][1] + + qm = np.nanmean(q,axis=0) + plt.plot(xs[:m], qm[:m], c = c,linestyle=ls, + linewidth = 1, label = f'paw {video_type[0]},' + ' choice ' + choice) - + ax = plt.gca() ax.axvline(x=0, label='stimOn', linestyle = '--', c='g') plt.title('paw speed PSTH') plt.xlabel('time [sec]') - plt.ylabel('speed [px/sec]') - plt.legend()#bbox_to_anchor=(1.05, 1), loc='upper left') - - + plt.ylabel('speed [px/sec]') + plt.legend()#bbox_to_anchor=(1.05, 1), loc='upper left') + + def nose_speed_PSTH(eid,vtype='right'): ''' @@ -508,11 +508,11 @@ def nose_speed_PSTH(eid,vtype='right'): ''' rt = 2 - st = -0.5 - fs = {'right':150,'left':60} + st = -0.5 + fs = {'right':150,'left':60} cts = {'1':'correct trials', '-1':'incorrect trials'} - - + + cols = {'left':{'1':['k','--'],'-1':['gray','--']}, 'right':{'1':['k','-'],'-1':['gray','-']}} # take speed from right paw only, i.e. 
closer to cam @@ -521,63 +521,63 @@ def nose_speed_PSTH(eid,vtype='right'): for video_type in ['right','left']: times, XYs = get_dlc_XYs(eid, video_type) x = XYs['nose_tip'][0] - y = XYs['nose_tip'][1] + y = XYs['nose_tip'][1] if video_type == 'left': #make resolution same x = x/2 y = y/2 - + # get speed in px/sec [half res] s = ((np.diff(x)**2 + np.diff(y)**2)**.5)*fs[video_type] - - - speeds[video_type] = [times,s] - + + + speeds[video_type] = [times,s] + # average nose tip speeds across cams - - - + + + # that's centered at feedback time - d = constant_reaction_time(eid, rt, st,stype='feedback') - + d = constant_reaction_time(eid, rt, st,stype='feedback') + sc = {'left':{'1':[],'-1':[]},'right':{'1':[],'-1':[]}} - for video_type in ['right','left']: - + for video_type in ['right','left']: + times,s = speeds[video_type] for i in d: start_idx = find_nearest(times,d[i][0]) - end_idx = find_nearest(times,d[i][0]+d[i][1]) + end_idx = find_nearest(times,d[i][0]+d[i][1]) sc[video_type][str(d[i][5])].append(s[start_idx:end_idx]) # trim on e frame if necessary - for video_type in [vtype]: # 'right','left' + for video_type in [vtype]: # 'right','left' for choice in ['-1','1']: m = min([len(x) for x in sc[video_type][choice]]) q = [x[:m] for x in sc[video_type][choice]] xs = np.arange(m) / fs[video_type] xs = np.concatenate([ -1*np.array(list(reversed(xs[:int(abs(st)*fs[video_type])]))), - np.array(xs[:int((rt - abs(st))*fs[video_type])])]) - m = min(len(xs),m) + np.array(xs[:int((rt - abs(st))*fs[video_type])])]) + m = min(len(xs),m) c = cols[video_type][choice][0] - ls = cols[video_type][choice][1] - - qm = np.nanmean(q,axis=0) - plt.plot(xs[:m], qm[:m], c = c,linestyle=ls, - linewidth = 1, + ls = cols[video_type][choice][1] + + qm = np.nanmean(q,axis=0) + plt.plot(xs[:m], qm[:m], c = c,linestyle=ls, + linewidth = 1, label = cts[choice]) - + ax = plt.gca() ax.axvline(x=0, label='stimOn', linestyle = '--', c='r') plt.title('nose tip speed PSTH, ' f'{vtype} vid') plt.xlabel('time [sec]') - plt.ylabel('speed [px/sec]') - plt.legend(loc='lower right') - - + plt.ylabel('speed [px/sec]') + plt.legend(loc='lower right') + + def get_licks(XYs): ''' @@ -602,51 +602,51 @@ def plot_licks(eid, combine=True): ''' T_BIN = 0.02 rt = 2 - st = -0.5 - - if combine: + st = -0.5 + + if combine: # combine licking events from left and right cam lick_times = [] for video_type in ['right','left']: times, XYs = get_dlc_XYs(eid, video_type) # for the case that there are less times than DLC points r = get_licks(XYs) - idx = np.where(np.array(r) len(D): break - # split by feedback type) - if d[i][5] == 1: + # split by feedback type) + if d[i][5] == 1: licks_pos.append(D[start_idx:end_idx]) if d[i][5] == -1: - licks_neg.append(D[start_idx:end_idx]) + licks_neg.append(D[start_idx:end_idx]) + - licks_pos_ = np.array(licks_pos).mean(axis=0) licks_neg_ = np.array(licks_neg).mean(axis=0) @@ -654,70 +654,70 @@ def plot_licks(eid, combine=True): xs = np.concatenate([ -1*np.array(list(reversed(xs[:int(len(xs)*abs(st/rt))]))), np.array(xs[:int(len(xs)*(1 - abs(st/rt)))])]) - xs = xs*T_BIN + xs = xs*T_BIN plt.plot(xs, licks_pos_, c = 'k', label = 'correct trial') - plt.plot(xs, licks_neg_, c = 'gray', label = 'incorrect trial') + plt.plot(xs, licks_neg_, c = 'gray', label = 'incorrect trial') ax = plt.gca() ax.axvline(x=0, label='feedback time', linestyle = '--', c='r') plt.title('lick PSTH') plt.xlabel('time [sec]') - plt.ylabel('lick events \n [a.u.]') + plt.ylabel('lick events \n [a.u.]') plt.legend(loc='lower right') - + def 
lick_raster(eid, combine=True): #plt.figure(figsize=(4,4)) - + T_BIN = 0.02 rt = 2 - st = -0.5 - - if combine: + st = -0.5 + + if combine: # combine licking events from left and right cam lick_times = [] for video_type in ['right','left']: times, XYs = get_dlc_XYs(eid, video_type) r = get_licks(XYs) - idx = np.where(np.array(r) len(D): break - - # split by feedback type) - if d[i][5] == 1: - licks_pos.append(D[start_idx:end_idx]) - + + # split by feedback type) + if d[i][5] == 1: + licks_pos.append(D[start_idx:end_idx]) + licks_pos_ = np.array(licks_pos).mean(axis=0) - + y_dims, x_dims = len(licks_pos), len(licks_pos[0]) plt.imshow(licks_pos,aspect='auto', extent=[-0.5,1.5,y_dims,0], cmap='gray_r') - + ax = plt.gca() ax.set_xticks([-0.5,0,0.5,1,1.5]) ax.set_xticklabels([-0.5,0,0.5,1,1.5]) @@ -734,7 +734,7 @@ def find_nearest(array,value): if idx > 0 and (idx == len(array) or math.fabs(value - array[idx-1]) < math.fabs(value - array[idx])): return idx-1 else: - return idx + return idx def plot_wheel_position(eid): @@ -742,55 +742,55 @@ def plot_wheel_position(eid): ''' illustrate wheel position next to distance plot ''' - T_BIN = 0.02 + T_BIN = 0.02 rt = 2 st = -0.5 - - d = constant_reaction_time(eid, rt, st) - - one = ONE() + + d = constant_reaction_time(eid, rt, st) + + one = ONE() wheel = one.load_object(eid, 'wheel') - pos, t = wh.interpolate_position(wheel.timestamps, - wheel.position, + pos, t = wh.interpolate_position(wheel.timestamps, + wheel.position, freq=1/T_BIN) whe_left = [] whe_right = [] - + for i in d: - + start_idx = find_nearest(t,d[i][0]) - end_idx = start_idx + int(d[i][1]/T_BIN) - + end_idx = start_idx + int(d[i][1]/T_BIN) + wheel_pos = pos[start_idx:end_idx] if len(wheel_pos) == 1: print(i, [start_idx,end_idx]) - + wheel_pos = wheel_pos - wheel_pos[0] - - if d[i][4] == -1: + + if d[i][4] == -1: whe_left.append(wheel_pos) if d[i][4] == 1: - whe_right.append(wheel_pos) + whe_right.append(wheel_pos) xs = np.arange(len(whe_left[0]))*T_BIN times = np.concatenate([ -1*np.array(list(reversed(xs[:int(len(xs)*abs(st/rt))]))), np.array(xs[:int(len(xs)*(1 - abs(st/rt)))])]) - + for i in range(len(whe_left)): plt.plot(times, whe_left[i],c='#1f77b4', alpha =0.5, linewidth = 0.05) - for i in range(len(whe_right)): + for i in range(len(whe_right)): plt.plot(times, whe_right[i],c='darkred', alpha =0.5, linewidth = 0.05) - plt.plot(times, np.mean(whe_left,axis=0),c='#1f77b4', + plt.plot(times, np.mean(whe_left,axis=0),c='#1f77b4', linewidth = 2, label = 'left') - plt.plot(times, np.mean(whe_right,axis=0),c='darkred', + plt.plot(times, np.mean(whe_right,axis=0),c='darkred', linewidth = 2, label = 'right') plt.axhline(y=0.26, linestyle = '--', c = 'k') - plt.axhline(y=-0.26, linestyle = '--', c = 'k', label = 'reward boundary') - plt.axvline(x=0, linestyle = '--', c = 'g', label = 'stimOn') + plt.axhline(y=-0.26, linestyle = '--', c = 'k', label = 'reward boundary') + plt.axvline(x=0, linestyle = '--', c = 'g', label = 'stimOn') axes = plt.gca() #axes.set_xlim([0,rt]) axes.set_ylim([-0.27,0.27]) @@ -798,7 +798,7 @@ def plot_wheel_position(eid): plt.ylabel('wheel position [rad]') plt.legend(loc='lower right') plt.title('wheel positions colored by choice') - plt.tight_layout() + plt.tight_layout() def get_sniffs(XYs): @@ -823,43 +823,43 @@ def plot_sniffPSTH(eid, combine=False): ''' T_BIN = 0.02 rt = 2 - st = -0.5 - - if combine: + st = -0.5 + + if combine: # combine licking events from left and right cam lick_times = [] for video_type in ['right','left']: times, XYs = get_dlc_XYs(eid, 
video_type) lick_times.append(times[get_sniffs(XYs)]) - + lick_times = sorted(np.concatenate(lick_times)) - + else: - times, XYs = get_dlc_XYs(eid, 'left') + times, XYs = get_dlc_XYs(eid, 'left') lick_times = times[get_sniffs(XYs)] - - + + R, t, _ = bincount2D(lick_times, np.ones(len(lick_times)), T_BIN) D = R[0] - + # that's centered at feedback time - d = constant_reaction_time(eid, rt, st,stype='feedback') - + d = constant_reaction_time(eid, rt, st,stype='feedback') + licks_pos = [] licks_neg = [] - + for i in d: - + start_idx = find_nearest(t,d[i][0]) - end_idx = start_idx + int(d[i][1]/T_BIN) + end_idx = start_idx + int(d[i][1]/T_BIN) - # split by feedback type) - if d[i][5] == 1: + # split by feedback type) + if d[i][5] == 1: licks_pos.append(D[start_idx:end_idx]) if d[i][5] == -1: - licks_neg.append(D[start_idx:end_idx]) + licks_neg.append(D[start_idx:end_idx]) + - licks_pos = np.array(licks_pos).mean(axis=0) licks_neg = np.array(licks_neg).mean(axis=0) @@ -869,14 +869,14 @@ def plot_sniffPSTH(eid, combine=False): -1*np.array(list(reversed(xs[:int(len(xs)*abs(st/rt))]))), np.array(xs[:int(len(xs)*(1 - abs(st/rt)))])]) - xs = xs*T_BIN + xs = xs*T_BIN plt.plot(xs, licks_pos, c = 'k', label = 'correct trial') - plt.plot(xs, licks_neg, c = 'gray', label = 'incorrect trial') + plt.plot(xs, licks_neg, c = 'gray', label = 'incorrect trial') ax = plt.gca() ax.axvline(x=0, label='feedback time', linestyle = '--', c='r') plt.title('sniff PSTH') plt.xlabel('time [sec]') - plt.ylabel('sniff events \n [a.u.]') + plt.ylabel('sniff events \n [a.u.]') plt.legend(loc='lower right') @@ -907,8 +907,8 @@ def dia_via_circle(p1, p2): # estimate diameter via circle assumption for side in [[t, l], [t, r], [b, l], [b, r]]: ds.append(dia_via_circle(side[0], side[1])) - - + + diam = np.nanmedian(ds, axis=0) if smooth: return smooth_pupil_diameter(diam) @@ -917,7 +917,7 @@ def dia_via_circle(p1, p2): def smooth_pupil_diameter(diam, window=31, order=3, interp_kind='cubic'): - + signal_noisy_w_nans = np.copy(diam) timestamps = np.arange(signal_noisy_w_nans.shape[0]) good_idxs = np.where(~np.isnan(signal_noisy_w_nans)) @@ -1027,9 +1027,9 @@ def non_uniform_savgol(x, y, window, polynom): #def align_left_right_pupil(eid): -# timesL, XYsL = get_dlc_XYs(eid, 'left') +# timesL, XYsL = get_dlc_XYs(eid, 'left') # dL=get_pupil_diameter(XYsL) -# timesR, XYsR = get_dlc_XYs(eid, 'right') +# timesR, XYsR = get_dlc_XYs(eid, 'right') # dR=get_pupil_diameter(XYsR) # # align time series left/right @@ -1041,11 +1041,11 @@ def non_uniform_savgol(x, y, window, polynom): # idx_aligned = np.round(interpolater(timesL)).astype(np.int) # dRa = dR[idx_aligned] -# +# # plt.plot(timesL,zscore(dL),label='left') # plt.plot(timesL,zscore(dRa),label='right') # plt.title('smoothed, aligned, z-scored right/left pupil diameter \n' -# f'{eid}') +# f'{eid}') # plt.legend(loc='lower right') # plt.ylabel('pupil diameter') # plt.xlabel('time [sec]') @@ -1060,33 +1060,33 @@ def pupil_diameter_PSTH(eid, s=None, times=None, per_trial_norm=True): ''' rt = 2 # duration of window - st = -0.5 # lag of window wrt to stype - + st = -0.5 # lag of window wrt to stype + if (s is None) or (times is None): - times, XYs = get_dlc_XYs(eid, 'left') + times, XYs = get_dlc_XYs(eid, 'left') s = get_pupil_diameter(XYs) - + D = {} - - xs = np.arange(rt*60) # number of frames + + xs = np.arange(rt*60) # number of frames xs[:int(abs(st)*60)] = -1*np.array(list(reversed(xs[:int(abs(st)*60)]))) xs[int(abs(st)*60):] = np.arange(rt*60)[1:1+len(xs[int(abs(st)*60):])] xs = xs /60. 
- + cols = {'stim':'r','feedback':'b','motion':'g'} - - + + for stype in ['stim','feedback']:#,'motion' # that's centered at feedback time - d = constant_reaction_time(eid, rt, st,stype=stype) - + d = constant_reaction_time(eid, rt, st,stype=stype) + D[stype] = [] - + for i in d: start_idx = find_nearest(times,d[i][0]) end_idx = start_idx + rt*60 - + if per_trial_norm: # per trial baseline - at start = 0 dia = s[start_idx:end_idx] @@ -1094,23 +1094,23 @@ def pupil_diameter_PSTH(eid, s=None, times=None, per_trial_norm=True): dia = dia - dia[0] else: dia = s[start_idx:end_idx] - - + + D[stype].append(dia) - + MEAN = np.mean(D[stype],axis=0) - STD = np.std(D[stype],axis=0)/np.sqrt(len(d)) + STD = np.std(D[stype],axis=0)/np.sqrt(len(d)) plt.plot(xs, MEAN, label=stype, color = cols[stype]) plt.fill_between(xs, MEAN + STD, MEAN - STD, color = cols[stype], alpha=0.5) - + ax = plt.gca() ax.axvline(x=0, label='align event', linestyle = '--', c='k') plt.title('left cam pupil diameter PSTH') plt.xlabel('time [sec]') - plt.ylabel('pupil diameter [px]') + plt.ylabel('pupil diameter [px]') plt.legend(loc='lower right') @@ -1123,7 +1123,7 @@ def motion_energy_PSTH(eid): ''' rt = 2 # duration of window - st = -0.5 # lag of window wrt to stype + st = -0.5 # lag of window wrt to stype stype = 'stimOn_times' ME = {} @@ -1134,10 +1134,10 @@ def motion_energy_PSTH(eid): try: for video_type in ['left','right','body']: - t,m = get_ME(eid, video_type) - m = zscore(m,nan_policy='omit') + t,m = get_ME(eid, video_type) + m = zscore(m,nan_policy='omit') - sta, end = find_nearest(t,ts), find_nearest(t,te) + sta, end = find_nearest(t,ts), find_nearest(t,te) t = t[sta:end] m = m[sta:end] @@ -1156,50 +1156,50 @@ def motion_energy_PSTH(eid): idx_aligned = np.round(interpolater(ME['body'][0])).astype(int) ME[video_type] = [ME['body'][0], ME[video_type][1][idx_aligned]] - + D = {} - + fs = 30 - xs = np.arange(rt*fs) # number of frames + xs = np.arange(rt*fs) # number of frames xs = np.concatenate([-1*np.array(list(reversed(xs[:int(abs(st)*fs)]))), np.arange(rt*fs)[1:1+len(xs[int(abs(st)*fs):])]]) xs = xs /float(fs) - + cols = {'left':'r','right':'b','body':'g'} - + for video_type in ME: # that's centered at feedback time - - + + D[video_type] = [] - + times,s = ME[video_type] - trs = trials[stype][20:-20] + trs = trials[stype][20:-20] for i in trs: start_idx = int(find_nearest(times,i) + st*30) - end_idx = int(start_idx + rt*30) + end_idx = int(start_idx + rt*30) D[video_type].append(s[start_idx:end_idx]) - + MEAN = np.mean(D[video_type],axis=0) - STD = np.std(D[video_type],axis=0)/np.sqrt(len(trs)) - + STD = np.std(D[video_type],axis=0)/np.sqrt(len(trs)) + - plt.plot(xs, MEAN, label=video_type, + plt.plot(xs, MEAN, label=video_type, color = cols[video_type], linewidth = 2) plt.fill_between(xs, MEAN + STD, MEAN - STD, color = cols[video_type], alpha=0.2) - + ax = plt.gca() ax.axvline(x=0, label='stimOn', linestyle = '--', c='k') plt.title('Motion Energy PSTH') plt.xlabel('time [sec]') - plt.ylabel('z-scored motion energy [a.u.]') - plt.legend(loc='lower right') - + plt.ylabel('z-scored motion energy [a.u.]') + plt.legend(loc='lower right') + except: plt.title('No motion energy available!') @@ -1207,7 +1207,7 @@ def motion_energy_PSTH(eid): def interp_nans(y): def nan_helper(y): - return np.isnan(y), lambda z: z.nonzero()[0] + return np.isnan(y), lambda z: z.nonzero()[0] nans, yy = nan_helper(y) y2 = copy.deepcopy(y) y2[nans] = np.interp(yy(nans), yy(~nans), y[~nans]) @@ -1228,56 +1228,56 @@ def add_panel_letter(k): def 
plot_all(eid): matplotlib.rcParams.update({'font.size': 10}) # report eid = '4a45c8ba-db6f-4f11-9403-56e06a33dfa4' - + panels = {'plot_paw_on_imageL':plot_paw_on_imageL, 'plot_paw_on_imageR':plot_paw_on_imageR, 'plot_paw_on_imageB':plot_paw_on_imageB, 'plot_wheel_position':plot_wheel_position, 'paw_speed_PSTH':paw_speed_PSTH, - 'plot_licks':plot_licks, + 'plot_licks':plot_licks, 'lick_raster':lick_raster, 'nose_speed_PSTH':nose_speed_PSTH, 'pupil_diameter_PSTH':pupil_diameter_PSTH, 'motion_energy_PSTH':motion_energy_PSTH} - + nrows = 2 ncols = int(np.ceil(len(panels)/2)) plt.ioff() - - plt.figure(figsize=(17,10)) - + + plt.figure(figsize=(17,10)) + k = 1 for panel in panels: - plt.subplot(nrows,ncols,k) - add_panel_letter(k) + plt.subplot(nrows,ncols,k) + add_panel_letter(k) try: panels[panel](eid) #continue except: - ax = plt.gca() + ax = plt.gca() plt.text(.5, .5,f'error in \n {panel}',color='r',fontweight='bold', bbox=dict(facecolor='white', alpha=0.5), fontsize=10, transform=ax.transAxes) k += 1 - + plt.tight_layout() - - + + # print QC outcome in title and DLC task version one = ONE() - + det = one.get_details(eid, True)['extended_qc'] p = one.eid2path(eid) s1 = '_'.join([str(p).split('/')[i] for i in [4,6,7,8]]) - + dlc_qcs = [ 'time_trace_length_match', 'trace_all_nan', 'mean_in_bbox', 'pupil_blocked', 'lick_detection'] - + raw_qcs = ['focus','position','brightness', 'resolution','timestamps','file_headers', 'wheel_alignment','camera_times','framerate', @@ -1285,45 +1285,45 @@ def plot_all(eid): qcs = ['task','behavior','videoLeft','videoRight','videoBody', - 'dlcLeft','dlcRight','dlcBody'] - - l = [] - for q in qcs: + 'dlcLeft','dlcRight','dlcBody'] + + l = [] + for q in qcs: try: if det[q] == 'FAIL': if ('dlc' in q): l.append('\n') - l.append(q+':'+str(det[q])+'-->') + l.append(q+':'+str(det[q])+'-->') video_type = q[3:] for dlc_qc in dlc_qcs: w = f'_dlc{video_type}_{dlc_qc}' if not det[w]: l.append(w+':'+str(det[w])+',') - elif ('video' in q): + elif ('video' in q): l.append('\n') - l.append(q+':'+str(det[q])+'-->') + l.append(q+':'+str(det[q])+'-->') video_type = q[5:] for raw_qc in raw_qcs: w = f'_video{video_type}_{raw_qc}' if type(det[w]) == bool: if (not det[w]): - l.append(w+':'+str(det[w])+',') - if type(det[w]) != bool: + l.append(w+':'+str(det[w])+',') + if type(det[w]) != bool: if not det[w][0]: - l.append(w+':'+str(det[w])+',') - - else: - l.append(q+':'+str(det[q])+',') + l.append(w+':'+str(det[w])+',') + + else: + l.append(q+':'+str(det[q])+',') except: continue - + s2 = ' '.join(l) - + ntrials = len(one.load_object(eid, 'trials')['goCue_times']) - + task = one.alyx.rest('tasks', 'list', session=eid, name='EphysDLC') - + try: plt.suptitle(s1+'#Trials:'+str(ntrials)+', DLC version: ' +str(task[0]['version'])+' \n '+s2, @@ -1331,23 +1331,23 @@ def plot_all(eid): except: plt.suptitle(s1+'#Trials:'+str(ntrials)+', DLC version: ??' 
+' \n '+s2, - backgroundcolor= 'white', fontsize=6) - - plt.tight_layout(rect=[0, 0.03, 1, 0.95]) + backgroundcolor= 'white', fontsize=6) + + plt.tight_layout(rect=[0, 0.03, 1, 0.95]) #plt.savefig(f'/home/mic/reproducible_dlc/overviewJune/{eid}.png') #plt.savefig(f'/home/mic/reproducible_dlc/all_DLC/{s1}_{eid}.png') - plt.savefig(f'/home/mic/reproducible_dlc/motor_correlate/{s1}_{eid}.png') + plt.savefig(f'/home/mic/reproducible_dlc/motor_correlate/{s1}_{eid}.png') plt.close() - + def inspection(): ''' go through computed png images to report QC issues - ''' + ''' one = ONE() - a = os.listdir('/home/mic/reproducible_dlc/overviewJune/') - + a = os.listdir('/home/mic/reproducible_dlc/overviewJune/') + for i in a: plt.figure(figsize=(15,10)) eid = i.split('.')[0] @@ -1361,6 +1361,6 @@ def inspection(): plt.show() input("Press Enter to continue...") plt.close() - - - + + + diff --git a/needles2/probe_model.py b/needles2/probe_model.py index feeec00..78c042b 100644 --- a/needles2/probe_model.py +++ b/needles2/probe_model.py @@ -72,7 +72,7 @@ def get_traj_for_provenance(self, provenance='Histology track', django=None, if prov_dict is None: prov_dict = provenance - django_base = ['probe_insertion__session__project__name__icontains,' + django_base = ['probe_insertion__session__projects__name__icontains,' 'ibl_neuropixel_brainwide_01,probe_insertion__session__json__IS_MOCK,False', 'probe_insertion__session__qc__lt,50', '~probe_insertion__json__qc,CRITICAL', @@ -107,7 +107,7 @@ def get_traj_for_provenance(self, provenance='Histology track', django=None, def get_insertions_with_xyz(self): start = time.time() - django_str = 'session__project__name__icontains,ibl_neuropixel_brainwide_01,' \ + django_str = 'session__projects__name__icontains,ibl_neuropixel_brainwide_01,' \ 'json__has_key,xyz_picks' self.ins['insertions'] = self.one.alyx.rest('insertions', 'list', django=django_str) self.ins['ids'] = np.array([ins['id'] for ins in self.ins['insertions']]) From d19986cc442749b017dfe09edba1dda87aec0ccd Mon Sep 17 00:00:00 2001 From: Mayo Faulkner Date: Wed, 27 Mar 2024 10:53:34 +0000 Subject: [PATCH 3/3] make sure probe insertion not overwritten on partial update --- atlaselectrophysiology/load_data.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/atlaselectrophysiology/load_data.py b/atlaselectrophysiology/load_data.py index 74aa80e..12701f8 100644 --- a/atlaselectrophysiology/load_data.py +++ b/atlaselectrophysiology/load_data.py @@ -42,6 +42,10 @@ def __init__(self, one=None, brain_atlas=None, testing=False, probe_id=None, self.brain_regions = self.one.alyx.rest('brain-regions', 'list') self.chn_coords = None self.chn_depths = None + # Download bwm aggregate tables for ephys feature gui + table_path = self.one.cache_dir.joinpath('bwm_features') + s3, bucket_name = aws.get_s3_from_alyx(alyx=self.one.alyx) + aws.s3_download_folder("aggregates/bwm/latest", table_path, s3=s3, bucket_name=bucket_name) # Initialise all variables that get assigned self.sess_with_hist = None @@ -65,11 +69,6 @@ def __init__(self, one=None, brain_atlas=None, testing=False, probe_id=None, self.alyx_str = None self.sr = None - # Download bwm aggregate tables for for ephys feature gui - table_path = self.one.cache_dir.joinpath('bwm_features') - s3, bucket_name = aws.get_s3_from_alyx(alyx=self.one.alyx) - aws.s3_download_folder("aggregates/bwm/latest", table_path, s3=s3, bucket_name=bucket_name) - if probe_id is not None: self.sess = self.one.alyx.rest('insertions', 'list', id=probe_id) @@ -578,7
+577,7 @@ def update_json(self, json_data): # Get the new trajectory ephys_traj = self.one.alyx.rest('trajectories', 'list', probe_insertion=self.probe_id, provenance='Ephys aligned histology track', no_cache=True) - patch_dict = {'json': json_data} + patch_dict = {'probe_insertion': self.probe_id, 'json': json_data} self.one.alyx.rest('trajectories', 'partial_update', id=ephys_traj[0]['id'], data=patch_dict)
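The final patch addresses a subtlety of Alyx REST partial updates: per the commit message, a `partial_update` that carried only the `json` field could leave the trajectory's `probe_insertion` overwritten, so the insertion ID is now sent explicitly in the payload. A minimal sketch of the resulting call pattern, distilled from the diff above into a hypothetical standalone helper (the endpoint names and payload come from the diff; the function wrapper itself is illustrative):

```python
from one.api import ONE

def update_trajectory_json(one: ONE, probe_id: str, json_data: dict):
    """Patch the JSON of a probe's 'Ephys aligned histology track' trajectory.

    Hypothetical helper distilled from the diff above: the payload carries the
    probe_insertion ID alongside the JSON blob, so the partial update cannot
    overwrite the trajectory's link to its insertion as a side effect.
    """
    # Find the aligned trajectory for this insertion, bypassing the cache
    ephys_traj = one.alyx.rest('trajectories', 'list', probe_insertion=probe_id,
                               provenance='Ephys aligned histology track',
                               no_cache=True)
    # Pin the insertion ID explicitly in the PATCH payload
    patch_dict = {'probe_insertion': probe_id, 'json': json_data}
    return one.alyx.rest('trajectories', 'partial_update',
                         id=ephys_traj[0]['id'], data=patch_dict)
```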