From 9637fb4ac20e079bcfd7a03563364435ed7f19b0 Mon Sep 17 00:00:00 2001 From: glopesdev Date: Fri, 6 Sep 2024 16:05:52 +0100 Subject: [PATCH 01/36] Apply all safe ruff fixes --- aeon/analysis/block_plotting.py | 12 +----------- aeon/dj_pipeline/__init__.py | 5 ++--- aeon/dj_pipeline/acquisition.py | 3 +-- aeon/dj_pipeline/populate/worker.py | 1 - aeon/dj_pipeline/qc.py | 2 +- aeon/dj_pipeline/scripts/clone_and_freeze_exp02.py | 9 ++++----- aeon/dj_pipeline/tracking.py | 2 +- aeon/dj_pipeline/utils/load_metadata.py | 3 +-- aeon/io/device.py | 1 + aeon/io/video.py | 2 +- aeon/qc/video.py | 4 +--- aeon/schema/core.py | 2 +- aeon/schema/dataset.py | 2 +- aeon/schema/foraging.py | 2 ++ aeon/schema/schemas.py | 3 +-- aeon/schema/social_02.py | 2 +- tests/conftest.py | 6 ++---- tests/dj_pipeline/test_pipeline_instantiation.py | 8 ++++---- tests/dj_pipeline/test_tracking.py | 3 +-- 19 files changed, 27 insertions(+), 45 deletions(-) diff --git a/aeon/analysis/block_plotting.py b/aeon/analysis/block_plotting.py index 027da966..61993cf8 100644 --- a/aeon/analysis/block_plotting.py +++ b/aeon/analysis/block_plotting.py @@ -1,17 +1,7 @@ -import os -import pathlib from colorsys import hls_to_rgb, rgb_to_hls -from contextlib import contextmanager -from pathlib import Path -import matplotlib.pyplot as plt import numpy as np -import pandas as pd import plotly -import plotly.express as px -import plotly.graph_objs as go -import seaborn as sns -from numpy.lib.stride_tricks import as_strided """Standardize subject colors, patch colors, and markers.""" @@ -36,7 +26,7 @@ ] patch_markers_symbols = ["●", "⧓", "■", "⧗", "♦", "✖", "×", "▲", "★"] patch_markers_dict = { - marker: symbol for marker, symbol in zip(patch_markers, patch_markers_symbols) + marker: symbol for marker, symbol in zip(patch_markers, patch_markers_symbols, strict=False) } patch_markers_linestyles = ["solid", "dash", "dot", "dashdot", "longdashdot"] diff --git a/aeon/dj_pipeline/__init__.py b/aeon/dj_pipeline/__init__.py index 72e57718..fc5e6fef 100644 --- a/aeon/dj_pipeline/__init__.py +++ b/aeon/dj_pipeline/__init__.py @@ -31,9 +31,8 @@ def dict_to_uuid(key) -> uuid.UUID: def fetch_stream(query, drop_pk=True): - """ - Provided a query containing data from a Stream table, - fetch and aggregate the data into one DataFrame indexed by "time" + """Provided a query containing data from a Stream table, + fetch and aggregate the data into one DataFrame indexed by "time" """ df = (query & "sample_count > 0").fetch(format="frame").reset_index() cols2explode = [ diff --git a/aeon/dj_pipeline/acquisition.py b/aeon/dj_pipeline/acquisition.py index b20c1a0c..eb2ad5ff 100644 --- a/aeon/dj_pipeline/acquisition.py +++ b/aeon/dj_pipeline/acquisition.py @@ -604,8 +604,7 @@ def _match_experiment_directory(experiment_name, path, directories): def create_chunk_restriction(experiment_name, start_time, end_time): - """ - Create a time restriction string for the chunks between the specified "start" and "end" times + """Create a time restriction string for the chunks between the specified "start" and "end" times """ start_restriction = f'"{start_time}" BETWEEN chunk_start AND chunk_end' end_restriction = f'"{end_time}" BETWEEN chunk_start AND chunk_end' diff --git a/aeon/dj_pipeline/populate/worker.py b/aeon/dj_pipeline/populate/worker.py index cbcdbb57..43b7dc64 100644 --- a/aeon/dj_pipeline/populate/worker.py +++ b/aeon/dj_pipeline/populate/worker.py @@ -48,7 +48,6 @@ def ingest_environment_visits(): """Extract and insert complete visits for experiments specified in 
AutomatedExperimentIngestion.""" experiment_names = AutomatedExperimentIngestion.fetch("experiment_name") # analysis.ingest_environment_visits(experiment_names) - pass # ---- Define worker(s) ---- diff --git a/aeon/dj_pipeline/qc.py b/aeon/dj_pipeline/qc.py index 0a9bd4e9..7044da0e 100644 --- a/aeon/dj_pipeline/qc.py +++ b/aeon/dj_pipeline/qc.py @@ -77,7 +77,7 @@ def make(self, key): "devices_schema_name" ), ) - stream_reader = getattr(getattr(devices_schema, device_name), "Video") + stream_reader = getattr(devices_schema, device_name).Video videodata = io_api.load( root=data_dirs, diff --git a/aeon/dj_pipeline/scripts/clone_and_freeze_exp02.py b/aeon/dj_pipeline/scripts/clone_and_freeze_exp02.py index ee1d7356..1847d8ed 100644 --- a/aeon/dj_pipeline/scripts/clone_and_freeze_exp02.py +++ b/aeon/dj_pipeline/scripts/clone_and_freeze_exp02.py @@ -57,11 +57,10 @@ def data_copy(restriction, table_block_list, batch_size=None): def validate(): - """ - Validation of schemas migration - 1. for the provided list of schema names - validate all schemas have been migrated - 2. for each schema - validate all tables have been migrated - 3. for each table, validate all entries have been migrated + """Validation of schemas migration + 1. for the provided list of schema names - validate all schemas have been migrated + 2. for each schema - validate all tables have been migrated + 3. for each table, validate all entries have been migrated """ missing_schemas = [] missing_tables = {} diff --git a/aeon/dj_pipeline/tracking.py b/aeon/dj_pipeline/tracking.py index 22ddf978..4f4d9de0 100644 --- a/aeon/dj_pipeline/tracking.py +++ b/aeon/dj_pipeline/tracking.py @@ -162,7 +162,7 @@ def make(self, key): "devices_schema_name" ), ) - stream_reader = getattr(getattr(devices_schema, device_name), "Pose") + stream_reader = getattr(devices_schema, device_name).Pose pose_data = io_api.load( root=data_dirs, diff --git a/aeon/dj_pipeline/utils/load_metadata.py b/aeon/dj_pipeline/utils/load_metadata.py index f2639c22..1d87df26 100644 --- a/aeon/dj_pipeline/utils/load_metadata.py +++ b/aeon/dj_pipeline/utils/load_metadata.py @@ -44,8 +44,7 @@ def insert_stream_types(): def insert_device_types(devices_schema: DotMap, metadata_yml_filepath: Path): - """ - Use aeon.schema.schemas and metadata.yml to insert into streams.DeviceType and streams.Device. + """Use aeon.schema.schemas and metadata.yml to insert into streams.DeviceType and streams.Device. Only insert device types that were defined both in the device schema (e.g., exp02) and Metadata.yml. It then creates new device tables under streams schema. 
""" diff --git a/aeon/io/device.py b/aeon/io/device.py index e68556ad..ba811c54 100644 --- a/aeon/io/device.py +++ b/aeon/io/device.py @@ -1,4 +1,5 @@ import inspect + from typing_extensions import deprecated diff --git a/aeon/io/video.py b/aeon/io/video.py index 79e43daa..cbfa0dc7 100644 --- a/aeon/io/video.py +++ b/aeon/io/video.py @@ -14,7 +14,7 @@ def frames(data): filename = None index = 0 try: - for frameidx, path in zip(data._frame, data._path): + for frameidx, path in zip(data._frame, data._path, strict=False): if filename != path: if capture is not None: capture.release() diff --git a/aeon/qc/video.py b/aeon/qc/video.py index 1e090dc9..b9a3b420 100644 --- a/aeon/qc/video.py +++ b/aeon/qc/video.py @@ -37,9 +37,7 @@ max_harp_delta = deltas.time_delta.max().total_seconds() max_camera_delta = deltas.hw_timestamp_delta.max() / 1e9 # convert nanoseconds to seconds print( - "drops: {} frameOffset: {} maxHarpDelta: {} s maxCameraDelta: {} s".format( - drop_count - frame_offset, drop_count, max_harp_delta, max_camera_delta - ) + f"drops: {drop_count - frame_offset} frameOffset: {drop_count} maxHarpDelta: {max_harp_delta} s maxCameraDelta: {max_camera_delta} s" ) stats.append((drop_count, max_harp_delta, max_camera_delta, chunk.path)) deltas.set_index(data.time, inplace=True) diff --git a/aeon/schema/core.py b/aeon/schema/core.py index f3ca95a5..6f70c8b4 100644 --- a/aeon/schema/core.py +++ b/aeon/schema/core.py @@ -1,5 +1,5 @@ -from aeon.schema.streams import Stream, StreamGroup import aeon.io.reader as _reader +from aeon.schema.streams import Stream, StreamGroup class Heartbeat(Stream): diff --git a/aeon/schema/dataset.py b/aeon/schema/dataset.py index bbb7cbb8..0facd64f 100644 --- a/aeon/schema/dataset.py +++ b/aeon/schema/dataset.py @@ -1,8 +1,8 @@ from dotmap import DotMap import aeon.schema.core as stream -from aeon.schema.streams import Device from aeon.schema import foraging, octagon +from aeon.schema.streams import Device exp02 = DotMap( [ diff --git a/aeon/schema/foraging.py b/aeon/schema/foraging.py index 82865533..0eaf593c 100644 --- a/aeon/schema/foraging.py +++ b/aeon/schema/foraging.py @@ -1,5 +1,7 @@ from enum import Enum + import pandas as pd + import aeon.io.reader as _reader import aeon.schema.core as _stream from aeon.schema.streams import Stream, StreamGroup diff --git a/aeon/schema/schemas.py b/aeon/schema/schemas.py index 74618e7f..cea128d9 100644 --- a/aeon/schema/schemas.py +++ b/aeon/schema/schemas.py @@ -1,9 +1,8 @@ from dotmap import DotMap import aeon.schema.core as stream -from aeon.schema.streams import Device from aeon.schema import foraging, octagon, social_01, social_02, social_03 - +from aeon.schema.streams import Device exp02 = DotMap( [ diff --git a/aeon/schema/social_02.py b/aeon/schema/social_02.py index 44c26c91..99a621e1 100644 --- a/aeon/schema/social_02.py +++ b/aeon/schema/social_02.py @@ -1,6 +1,6 @@ import aeon.io.reader as _reader -from aeon.schema.streams import Stream, StreamGroup from aeon.schema import core, foraging +from aeon.schema.streams import Stream, StreamGroup class Environment(StreamGroup): diff --git a/tests/conftest.py b/tests/conftest.py index 4236c3e3..7ad5beac 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,5 +1,4 @@ -""" -# run all tests: +"""# run all tests: # pytest -sv --cov-report term-missing --cov=aeon_mecha -p no:warnings tests/dj_pipeline # run one test, debug: @@ -20,8 +19,7 @@ def data_dir(): - """ - Returns test data directory + """Returns test data directory """ return 
os.path.join(os.path.dirname(os.path.realpath(__file__)), "data") diff --git a/tests/dj_pipeline/test_pipeline_instantiation.py b/tests/dj_pipeline/test_pipeline_instantiation.py index 30377d95..f7e06d0e 100644 --- a/tests/dj_pipeline/test_pipeline_instantiation.py +++ b/tests/dj_pipeline/test_pipeline_instantiation.py @@ -3,19 +3,19 @@ @mark.instantiation def test_pipeline_instantiation(pipeline): - + assert hasattr(pipeline["acquisition"], "FoodPatchEvent") assert hasattr(pipeline["lab"], "Arena") assert hasattr(pipeline["qc"], "CameraQC") assert hasattr(pipeline["report"], "InArenaSummaryPlot") assert hasattr(pipeline["subject"], "Subject") assert hasattr(pipeline["tracking"], "CameraTracking") - - + + @mark.instantiation def test_experiment_creation(test_params, pipeline, experiment_creation): acquisition = pipeline["acquisition"] - + experiment_name = test_params["experiment_name"] assert acquisition.Experiment.fetch1("experiment_name") == experiment_name raw_dir = ( diff --git a/tests/dj_pipeline/test_tracking.py b/tests/dj_pipeline/test_tracking.py index 5920bfd8..0aa6bb97 100644 --- a/tests/dj_pipeline/test_tracking.py +++ b/tests/dj_pipeline/test_tracking.py @@ -10,8 +10,7 @@ def save_test_data(pipeline, test_params): - """save test dataset fetched from tracking.CameraTracking.Object""" - + """Save test dataset fetched from tracking.CameraTracking.Object""" tracking = pipeline["tracking"] key = tracking.CameraTracking.Object().fetch("KEY")[index] From 97389bddf4b2fcd366a93d7f269380a465696fc9 Mon Sep 17 00:00:00 2001 From: glopesdev Date: Fri, 6 Sep 2024 16:07:40 +0100 Subject: [PATCH 02/36] Black formatting --- aeon/analysis/block_plotting.py | 12 +- aeon/dj_pipeline/acquisition.py | 3 +- .../insert_experiment_directory.ipynb | 24 ++-- aeon/dj_pipeline/docs/notebooks/diagram.ipynb | 6 +- .../social_experiments_block_analysis.ipynb | 4 +- aeon/dj_pipeline/populate/worker.py | 2 + .../scripts/clone_and_freeze_exp01.py | 1 + .../scripts/clone_and_freeze_exp02.py | 1 + .../scripts/update_timestamps_longblob.py | 1 + aeon/dj_pipeline/utils/load_metadata.py | 3 +- aeon/io/device.py | 2 +- aeon/schema/social_02.py | 10 +- aeon/util.py | 1 + .../dj_example_octagon1_experiment.ipynb | 37 +++--- ...understanding_aeon_data_architecture.ipynb | 110 ++++++++---------- tests/conftest.py | 7 +- tests/dj_pipeline/test_acquisition.py | 19 +-- .../test_pipeline_instantiation.py | 7 +- tests/dj_pipeline/test_tracking.py | 17 +-- 19 files changed, 121 insertions(+), 146 deletions(-) diff --git a/aeon/analysis/block_plotting.py b/aeon/analysis/block_plotting.py index 61993cf8..26b20cf9 100644 --- a/aeon/analysis/block_plotting.py +++ b/aeon/analysis/block_plotting.py @@ -34,19 +34,13 @@ def gen_hex_grad(hex_col, vals, min_l=0.3): """Generates an array of hex color values based on a gradient defined by unit-normalized values.""" # Convert hex to rgb to hls - h, l, s = rgb_to_hls( - *[int(hex_col.lstrip("#")[i: i + 2], 16) / 255 for i in (0, 2, 4)] - ) + h, l, s = rgb_to_hls(*[int(hex_col.lstrip("#")[i : i + 2], 16) / 255 for i in (0, 2, 4)]) grad = np.empty(shape=(len(vals),), dtype=" no changes return diff --git a/aeon/io/device.py b/aeon/io/device.py index ba811c54..cbebb42b 100644 --- a/aeon/io/device.py +++ b/aeon/io/device.py @@ -22,7 +22,7 @@ def compositeStream(pattern, *args): class Device: """Groups multiple Readers into a logical device. 
- If a device contains a single stream reader with the same pattern as the device `name`, it will be + If a device contains a single stream reader with the same pattern as the device `name`, it will be considered a singleton, and the stream reader will be paired directly with the device without nesting. Attributes: diff --git a/aeon/schema/social_02.py b/aeon/schema/social_02.py index 99a621e1..dd501d72 100644 --- a/aeon/schema/social_02.py +++ b/aeon/schema/social_02.py @@ -12,7 +12,9 @@ def __init__(self, path): class BlockState(Stream): def __init__(self, path): - super().__init__(_reader.Csv(f"{path}_BlockState_*", columns=["pellet_ct", "pellet_ct_thresh", "due_time"])) + super().__init__( + _reader.Csv(f"{path}_BlockState_*", columns=["pellet_ct", "pellet_ct_thresh", "due_time"]) + ) class LightEvents(Stream): def __init__(self, path): @@ -35,7 +37,11 @@ def __init__(self, path): class SubjectWeight(Stream): def __init__(self, path): - super().__init__(_reader.Csv(f"{path}_SubjectWeight_*", columns=["weight", "confidence", "subject_id", "int_id"])) + super().__init__( + _reader.Csv( + f"{path}_SubjectWeight_*", columns=["weight", "confidence", "subject_id", "int_id"] + ) + ) class Pose(Stream): diff --git a/aeon/util.py b/aeon/util.py index a9dc88bc..1028abf4 100644 --- a/aeon/util.py +++ b/aeon/util.py @@ -1,4 +1,5 @@ """Utility functions.""" + from __future__ import annotations from typing import Any diff --git a/docs/examples/dj_example_octagon1_experiment.ipynb b/docs/examples/dj_example_octagon1_experiment.ipynb index ae8e72f0..ca4cbf3a 100644 --- a/docs/examples/dj_example_octagon1_experiment.ipynb +++ b/docs/examples/dj_example_octagon1_experiment.ipynb @@ -48,9 +48,12 @@ "outputs": [], "source": [ "import datajoint as dj\n", - "dj.logger.setLevel('ERROR')\n", "\n", - "dj.config['custom']['database.prefix'] = 'aeon_test_' # data are ingested into schemas prefixed with \"aeon_test_\" for testing" + "dj.logger.setLevel(\"ERROR\")\n", + "\n", + "dj.config[\"custom\"][\n", + " \"database.prefix\"\n", + "] = \"aeon_test_\" # data are ingested into schemas prefixed with \"aeon_test_\" for testing" ] }, { @@ -85,8 +88,8 @@ "# then instead of importing the modules, you can use DataJoint's VirtualModule to access the pipeline\n", "# uncomment and run the codeblock below\n", "\n", - "#acquisition = dj.VirtualModule('acquisition', 'aeon_test_acquisition')\n", - "#streams = dj.VirtualModule('streams', 'aeon_test_streams')" + "# acquisition = dj.VirtualModule('acquisition', 'aeon_test_acquisition')\n", + "# streams = dj.VirtualModule('streams', 'aeon_test_streams')" ] }, { @@ -2665,7 +2668,7 @@ "metadata": {}, "outputs": [], "source": [ - "exp_key = {'experiment_name': 'oct1.0-r0'}" + "exp_key = {\"experiment_name\": \"oct1.0-r0\"}" ] }, { @@ -4034,7 +4037,7 @@ } ], "source": [ - "df_oscpoke = (streams.OSCPoke & 'chunk_start BETWEEN \"2022-08-01\" AND \"2022-08-03\"').fetch(format='frame')\n", + "df_oscpoke = (streams.OSCPoke & 'chunk_start BETWEEN \"2022-08-01\" AND \"2022-08-03\"').fetch(format=\"frame\")\n", "df_oscpoke" ] }, @@ -4371,7 +4374,9 @@ } ], "source": [ - "df_oscpoke.explode(['timestamps', 'typetag', 'wall_id', 'poke_id', 'reward', 'reward_interval', 'delay', 'led_delay'])" + "df_oscpoke.explode(\n", + " [\"timestamps\", \"typetag\", \"wall_id\", \"poke_id\", \"reward\", \"reward_interval\", \"delay\", \"led_delay\"]\n", + ")" ] }, { @@ -5050,9 +5055,11 @@ } ], "source": [ - "(streams.WallBeamBreak0 * streams.ExperimentWall\n", - " & 'wall_name = \"Wall4\"'\n", - " & 
'chunk_start BETWEEN \"2022-08-01\" AND \"2022-08-03\"')" + "(\n", + " streams.WallBeamBreak0 * streams.ExperimentWall\n", + " & 'wall_name = \"Wall4\"'\n", + " & 'chunk_start BETWEEN \"2022-08-01\" AND \"2022-08-03\"'\n", + ")" ] }, { @@ -5195,9 +5202,11 @@ } ], "source": [ - "df_wall4_beambreak0 = (streams.WallBeamBreak0 * streams.ExperimentWall\n", - " & 'wall_name = \"Wall4\"'\n", - " & 'chunk_start BETWEEN \"2022-08-01\" AND \"2022-08-03\"').fetch(format='frame')\n", + "df_wall4_beambreak0 = (\n", + " streams.WallBeamBreak0 * streams.ExperimentWall\n", + " & 'wall_name = \"Wall4\"'\n", + " & 'chunk_start BETWEEN \"2022-08-01\" AND \"2022-08-03\"'\n", + ").fetch(format=\"frame\")\n", "df_wall4_beambreak0" ] }, @@ -5399,7 +5408,7 @@ } ], "source": [ - "df_wall4_beambreak0.explode(['timestamps', 'state'])" + "df_wall4_beambreak0.explode([\"timestamps\", \"state\"])" ] }, { diff --git a/docs/examples/understanding_aeon_data_architecture.ipynb b/docs/examples/understanding_aeon_data_architecture.ipynb index e3df6981..ee491c40 100644 --- a/docs/examples/understanding_aeon_data_architecture.ipynb +++ b/docs/examples/understanding_aeon_data_architecture.ipynb @@ -129,7 +129,7 @@ "time_set = pd.concat(\n", " [\n", " pd.Series(pd.date_range(start_time, start_time + pd.Timedelta(hours=1), freq=\"1s\")),\n", - " pd.Series(pd.date_range(end_time, end_time + pd.Timedelta(hours=1), freq=\"1s\"))\n", + " pd.Series(pd.date_range(end_time, end_time + pd.Timedelta(hours=1), freq=\"1s\")),\n", " ]\n", ")" ] @@ -143,15 +143,16 @@ "\"\"\"Creating a new `Reader` class\"\"\"\n", "\n", "# All readers are subclassed from the base `Reader` class. They thus all contain a `read` method,\n", - "# for returning data from a file in the form of a pandas DataFrame, and the following attributes, \n", + "# for returning data from a file in the form of a pandas DataFrame, and the following attributes,\n", "# which must be specified on object construction:\n", "# `pattern`: a prefix in the filename used by `aeon.io.api.load` to find matching files to load\n", "# `columns`: a list of column names for the returned DataFrame\n", "# `extension`: the file extension of the files to be read\n", "\n", - "# Using these principles, we can recreate a simple reader for reading subject weight data from the \n", + "# Using these principles, we can recreate a simple reader for reading subject weight data from the\n", "# social0.1 experiments, which are saved in .csv format.\n", "\n", + "\n", "# First, we'll create a general Csv reader, subclassed from `Reader`.\n", "class Csv(reader.Reader):\n", " \"\"\"Reads data from csv text files, where the first column stores the Aeon timestamp, in seconds.\"\"\"\n", @@ -161,10 +162,11 @@ "\n", " def read(self, file):\n", " return pd.read_csv(file, header=0, names=self.columns, index_col=0)\n", - " \n", + "\n", + "\n", "# Next, we'll create a reader for the subject weight data, subclassed from `Csv`.\n", "\n", - "# We know from our data that the files of interest start with 'Environment_SubjectWeight' and columns are: \n", + "# We know from our data that the files of interest start with 'Environment_SubjectWeight' and columns are:\n", "# 1) Aeon timestamp in seconds from 1904/01/01 (1904 date system)\n", "# 2) Weight in grams\n", "# 3) Weight stability confidence (0-1)\n", @@ -173,16 +175,17 @@ "# Since the first column (Aeon timestamp) will be set as the index, we'll use the rest as DataFrame columns.\n", "# And we don't need to define `read`, as it will use the `Csv` class's `read` method.\n", 
"\n", + "\n", "class Subject_Weight(Csv):\n", " \"\"\"Reads subject weight data from csv text files.\"\"\"\n", - " \n", + "\n", " def __init__(\n", - " self, \n", + " self,\n", " pattern=\"Environment_SubjectWeight*\",\n", - " columns=[\"weight\", \"confidence\", \"subject_id\", \"int_id\"], \n", - " extension=\"csv\"\n", + " columns=[\"weight\", \"confidence\", \"subject_id\", \"int_id\"],\n", + " extension=\"csv\",\n", " ):\n", - " super().__init__(pattern, columns, extension)\n" + " super().__init__(pattern, columns, extension)" ] }, { @@ -613,7 +616,7 @@ "source": [ "\"\"\"Loading data via a `Reader` object\"\"\"\n", "\n", - "# We can now load data by specifying a file \n", + "# We can now load data by specifying a file\n", "subject_weight_reader = Subject_Weight()\n", "acq_epoch = \"2023-12-01T14-30-34\"\n", "weight_file = root / acq_epoch / \"Environment/Environment_SubjectWeight_2023-12-02T12-00-00.csv\"\n", @@ -953,7 +956,7 @@ "source": [ "\"\"\"Updating a `Reader` object\"\"\"\n", "\n", - "# Occasionally, we may want to tweak the output from a `Reader` object's `read` method, or some tweaks to \n", + "# Occasionally, we may want to tweak the output from a `Reader` object's `read` method, or some tweaks to\n", "# streams on the acquisition side may require us to make corresponding tweaks to a `Reader` object to\n", "# ensure it works properly. We'll cover some of these cases here.\n", "\n", @@ -970,21 +973,23 @@ "\n", "# 2. Pattern changes\n", "\n", - "# Next, occasionally a stream's filename may change, in which case we'll need to update the `Reader` \n", - "# object's `pattern` to find the new files using `load`: \n", + "# Next, occasionally a stream's filename may change, in which case we'll need to update the `Reader`\n", + "# object's `pattern` to find the new files using `load`:\n", + "\n", "\n", "# Let's simulate a case where the old SubjectWeight stream was called Weight, and create a `Reader` class.\n", "class Subject_Weight(Csv):\n", " \"\"\"Reads subject weight data from csv text files.\"\"\"\n", - " \n", + "\n", " def __init__(\n", - " self, \n", + " self,\n", " pattern=\"Environment_Weight*\",\n", - " columns=[\"weight\", \"confidence\", \"subject_id\", \"int_id\"], \n", - " extension=\"csv\"\n", + " columns=[\"weight\", \"confidence\", \"subject_id\", \"int_id\"],\n", + " extension=\"csv\",\n", " ):\n", " super().__init__(pattern, columns, extension)\n", "\n", + "\n", "# We'll see that we can't find any files with this pattern.\n", "subject_weight_reader = Subject_Weight()\n", "data = aeon.load(root, subject_weight_reader, start=start_time, end=end_time)\n", @@ -993,7 +998,7 @@ "# But if we just update the pattern, `load` will find the files.\n", "subject_weight_reader.pattern = \"Environment_SubjectWeight*\"\n", "data = aeon.load(root, subject_weight_reader, start=start_time, end=end_time)\n", - "display(data) \n", + "display(data)\n", "\n", "\n", "# 3. Bitmask changes for Harp streams\n", @@ -1039,19 +1044,22 @@ "source": [ "\"\"\"Instantiating a `Device` object\"\"\"\n", "\n", - "# A `Device` object is instantiated from a name, followed by one or more 'binder functions', which \n", + "# A `Device` object is instantiated from a name, followed by one or more 'binder functions', which\n", "# return a dictionary of a name paired with a `Reader` object. We call such a dictionary of `:Reader`\n", "# key-value pairs a 'registry'. Each binder function requires a `pattern` argument, which can be used to\n", "# set the pattern of the `Reader` object it returns. 
This requirement for binder functions is for allowing\n", "# the `Device` to optionally pass its name to appropriately set the pattern of `Reader` objects it contains.\n", "\n", + "\n", "# Below are examples of \"empty pattern\" binder functions, where the pattern doesn't get used.\n", "def subject_weight_binder(pattern): # an example subject weight binder function\n", " return {\"subject_weight\": subject_weight_reader}\n", "\n", + "\n", "def subject_state_binder(pattern): # an example subject state binder function\n", " return {\"subject_state\": reader.Subject(pattern=\"Environment_SubjectState*\")}\n", "\n", + "\n", "d = Device(\"SubjectMetadata\", subject_weight_binder, subject_state_binder)\n", "\n", "# On creation, the `Device` object puts all registries into a single registry, which is accessible via the\n", @@ -1063,7 +1071,7 @@ "# for the `Device` object) are the `Reader` objects associated with that `Device` object.\n", "\n", "# This works because, when a list of `Device` objects are passed into the `DotMap` constructor, the\n", - "# `__iter__` method of the `Device` object returns a tuple of the object's name with its `stream` \n", + "# `__iter__` method of the `Device` object returns a tuple of the object's name with its `stream`\n", "# attribute, which is passed in directly to the DotMap constructor to create a nested DotMap:\n", "# device_name -> stream_name -> stream `Reader` object. This is shown below:\n", "\n", @@ -1095,7 +1103,8 @@ "# Binder functions can return a dict whose value is actually composed of multiple, rather than a single,\n", "# `Reader` objects. This is done by creating nested binder functions, via `register`.\n", "\n", - "# First let's define two standard binder functions, for pellet delivery trigger and beambreak events. 
\n", + "\n", + "# First let's define two standard binder functions, for pellet delivery trigger and beambreak events.\n", "# In all examples below we'll define \"device-name passed\" binder functions, since the `Device` object which\n", "# will be instantiated from these functions will pass its name to set the pattern of the corresponding\n", "# Reader objects.\n", @@ -1108,11 +1117,13 @@ " \"\"\"Pellet beambreak events.\"\"\"\n", " return {\"pellet_beambreak\": reader.BitmaskEvent(f\"{pattern}_32_*\", 0x22, \"PelletDetected\")}\n", "\n", + "\n", "# Next, we'll define a nested binder function for a \"feeder\", which returns the two binder functions above.\n", "def feeder(pattern):\n", " \"\"\"Feeder commands and events.\"\"\"\n", " return register(pattern, pellet_trigger, pellet_beambreak)\n", "\n", + "\n", "# And further, we can define a higher-level nested binder function for a \"patch\", which includes the\n", "# magnetic encoder values for a patch's wheel in addition to `feeder`.\n", "def patch(pattern):\n", @@ -1120,7 +1131,7 @@ " return register(pattern, feeder, core.encoder)\n", "\n", "\n", - "# We can now instantiate a `Device` object as done previously, from combinations of binder functions, but \n", + "# We can now instantiate a `Device` object as done previously, from combinations of binder functions, but\n", "# also from nested binder functions.\n", "feeder_device = Device(\"Patch1\", pellet_trigger, pellet_beambreak)\n", "feeder_device_nested = Device(\"Patch1\", feeder)\n", @@ -1272,20 +1283,10 @@ "subject_weight_b = lambda pattern: {\"SubjectWeight\": subject_weight_r} # binder function: \"empty pattern\"\n", "\n", "# Nested binder fn Device object.\n", - "environment = Device(\n", - " \"Environment\", # device name\n", - " env_block_state_b,\n", - " light_events_b,\n", - " core.message_log\n", - ")\n", + "environment = Device(\"Environment\", env_block_state_b, light_events_b, core.message_log) # device name\n", "\n", "# Separate Device object for subject-specific streams.\n", - "subject = Device(\n", - " \"Subject\",\n", - " subject_state_b,\n", - " subject_visits_b,\n", - " subject_weight_b\n", - ")\n", + "subject = Device(\"Subject\", subject_state_b, subject_visits_b, subject_weight_b)\n", "\n", "# ---\n", "\n", @@ -1299,12 +1300,7 @@ "cam_names = [\"Camera\" + name for name in cam_names]\n", "camera_b = [lambda pattern, name=name: {name: reader.Video(name + \"*\")} for name in cam_names]\n", "\n", - "camera = Device(\n", - " \"Camera\", \n", - " camera_top_b, \n", - " camera_top_pos_b, \n", - " *camera_b\n", - ")\n", + "camera = Device(\"Camera\", camera_top_b, camera_top_pos_b, *camera_b)\n", "\n", "# ---\n", "\n", @@ -1315,9 +1311,9 @@ "weight_filtered_b = lambda pattern: {\"WeightFiltered\": reader.Harp(\"Nest_202*\", [\"weight(g)\", \"stability\"])}\n", "\n", "nest = Device(\n", - " \"Nest\", \n", - " weight_raw_b, \n", - " weight_filtered_b, \n", + " \"Nest\",\n", + " weight_raw_b,\n", + " weight_filtered_b,\n", ")\n", "\n", "# ---\n", @@ -1352,10 +1348,7 @@ " }\n", " patch_b.append(fn)\n", "\n", - "patch = Device(\n", - " \"Patch\", \n", - " *patch_b\n", - ")\n", + "patch = Device(\"Patch\", *patch_b)\n", "# ---\n", "\n", "# Rfid\n", @@ -1365,10 +1358,7 @@ "rfid_names = [\"Rfid\" + name for name in rfid_names]\n", "rfid_b = [lambda pattern, name=name: {name: reader.Harp(name + \"*\", [\"rfid\"])} for name in rfid_names]\n", "\n", - "rfid = Device(\n", - " \"Rfid\", \n", - " *rfid_b\n", - ")\n", + "rfid = Device(\"Rfid\", *rfid_b)\n", "\n", "# ---" ] @@ -1379,17 
+1369,7 @@ "metadata": {}, "outputs": [], "source": [ - "social01 = DotMap(\n", - " [\n", - " metadata,\n", - " environment,\n", - " subject,\n", - " camera,\n", - " nest,\n", - " patch,\n", - " rfid\n", - " ]\n", - ")" + "social01 = DotMap([metadata, environment, subject, camera, nest, patch, rfid])" ] }, { @@ -4125,6 +4105,7 @@ "source": [ "\"\"\"Test all readers in schema.\"\"\"\n", "\n", + "\n", "def find_obj(dotmap, obj):\n", " \"\"\"Returns a list of objects of type `obj` found in a DotMap.\"\"\"\n", " objs = []\n", @@ -4135,12 +4116,13 @@ " objs.extend(find_obj(value, obj))\n", " return objs\n", "\n", + "\n", "readers = find_obj(social01, reader.Reader)\n", "start_time = pd.Timestamp(\"2023-12-05 15:00:00\")\n", "end_time = pd.Timestamp(\"2023-12-07 11:00:00\")\n", "for r in readers:\n", " data = aeon.load(root, r, start=start_time, end=end_time)\n", - " #assert not data.empty, f\"No data found with {r}.\"\n", + " # assert not data.empty, f\"No data found with {r}.\"\n", " print(f\"\\n{r.pattern}:\")\n", " display(data.head())" ] diff --git a/tests/conftest.py b/tests/conftest.py index 7ad5beac..1e1bb12e 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -19,8 +19,7 @@ def data_dir(): - """Returns test data directory - """ + """Returns test data directory""" return os.path.join(os.path.dirname(os.path.realpath(__file__)), "data") @@ -53,9 +52,7 @@ def dj_config(): dj.config.load(dj_config_fp) dj.config["safemode"] = False assert "custom" in dj.config - dj.config["custom"][ - "database.prefix" - ] = f"u_{dj.config['database.user']}_testsuite_" + dj.config["custom"]["database.prefix"] = f"u_{dj.config['database.user']}_testsuite_" def load_pipeline(): diff --git a/tests/dj_pipeline/test_acquisition.py b/tests/dj_pipeline/test_acquisition.py index 8e5eec37..28f39d12 100644 --- a/tests/dj_pipeline/test_acquisition.py +++ b/tests/dj_pipeline/test_acquisition.py @@ -16,29 +16,18 @@ def test_epoch_chunk_ingestion(test_params, pipeline, epoch_chunk_ingestion): @mark.ingestion -def test_experimentlog_ingestion( - test_params, pipeline, epoch_chunk_ingestion, experimentlog_ingestion -): +def test_experimentlog_ingestion(test_params, pipeline, epoch_chunk_ingestion, experimentlog_ingestion): acquisition = pipeline["acquisition"] assert ( - len( - acquisition.ExperimentLog.Message - & {"experiment_name": test_params["experiment_name"]} - ) + len(acquisition.ExperimentLog.Message & {"experiment_name": test_params["experiment_name"]}) == test_params["experiment_log_message_count"] ) assert ( - len( - acquisition.SubjectEnterExit.Time - & {"experiment_name": test_params["experiment_name"]} - ) + len(acquisition.SubjectEnterExit.Time & {"experiment_name": test_params["experiment_name"]}) == test_params["subject_enter_exit_count"] ) assert ( - len( - acquisition.SubjectWeight.WeightTime - & {"experiment_name": test_params["experiment_name"]} - ) + len(acquisition.SubjectWeight.WeightTime & {"experiment_name": test_params["experiment_name"]}) == test_params["subject_weight_time_count"] ) diff --git a/tests/dj_pipeline/test_pipeline_instantiation.py b/tests/dj_pipeline/test_pipeline_instantiation.py index f7e06d0e..48cfe8ea 100644 --- a/tests/dj_pipeline/test_pipeline_instantiation.py +++ b/tests/dj_pipeline/test_pipeline_instantiation.py @@ -19,12 +19,9 @@ def test_experiment_creation(test_params, pipeline, experiment_creation): experiment_name = test_params["experiment_name"] assert acquisition.Experiment.fetch1("experiment_name") == experiment_name raw_dir = ( - 
acquisition.Experiment.Directory - & {"experiment_name": experiment_name, "directory_type": "raw"} + acquisition.Experiment.Directory & {"experiment_name": experiment_name, "directory_type": "raw"} ).fetch1("directory_path") assert raw_dir == test_params["raw_dir"] - exp_subjects = ( - acquisition.Experiment.Subject & {"experiment_name": experiment_name} - ).fetch("subject") + exp_subjects = (acquisition.Experiment.Subject & {"experiment_name": experiment_name}).fetch("subject") assert len(exp_subjects) == test_params["subject_count"] assert "BAA-1100701" in exp_subjects diff --git a/tests/dj_pipeline/test_tracking.py b/tests/dj_pipeline/test_tracking.py index 0aa6bb97..434bc008 100644 --- a/tests/dj_pipeline/test_tracking.py +++ b/tests/dj_pipeline/test_tracking.py @@ -6,7 +6,9 @@ index = 0 column_name = "position_x" # data column to run test on -file_name = "exp0.2-r0-20220524090000-21053810-20220524082942-0-0.npy" # test file to be saved with save_test_data +file_name = ( + "exp0.2-r0-20220524090000-21053810-20220524082942-0-0.npy" # test file to be saved with save_test_data +) def save_test_data(pipeline, test_params): @@ -17,9 +19,7 @@ def save_test_data(pipeline, test_params): file_name = ( "-".join( [ - v.strftime("%Y%m%d%H%M%S") - if isinstance(v, datetime.datetime) - else str(v) + v.strftime("%Y%m%d%H%M%S") if isinstance(v, datetime.datetime) else str(v) for v in key.values() ] ) @@ -39,18 +39,13 @@ def test_camera_tracking_ingestion(test_params, pipeline, camera_tracking_ingest tracking = pipeline["tracking"] - assert ( - len(tracking.CameraTracking.Object()) - == test_params["camera_tracking_object_count"] - ) + assert len(tracking.CameraTracking.Object()) == test_params["camera_tracking_object_count"] key = tracking.CameraTracking.Object().fetch("KEY")[index] file_name = ( "-".join( [ - v.strftime("%Y%m%d%H%M%S") - if isinstance(v, datetime.datetime) - else str(v) + v.strftime("%Y%m%d%H%M%S") if isinstance(v, datetime.datetime) else str(v) for v in key.values() ] ) From e60b76625ab030343fd985ac4721d48dc32568be Mon Sep 17 00:00:00 2001 From: glopesdev Date: Fri, 6 Sep 2024 16:32:55 +0100 Subject: [PATCH 03/36] Move top-level linter settings to lint section --- pyproject.toml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 720ee826..fc08be8b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -97,7 +97,7 @@ exclude = ''' extend-exclude = "aeon/dj_pipeline/streams.py" [tool.ruff] -select = [ +lint.select = [ "E", "W", "F", @@ -115,7 +115,7 @@ select = [ "PL", ] line-length = 108 -ignore = [ +lint.ignore = [ "E201", "E202", "E203", @@ -135,7 +135,7 @@ extend-exclude = [ ".vscode", "aeon/dj_pipeline/streams.py", ] -[tool.ruff.per-file-ignores] +[tool.ruff.lint.per-file-ignores] "aeon/dj_pipeline/*" = [ "B006", "B021", @@ -167,7 +167,7 @@ extend-exclude = [ "I001", ] -[tool.ruff.pydocstyle] +[tool.ruff.lint.pydocstyle] convention = "google" [tool.pyright] From d2a510487eaf79d99397777ba0c6961fdf63178d Mon Sep 17 00:00:00 2001 From: glopesdev Date: Fri, 6 Sep 2024 16:33:20 +0100 Subject: [PATCH 04/36] Ignore missing docs in __init__ and magic methods --- pyproject.toml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index fc08be8b..d589fd21 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -116,6 +116,8 @@ lint.select = [ ] line-length = 108 lint.ignore = [ + "D105", # skip adding docstrings for magic methods + "D107", # skip adding docstrings for __init__ "E201", "E202", "E203", From 
743324ad88e8a355254d40953d2037009f7b4e53 Mon Sep 17 00:00:00 2001 From: glopesdev Date: Fri, 6 Sep 2024 16:43:35 +0100 Subject: [PATCH 05/36] Apply ruff recommendations to low-level API --- aeon/io/device.py | 8 ++++---- aeon/schema/streams.py | 5 ++--- tests/io/test_api.py | 3 ++- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/aeon/io/device.py b/aeon/io/device.py index cbebb42b..d7707fb0 100644 --- a/aeon/io/device.py +++ b/aeon/io/device.py @@ -12,10 +12,10 @@ def compositeStream(pattern, *args): if inspect.isclass(binder_fn): for method in vars(binder_fn).values(): if isinstance(method, staticmethod): - registry.update(method.__func__(pattern)) + composite.update(method.__func__(pattern)) else: - registry.update(binder_fn(pattern)) - return registry + composite.update(binder_fn(pattern)) + return composite @deprecated("The Device class has been moved to the streams module.") @@ -34,7 +34,7 @@ class Device: def __init__(self, name, *args, pattern=None): self.name = name - self.registry = register(name if pattern is None else pattern, *args) + self.registry = compositeStream(name if pattern is None else pattern, *args) def __iter__(self): if len(self.registry) == 1: diff --git a/aeon/schema/streams.py b/aeon/schema/streams.py index f306bf63..098b43ef 100644 --- a/aeon/schema/streams.py +++ b/aeon/schema/streams.py @@ -31,13 +31,12 @@ def __init__(self, path, *args): self._nested = ( member for member in vars(self.__class__).values() - if inspect.isclass(member) and issubclass(member, (Stream, StreamGroup)) + if inspect.isclass(member) and issubclass(member, Stream | StreamGroup) ) def __iter__(self): for factory in chain(self._nested, self._args): - for stream in iter(factory(self.path)): - yield stream + yield from iter(factory(self.path)) class Device: diff --git a/tests/io/test_api.py b/tests/io/test_api.py index 253fceae..745947f2 100644 --- a/tests/io/test_api.py +++ b/tests/io/test_api.py @@ -32,7 +32,8 @@ def test_load_filter_nonchunked(): @mark.api def test_load_monotonic(): data = aeon.load(monotonic_path, exp02.Patch2.Encoder) - assert len(data) > 0 and data.index.is_monotonic_increasing + assert len(data) > 0 + assert data.index.is_monotonic_increasing @mark.api From 257d9cdb07e36e5c188a1f79bb099c5d859855a7 Mon Sep 17 00:00:00 2001 From: glopesdev Date: Fri, 6 Sep 2024 16:45:20 +0100 Subject: [PATCH 06/36] Ignore missing docs for module, package and tests --- pyproject.toml | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index d589fd21..befe0d78 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -116,6 +116,8 @@ lint.select = [ ] line-length = 108 lint.ignore = [ + "D100", # skip adding docstrings for module + "D104", # ignore missing docstring in public package "D105", # skip adding docstrings for magic methods "D107", # skip adding docstrings for __init__ "E201", @@ -138,14 +140,15 @@ extend-exclude = [ "aeon/dj_pipeline/streams.py", ] [tool.ruff.lint.per-file-ignores] +"tests/*" = [ + "D103", # skip adding docstrings for public functions +] "aeon/dj_pipeline/*" = [ "B006", "B021", - "D100", # skip adding docstrings for module "D101", # skip adding docstrings for table class since it is added inside definition "D102", # skip adding docstrings for make function "D103", # skip adding docstrings for public functions - "D104", # ignore missing docstring in public package "D106", # skip adding docstrings for Part tables "E501", "F401", # ignore unused import errors From 
e4fe028602d75dc46d5519ce813c8601f30ae360 Mon Sep 17 00:00:00 2001 From: glopesdev Date: Fri, 6 Sep 2024 16:55:03 +0100 Subject: [PATCH 07/36] Ignore missing docs for schema classes and streams --- pyproject.toml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index befe0d78..cef47b78 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -143,6 +143,10 @@ extend-exclude = [ "tests/*" = [ "D103", # skip adding docstrings for public functions ] +"aeon/schema/*" = [ + "D101", # skip adding docstrings for schema classes + "D106", # skip adding docstrings for nested streams +] "aeon/dj_pipeline/*" = [ "B006", "B021", From 0927a9ac1e7be460a407b4c0206dca57190eb90f Mon Sep 17 00:00:00 2001 From: glopesdev Date: Fri, 6 Sep 2024 17:25:57 +0100 Subject: [PATCH 08/36] Apply more ruff recommendations to low-level API --- aeon/analysis/block_plotting.py | 8 ++++---- aeon/analysis/movies.py | 18 +++++++++++------- aeon/analysis/plotting.py | 17 +++++++++++------ aeon/analysis/utils.py | 21 ++++++++++++--------- aeon/io/api.py | 16 ++++++++++------ aeon/io/reader.py | 15 +++++++++------ aeon/qc/video.py | 3 ++- aeon/schema/streams.py | 1 + 8 files changed, 60 insertions(+), 39 deletions(-) diff --git a/aeon/analysis/block_plotting.py b/aeon/analysis/block_plotting.py index 26b20cf9..be86b68f 100644 --- a/aeon/analysis/block_plotting.py +++ b/aeon/analysis/block_plotting.py @@ -25,9 +25,7 @@ "star", ] patch_markers_symbols = ["●", "⧓", "■", "⧗", "♦", "✖", "×", "▲", "★"] -patch_markers_dict = { - marker: symbol for marker, symbol in zip(patch_markers, patch_markers_symbols, strict=False) -} +patch_markers_dict = dict(zip(patch_markers, patch_markers_symbols, strict=False)) patch_markers_linestyles = ["solid", "dash", "dot", "dashdot", "longdashdot"] @@ -40,7 +38,9 @@ def gen_hex_grad(hex_col, vals, min_l=0.3): cur_l = (l * val) + (min_l * (1 - val)) # get cur lightness relative to `hex_col` cur_l = max(min(cur_l, l), min_l) # set min, max bounds cur_rgb_col = hls_to_rgb(h, cur_l, s) # convert to rgb - cur_hex_col = "#%02x%02x%02x" % tuple(int(c * 255) for c in cur_rgb_col) # convert to hex + cur_hex_col = "#{:02x}{:02x}{:02x}".format( + *tuple(int(c * 255) for c in cur_rgb_col) + ) # convert to hex grad[i] = cur_hex_col return grad diff --git a/aeon/analysis/movies.py b/aeon/analysis/movies.py index f71a0c3e..078efad2 100644 --- a/aeon/analysis/movies.py +++ b/aeon/analysis/movies.py @@ -8,8 +8,7 @@ def gridframes(frames, width, height, shape=None): - """Arranges a set of frames into a grid layout with the specified - pixel dimensions and shape. + """Arranges a set of frames into a grid layout with the specified pixel dimensions and shape. :param list frames: A list of frames to include in the grid layout. :param int width: The width of the output grid image, in pixels. @@ -65,7 +64,7 @@ def groupframes(frames, n, fun): i = i + 1 -def triggerclip(data, events, before=pd.Timedelta(0), after=pd.Timedelta(0)): +def triggerclip(data, events, before=None, after=None): """Split video data around the specified sequence of event timestamps. :param DataFrame data: @@ -76,10 +75,16 @@ def triggerclip(data, events, before=pd.Timedelta(0), after=pd.Timedelta(0)): :return: A pandas DataFrame containing the frames, clip and sequence numbers for each event timestamp. 
""" - if before is not pd.Timedelta: + if before is None: + before = pd.Timedelta(0) + elif before is not pd.Timedelta: before = pd.Timedelta(before) - if after is not pd.Timedelta: + + if after is None: + after = pd.Timedelta(0) + elif after is not pd.Timedelta: after = pd.Timedelta(after) + if events is not pd.Index: events = events.index @@ -107,8 +112,7 @@ def collatemovie(clipdata, fun): def gridmovie(clipdata, width, height, shape=None): - """Collates a set of video clips into a grid movie with the specified pixel dimensions - and grid layout. + """Collates a set of video clips into a grid movie with the specified pixel dimensions and grid layout. :param DataFrame clipdata: A pandas DataFrame where each row specifies video path, frame number, clip and sequence number. diff --git a/aeon/analysis/plotting.py b/aeon/analysis/plotting.py index f89d78f5..39168026 100644 --- a/aeon/analysis/plotting.py +++ b/aeon/analysis/plotting.py @@ -6,7 +6,7 @@ from matplotlib import colors from matplotlib.collections import LineCollection -from aeon.analysis.utils import * +from aeon.analysis.utils import rate, sessiontime def heatmap(position, frequency, ax=None, **kwargs): @@ -60,8 +60,9 @@ def rateplot( ax=None, **kwargs, ): - """Plot the continuous event rate and raster of a discrete event sequence, given the specified - window size and sampling frequency. + """Plot the continuous event rate and raster of a discrete event sequence. + + The window size and sampling frequency can be specified. :param Series events: The discrete sequence of events. :param offset window: The time period of each window used to compute the rate. @@ -69,7 +70,7 @@ def rateplot( :param number, optional weight: A weight used to scale the continuous rate of each window. :param datetime, optional start: The left bound of the time range for the continuous rate. :param datetime, optional end: The right bound of the time range for the continuous rate. - :param datetime, optional smooth: The size of the smoothing kernel applied to the continuous rate output. + :param datetime, optional smooth: The size of the smoothing kernel applied to the rate output. :param DateOffset, Timedelta or str, optional smooth: The size of the smoothing kernel applied to the continuous rate output. :param bool, optional center: Specifies whether to center the convolution kernels. @@ -108,8 +109,8 @@ def colorline( x, y, z=None, - cmap=plt.get_cmap("copper"), - norm=plt.Normalize(0.0, 1.0), + cmap=None, + norm=None, ax=None, **kwargs, ): @@ -128,6 +129,10 @@ def colorline( ax = plt.gca() if z is None: z = np.linspace(0.0, 1.0, len(x)) + if cmap is None: + cmap = plt.get_cmap("copper") + if norm is None: + norm = plt.Normalize(0.0, 1.0) z = np.asarray(z) points = np.array([x, y]).T.reshape(-1, 1, 2) segments = np.concatenate([points[:-1], points[1:]], axis=1) diff --git a/aeon/analysis/utils.py b/aeon/analysis/utils.py index eb738106..4c229771 100644 --- a/aeon/analysis/utils.py +++ b/aeon/analysis/utils.py @@ -3,8 +3,9 @@ def distancetravelled(angle, radius=4.0): - """Calculates the total distance travelled on the wheel, by taking into account - its radius and the total number of turns in both directions across time. + """Calculates the total distance travelled on the wheel. + + Takes into account the wheel radius and the total number of turns in both directions across time. :param Series angle: A series of magnetic encoder measurements. :param float radius: The radius of the wheel, in metric units. 
@@ -22,10 +23,11 @@ def distancetravelled(angle, radius=4.0): def visits(data, onset="Enter", offset="Exit"): - """Computes duration, onset and offset times from paired events. Allows for missing data - by trying to match event onset times with subsequent offset times. If the match fails, - event offset metadata is filled with NaN. Any additional metadata columns in the data - frame will be paired and included in the output. + """Computes duration, onset and offset times from paired events. + + Allows for missing data by trying to match event onset times with subsequent offset times. + If the match fails, event offset metadata is filled with NaN. Any additional metadata columns + in the data frame will be paired and included in the output. :param DataFrame data: A pandas data frame containing visit onset and offset events. :param str, optional onset: The label used to identify event onsets. @@ -69,8 +71,9 @@ def visits(data, onset="Enter", offset="Exit"): def rate(events, window, frequency, weight=1, start=None, end=None, smooth=None, center=False): - """Computes the continuous event rate from a discrete event sequence, given the specified - window size and sampling frequency. + """Computes the continuous event rate from a discrete event sequence. + + The window size and sampling frequency can be specified. :param Series events: The discrete sequence of events. :param offset window: The time period of each window used to compute the rate. @@ -78,7 +81,7 @@ def rate(events, window, frequency, weight=1, start=None, end=None, smooth=None, :param number, optional weight: A weight used to scale the continuous rate of each window. :param datetime, optional start: The left bound of the time range for the continuous rate. :param datetime, optional end: The right bound of the time range for the continuous rate. - :param datetime, optional smooth: The size of the smoothing kernel applied to the continuous rate output. + :param datetime, optional smooth: The size of the smoothing kernel applied to the rate output. :param DateOffset, Timedelta or str, optional smooth: The size of the smoothing kernel applied to the continuous rate output. :param bool, optional center: Specifies whether to center the convolution kernels. diff --git a/aeon/io/api.py b/aeon/io/api.py index 5c16159f..f31d3b01 100644 --- a/aeon/io/api.py +++ b/aeon/io/api.py @@ -61,11 +61,13 @@ def _empty(columns): def load(root, reader, start=None, end=None, time=None, tolerance=None, epoch=None): - """Extracts chunk data from the root path of an Aeon dataset using the specified data stream - reader. A subset of the data can be loaded by specifying an optional time range, or a list - of timestamps used to index the data on file. Returned data will be sorted chronologically. + """Extracts chunk data from the root path of an Aeon dataset. - :param str or PathLike root: The root path, or prioritised sequence of paths, where epoch data is stored. + Reads all chunk data using the specified data stream reader. A subset of the data can be loaded + by specifying an optional time range, or a list of timestamps used to index the data on file. + Returned data will be sorted chronologically. + + :param str or PathLike root: The root path, or prioritised sequence of paths, where data is stored. :param Reader reader: A data stream reader object used to read chunk data from the dataset. :param datetime, optional start: The left bound of the time range to extract. :param datetime, optional end: The right bound of the time range to extract. 
@@ -143,10 +145,12 @@ def load(root, reader, start=None, end=None, time=None, tolerance=None, epoch=No
             import warnings
 
             if not data.index.has_duplicates:
-                warnings.warn(f"data index for {reader.pattern} contains out-of-order timestamps!")
+                warnings.warn(
+                    f"data index for {reader.pattern} contains out-of-order timestamps!", stacklevel=2
+                )
                 data = data.sort_index()
             else:
-                warnings.warn(f"data index for {reader.pattern} contains duplicate keys!")
+                warnings.warn(f"data index for {reader.pattern} contains duplicate keys!", stacklevel=2)
                 data = data[~data.index.duplicated(keep="first")]
         return data.loc[start:end]
     return data
diff --git a/aeon/io/reader.py b/aeon/io/reader.py
index e5c86d5d..0dcae1f2 100644
--- a/aeon/io/reader.py
+++ b/aeon/io/reader.py
@@ -116,8 +116,9 @@ def read(self, file):
 
 
 class Csv(Reader):
-    """Extracts data from comma-separated (csv) text files, where the first column
-    stores the Aeon timestamp, in seconds.
+    """Extracts data from comma-separated (CSV) text files.
+
+    The first column stores the Aeon timestamp, in seconds.
     """
 
     def __init__(self, pattern, columns, dtype=None, extension="csv"):
@@ -218,8 +219,9 @@ def __init__(self, pattern, value, tag):
         self.tag = tag
 
     def read(self, file):
-        """Reads a specific event code from digital data and matches it to the
-        specified unique identifier.
+        """Reads a specific event code from digital data.
+
+        Each data value is matched against the unique event identifier.
         """
         data = super().read(file)
         data = data[(data.event & self.value) == self.value]
@@ -239,8 +241,9 @@ def __init__(self, pattern, mask, columns):
         self.mask = mask
 
     def read(self, file):
-        """Reads a specific event code from digital data and matches it to the
-        specified unique identifier.
+        """Reads a specific event code from digital data.
+
+        Each data value is checked against the specified bitmask.
         """
         data = super().read(file)
         state = data[self.columns] & self.mask
diff --git a/aeon/qc/video.py b/aeon/qc/video.py
index b9a3b420..857f94f9 100644
--- a/aeon/qc/video.py
+++ b/aeon/qc/video.py
@@ -37,7 +37,8 @@
     max_harp_delta = deltas.time_delta.max().total_seconds()
     max_camera_delta = deltas.hw_timestamp_delta.max() / 1e9  # convert nanoseconds to seconds
     print(
-        f"drops: {drop_count - frame_offset} frameOffset: {drop_count} maxHarpDelta: {max_harp_delta} s maxCameraDelta: {max_camera_delta} s"
+        f"drops: {drop_count - frame_offset} frameOffset: {drop_count} "
+        + f"maxHarpDelta: {max_harp_delta} s maxCameraDelta: {max_camera_delta} s"
     )
     stats.append((drop_count, max_harp_delta, max_camera_delta, chunk.path))
     deltas.set_index(data.time, inplace=True)
diff --git a/aeon/schema/streams.py b/aeon/schema/streams.py
index 098b43ef..2c5d57b2 100644
--- a/aeon/schema/streams.py
+++ b/aeon/schema/streams.py
@@ -67,6 +67,7 @@ def _createStreams(path, args):
                 warn(
                     f"Stream group classes with default constructors are deprecated. 
{factory}", category=DeprecationWarning, + stacklevel=2, ) for method in vars(factory).values(): if isinstance(method, staticmethod): From be5d3c169c70469b96c4c09666b8efa398e86832 Mon Sep 17 00:00:00 2001 From: lochhh Date: Fri, 26 Apr 2024 13:58:33 +0000 Subject: [PATCH 09/36] Update pre-commit-config --- .pre-commit-config.yaml | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 20d1ff13..6256c569 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,8 +1,4 @@ # For info on running pre-commit manually, see `pre-commit run --help` - -default_language_version: - python: python3.11 - files: "^(test|aeon)\/.*$" repos: - repo: meta @@ -10,7 +6,7 @@ repos: - id: identity - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.4.0 + rev: v4.6.0 hooks: - id: check-json - id: check-yaml @@ -24,7 +20,7 @@ repos: - id: mixed-line-ending - id: trailing-whitespace args: [--markdown-linebreak-ext=md] - + - repo: https://github.com/psf/black rev: 23.7.0 hooks: @@ -32,13 +28,13 @@ repos: args: [--check, --config, ./pyproject.toml] - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.0.286 + rev: v0.6.4 hooks: - id: ruff args: [--config, ./pyproject.toml] - repo: https://github.com/RobertCraigie/pyright-python - rev: v1.1.324 + rev: v1.1.380 hooks: - id: pyright args: [--level, error, --project, ./pyproject.toml] From 38aeebab6feab55789a3c16db052babf3637dc81 Mon Sep 17 00:00:00 2001 From: lochhh Date: Fri, 26 Apr 2024 14:04:01 +0000 Subject: [PATCH 10/36] Remove black dependency --- .pre-commit-config.yaml | 8 +------- env_config/env_dev.yml | 1 - pyproject.toml | 19 ------------------- 3 files changed, 1 insertion(+), 27 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 6256c569..97fbc825 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -20,18 +20,12 @@ repos: - id: mixed-line-ending - id: trailing-whitespace args: [--markdown-linebreak-ext=md] - - - repo: https://github.com/psf/black - rev: 23.7.0 - hooks: - - id: black - args: [--check, --config, ./pyproject.toml] - repo: https://github.com/astral-sh/ruff-pre-commit rev: v0.6.4 hooks: - id: ruff - args: [--config, ./pyproject.toml] + - id: ruff-format - repo: https://github.com/RobertCraigie/pyright-python rev: v1.1.380 diff --git a/env_config/env_dev.yml b/env_config/env_dev.yml index 19aeeab2..b1e0050f 100644 --- a/env_config/env_dev.yml +++ b/env_config/env_dev.yml @@ -7,7 +7,6 @@ channels: - conda-forge - defaults dependencies: - - black[jupyter] - gh - ipdb - jellyfish diff --git a/pyproject.toml b/pyproject.toml index cef47b78..5283a14a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -51,7 +51,6 @@ dependencies = [ [project.optional-dependencies] dev = [ "bandit", - "black[jupyter]", "gh", "ipdb", "pre-commit", @@ -78,24 +77,6 @@ DataJoint = "https://docs.datajoint.org/" [tool.setuptools.packages.find] include = ["aeon*"] -[tool.black] -line-length = 108 -color = false -exclude = ''' -/( - \.git - | \.mypy_cache - | \.tox - | \.venv - | _build - | build - | dist - | env - | venv -)/ -''' -extend-exclude = "aeon/dj_pipeline/streams.py" - [tool.ruff] lint.select = [ "E", From 53e88c25e2e8f5f1c975fcfcbd62c132a544362c Mon Sep 17 00:00:00 2001 From: lochhh Date: Thu, 12 Sep 2024 20:05:08 +0100 Subject: [PATCH 11/36] Temporarily disable ruff and pyright in pre-commit --- .pre-commit-config.yaml | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) 
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 97fbc825..ed735be5 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -21,17 +21,22 @@ repos: - id: trailing-whitespace args: [--markdown-linebreak-ext=md] - - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.6.4 - hooks: - - id: ruff - - id: ruff-format + # - repo: https://github.com/astral-sh/ruff-pre-commit + # rev: v0.6.4 + # hooks: + # # Run the linter with the `--fix` flag. + # - id: ruff + # types_or: [ python, pyi ] + # args: [ --fix ] + # # Run the formatter. + # - id: ruff-format + # types_or: [ python, pyi ] - - repo: https://github.com/RobertCraigie/pyright-python - rev: v1.1.380 - hooks: - - id: pyright - args: [--level, error, --project, ./pyproject.toml] + # - repo: https://github.com/RobertCraigie/pyright-python + # rev: v1.1.380 + # hooks: + # - id: pyright + # args: [--level, error, --project, ./pyproject.toml] # Pytest is expensive, so we show its set-up but leave it commented out. # - repo: local From 6798e077a070415871547f9e8b9302e9a926e6ea Mon Sep 17 00:00:00 2001 From: lochhh Date: Thu, 12 Sep 2024 20:14:50 +0100 Subject: [PATCH 12/36] Auto-fix mixed lined endings and trailing whitespace --- aeon/README.md | 2 +- aeon/analysis/readme.md | 2 +- aeon/dj_pipeline/acquisition.py | 2 +- aeon/dj_pipeline/analysis/block_analysis.py | 2 +- .../device_type_mapper.json | 2 +- .../setup_yml/Experiment0.1.yml | 15 +- .../setup_yml/SocialExperiment0.yml | 15 +- .../docs/datajoint_analysis_diagram.svg | 82 +++++++++-- .../docs/datajoint_overview_diagram.svg | 132 +++++++++++++++--- .../docs/notebooks/analysis_diagram.svg | 82 +++++++++-- aeon/dj_pipeline/docs/notebooks/diagram.ipynb | 2 +- aeon/dj_pipeline/docs/notebooks/diagram.svg | 132 +++++++++++++++--- .../social_experiments_block_analysis.ipynb | 2 +- aeon/dj_pipeline/streams.py | 18 ++- .../dj_pipeline/utils/device_type_mapper.json | 2 +- .../webapps/sciviz/apk_requirements.txt | 2 +- 16 files changed, 397 insertions(+), 97 deletions(-) diff --git a/aeon/README.md b/aeon/README.md index 4287ca86..792d6005 100644 --- a/aeon/README.md +++ b/aeon/README.md @@ -1 +1 @@ -# \ No newline at end of file +# diff --git a/aeon/analysis/readme.md b/aeon/analysis/readme.md index 4287ca86..792d6005 100644 --- a/aeon/analysis/readme.md +++ b/aeon/analysis/readme.md @@ -1 +1 @@ -# \ No newline at end of file +# diff --git a/aeon/dj_pipeline/acquisition.py b/aeon/dj_pipeline/acquisition.py index b2fd00d8..973d7c07 100644 --- a/aeon/dj_pipeline/acquisition.py +++ b/aeon/dj_pipeline/acquisition.py @@ -475,7 +475,7 @@ class MessageLog(dj.Part): -> master --- sample_count: int # number of data points acquired from this stream for a given chunk - timestamps: longblob # (datetime) + timestamps: longblob # (datetime) priority: longblob type: longblob message: longblob diff --git a/aeon/dj_pipeline/analysis/block_analysis.py b/aeon/dj_pipeline/analysis/block_analysis.py index ef50138f..19566357 100644 --- a/aeon/dj_pipeline/analysis/block_analysis.py +++ b/aeon/dj_pipeline/analysis/block_analysis.py @@ -528,7 +528,7 @@ def make(self, key): @schema class BlockPlots(dj.Computed): - definition = """ + definition = """ -> BlockAnalysis --- subject_positions_plot: longblob diff --git a/aeon/dj_pipeline/create_experiments/device_type_mapper.json b/aeon/dj_pipeline/create_experiments/device_type_mapper.json index 848f0f3b..f9b16280 100644 --- a/aeon/dj_pipeline/create_experiments/device_type_mapper.json +++ 
b/aeon/dj_pipeline/create_experiments/device_type_mapper.json @@ -1 +1 @@ -{"VideoController": "CameraController", "CameraTop": "SpinnakerVideoSource", "CameraWest": "SpinnakerVideoSource", "CameraEast": "SpinnakerVideoSource", "CameraNorth": "SpinnakerVideoSource", "CameraSouth": "SpinnakerVideoSource", "CameraPatch1": "SpinnakerVideoSource", "CameraPatch2": "SpinnakerVideoSource", "CameraNest": "SpinnakerVideoSource", "AudioAmbient": "AudioSource", "Patch1": "UndergroundFeeder", "Patch2": "UndergroundFeeder", "WeightNest": "WeightScale", "TrackingTop": "PositionTracking", "ActivityCenter": "ActivityTracking", "ActivityArena": "ActivityTracking", "ActivityNest": "ActivityTracking", "ActivityPatch1": "ActivityTracking", "ActivityPatch2": "ActivityTracking", "InNest": "RegionTracking", "InPatch1": "RegionTracking", "InPatch2": "RegionTracking", "ArenaCenter": "DistanceFromPoint", "InArena": "InRange", "InCorridor": "InRange", "ClockSynchronizer": "TimestampGenerator", "Rfid": "Rfid Reader", "CameraPatch3": "SpinnakerVideoSource", "Patch3": "UndergroundFeeder", "Nest": "WeightScale", "RfidNest1": "RfidReader", "RfidNest2": "RfidReader", "RfidGate": "RfidReader", "RfidPatch1": "RfidReader", "RfidPatch2": "RfidReader", "RfidPatch3": "RfidReader", "LightCycle": "EnvironmentCondition"} \ No newline at end of file +{"VideoController": "CameraController", "CameraTop": "SpinnakerVideoSource", "CameraWest": "SpinnakerVideoSource", "CameraEast": "SpinnakerVideoSource", "CameraNorth": "SpinnakerVideoSource", "CameraSouth": "SpinnakerVideoSource", "CameraPatch1": "SpinnakerVideoSource", "CameraPatch2": "SpinnakerVideoSource", "CameraNest": "SpinnakerVideoSource", "AudioAmbient": "AudioSource", "Patch1": "UndergroundFeeder", "Patch2": "UndergroundFeeder", "WeightNest": "WeightScale", "TrackingTop": "PositionTracking", "ActivityCenter": "ActivityTracking", "ActivityArena": "ActivityTracking", "ActivityNest": "ActivityTracking", "ActivityPatch1": "ActivityTracking", "ActivityPatch2": "ActivityTracking", "InNest": "RegionTracking", "InPatch1": "RegionTracking", "InPatch2": "RegionTracking", "ArenaCenter": "DistanceFromPoint", "InArena": "InRange", "InCorridor": "InRange", "ClockSynchronizer": "TimestampGenerator", "Rfid": "Rfid Reader", "CameraPatch3": "SpinnakerVideoSource", "Patch3": "UndergroundFeeder", "Nest": "WeightScale", "RfidNest1": "RfidReader", "RfidNest2": "RfidReader", "RfidGate": "RfidReader", "RfidPatch1": "RfidReader", "RfidPatch2": "RfidReader", "RfidPatch3": "RfidReader", "LightCycle": "EnvironmentCondition"} diff --git a/aeon/dj_pipeline/create_experiments/setup_yml/Experiment0.1.yml b/aeon/dj_pipeline/create_experiments/setup_yml/Experiment0.1.yml index 66cde91a..2809393c 100644 --- a/aeon/dj_pipeline/create_experiments/setup_yml/Experiment0.1.yml +++ b/aeon/dj_pipeline/create_experiments/setup_yml/Experiment0.1.yml @@ -1,6 +1,6 @@ name: Experiment 0.1 start-time: 2021-06-02T23:49:41.0000000 -description: +description: data-path: /ceph/aeon/experiment0.1 time-bin-size: 1 arena: @@ -16,15 +16,15 @@ arena: gates: - position: *o0 clock-synchronizer: - serial-number: + serial-number: port-name: COM6 description: ClockSynchronizer ambient-microphone: - serial-number: + serial-number: description: AudioAmbient sample-rate: 192000 video-controller: - serial-number: + serial-number: port-name: COM3 description: VideoController standard-trigger-frequency: 50 @@ -71,14 +71,14 @@ cameras: trigger-source: HighSpeedTrigger gain: 10 patches: -- serial-number: +- serial-number: port-name: COM4 
description: Patch1 position: *o0 radius: 4 starting-torque: 0 - workflow-path: -- serial-number: + workflow-path: +- serial-number: port-name: COM7 description: Patch2 position: *o0
@@ -89,4 +89,3 @@ weight-scales: - serial-number: SCALE1 description: WeightData nest: 1 -
diff --git a/aeon/dj_pipeline/create_experiments/setup_yml/SocialExperiment0.yml b/aeon/dj_pipeline/create_experiments/setup_yml/SocialExperiment0.yml index 148eb180..d29f3c1e 100644
--- a/aeon/dj_pipeline/create_experiments/setup_yml/SocialExperiment0.yml
+++ b/aeon/dj_pipeline/create_experiments/setup_yml/SocialExperiment0.yml
@@ -1,6 +1,6 @@ name: Experiment 0.1 start-time: 2021-11-30 14:00:00 -description: +description: data-path: /ceph/preprocessed/socialexperiment0 time-bin-size: 1 arena:
@@ -16,15 +16,15 @@ gates: - position: *o0 clock-synchronizer: - serial-number: + serial-number: port-name: COM6 description: ClockSynchronizer ambient-microphone: - serial-number: + serial-number: description: AudioAmbient sample-rate: 192000 video-controller: - serial-number: + serial-number: port-name: COM3 description: VideoController standard-trigger-frequency: 50
@@ -71,14 +71,14 @@ cameras: trigger-source: HighSpeedTrigger gain: 10 patches: -- serial-number: + port-name: COM4 description: Patch1 position: *o0 radius: 4 starting-torque: 0 - workflow-path: -- serial-number: + workflow-path: +- serial-number: port-name: COM7 description: Patch2 position: *o0
@@ -89,4 +89,3 @@ weight-scales: - serial-number: SCALE1 description: WeightData nest: 1 -
diff --git a/aeon/dj_pipeline/docs/datajoint_analysis_diagram.svg b/aeon/dj_pipeline/docs/datajoint_analysis_diagram.svg index 1addc0d8..94087629 100644
--- a/aeon/dj_pipeline/docs/datajoint_analysis_diagram.svg
+++ b/aeon/dj_pipeline/docs/datajoint_analysis_diagram.svg
[diff body omitted: the SVG markup did not survive text extraction, leaving only hunk headers and the diagram's table labels (VisitSummary, VisitSubjectPosition, VisitSubjectPosition.TimeSlice, VisitTimeDistribution, ExperimentCamera, CameraTracking, Visit, Place, Chunk, Experiment, Experiment.Subject); per the commit message and diffstat, the changes are line-ending/trailing-whitespace fixes and a missing end-of-file newline.]
diff --git a/aeon/dj_pipeline/docs/datajoint_overview_diagram.svg b/aeon/dj_pipeline/docs/datajoint_overview_diagram.svg index 6cbe98cf..02590b4a 100644
--- a/aeon/dj_pipeline/docs/datajoint_overview_diagram.svg
+++ b/aeon/dj_pipeline/docs/datajoint_overview_diagram.svg
[diff body omitted: same extraction damage; surviving table labels include Arena, Experiment, ExperimentFoodPatch, FoodPatchWheel, WheelState, FoodPatchEvent, WeightScale, ExperimentWeightScale, Camera, ExperimentCamera, Epoch, Chunk, FoodPatch, CameraTracking, qc.CameraQC, CameraTracking.Object, EventType, WeightMeasurement; same whitespace and end-of-file newline fixes.]
diff --git a/aeon/dj_pipeline/docs/notebooks/analysis_diagram.svg b/aeon/dj_pipeline/docs/notebooks/analysis_diagram.svg index 1addc0d8..94087629 100644
--- a/aeon/dj_pipeline/docs/notebooks/analysis_diagram.svg
+++ b/aeon/dj_pipeline/docs/notebooks/analysis_diagram.svg
[diff body omitted: identical content to datajoint_analysis_diagram.svg above, with the same whitespace and end-of-file newline fixes.]
diff --git a/aeon/dj_pipeline/docs/notebooks/diagram.ipynb b/aeon/dj_pipeline/docs/notebooks/diagram.ipynb index 3610ed41..fc6734d2 100644
--- a/aeon/dj_pipeline/docs/notebooks/diagram.ipynb
+++ b/aeon/dj_pipeline/docs/notebooks/diagram.ipynb
@@ -898,4 +898,4 @@ }, "nbformat": 4, "nbformat_minor": 5 -} \ No newline at end of file +}
diff --git a/aeon/dj_pipeline/docs/notebooks/diagram.svg b/aeon/dj_pipeline/docs/notebooks/diagram.svg index a0941dfb..d7872725 100644
--- a/aeon/dj_pipeline/docs/notebooks/diagram.svg
+++ b/aeon/dj_pipeline/docs/notebooks/diagram.svg
[diff body omitted: same extraction damage as the diagrams above; surviving labels include CameraTracking, CameraTracking.Object, WeightScale, ExperimentWeightScale, WheelState, ExperimentFoodPatch, FoodPatchWheel, FoodPatchEvent, WeightMeasurement, Arena, Experiment, EventType, ExperimentCamera, qc.CameraQC, Camera, Epoch, Chunk, FoodPatch; same whitespace and end-of-file newline fixes.]
diff --git a/aeon/dj_pipeline/docs/notebooks/social_experiments_block_analysis.ipynb b/aeon/dj_pipeline/docs/notebooks/social_experiments_block_analysis.ipynb index 6a32776b..02b0b919 100644
--- a/aeon/dj_pipeline/docs/notebooks/social_experiments_block_analysis.ipynb
+++ b/aeon/dj_pipeline/docs/notebooks/social_experiments_block_analysis.ipynb
@@ -208,4 +208,4 @@ }, "nbformat": 4, "nbformat_minor": 0 -} \ No newline at end of file +}
diff --git a/aeon/dj_pipeline/streams.py b/aeon/dj_pipeline/streams.py index 4cd482a0..e3d6ba12 100644
--- a/aeon/dj_pipeline/streams.py
+++ b/aeon/dj_pipeline/streams.py
@@ -14,7 +14,7 @@ schema = dj.Schema(get_schema_name("streams")) -@schema +@schema class StreamType(dj.Lookup): """Catalog of all steam types for the different device types used across Project Aeon. One StreamType corresponds to one reader class in `aeon.io.reader`.
The combination of `stream_reader` and `stream_reader_kwargs` should fully specify the data loading routine for a particular device, using the `aeon.io.utils`.""" @@ -29,7 +29,7 @@ class StreamType(dj.Lookup): """ -@schema +@schema class DeviceType(dj.Lookup): """Catalog of all device types used across Project Aeon.""" @@ -46,7 +46,7 @@ class Stream(dj.Part): """ -@schema +@schema class Device(dj.Lookup): definition = """ # Physical devices, of a particular type, identified by unique serial number device_serial_number: varchar(12) @@ -55,7 +55,7 @@ class Device(dj.Lookup): """ -@schema +@schema class RfidReader(dj.Manual): definition = f""" # rfid_reader placement and operation for a particular time period, at a certain location, for a given experiment (auto-generated with aeon_mecha-unknown) @@ -82,7 +82,7 @@ class RemovalTime(dj.Part): """ -@schema +@schema class SpinnakerVideoSource(dj.Manual): definition = f""" # spinnaker_video_source placement and operation for a particular time period, at a certain location, for a given experiment (auto-generated with aeon_mecha-unknown) @@ -109,7 +109,7 @@ class RemovalTime(dj.Part): """ -@schema +@schema class UndergroundFeeder(dj.Manual): definition = f""" # underground_feeder placement and operation for a particular time period, at a certain location, for a given experiment (auto-generated with aeon_mecha-unknown) @@ -136,7 +136,7 @@ class RemovalTime(dj.Part): """ -@schema +@schema class WeightScale(dj.Manual): definition = f""" # weight_scale placement and operation for a particular time period, at a certain location, for a given experiment (auto-generated with aeon_mecha-unknown) @@ -163,7 +163,7 @@ class RemovalTime(dj.Part): """ -@schema +@schema class RfidReaderRfidEvents(dj.Imported): definition = """ # Raw per-chunk RfidEvents data stream from RfidReader (auto-generated with aeon_mecha-unknown) -> RfidReader @@ -838,5 +838,3 @@ def make(self, key): }, ignore_extra_fields=True, ) - - diff --git a/aeon/dj_pipeline/utils/device_type_mapper.json b/aeon/dj_pipeline/utils/device_type_mapper.json index 7f041bd5..a28caebe 100644 --- a/aeon/dj_pipeline/utils/device_type_mapper.json +++ b/aeon/dj_pipeline/utils/device_type_mapper.json @@ -1 +1 @@ -{"ClockSynchronizer": "TimestampGenerator", "VideoController": "CameraController", "CameraTop": "SpinnakerVideoSource", "CameraWest": "SpinnakerVideoSource", "CameraEast": "SpinnakerVideoSource", "CameraNorth": "SpinnakerVideoSource", "CameraSouth": "SpinnakerVideoSource", "CameraNest": "SpinnakerVideoSource", "CameraPatch1": "SpinnakerVideoSource", "CameraPatch2": "SpinnakerVideoSource", "CameraPatch3": "SpinnakerVideoSource", "AudioAmbient": "AudioSource", "Patch1": "UndergroundFeeder", "Patch2": "UndergroundFeeder", "Patch3": "UndergroundFeeder", "Nest": "WeightScale", "RfidNest1": "RfidReader", "RfidNest2": "RfidReader", "RfidGate": "RfidReader", "RfidPatch1": "RfidReader", "RfidPatch2": "RfidReader", "RfidPatch3": "RfidReader", "LightCycle": "EnvironmentCondition"} \ No newline at end of file +{"ClockSynchronizer": "TimestampGenerator", "VideoController": "CameraController", "CameraTop": "SpinnakerVideoSource", "CameraWest": "SpinnakerVideoSource", "CameraEast": "SpinnakerVideoSource", "CameraNorth": "SpinnakerVideoSource", "CameraSouth": "SpinnakerVideoSource", "CameraNest": "SpinnakerVideoSource", "CameraPatch1": "SpinnakerVideoSource", "CameraPatch2": "SpinnakerVideoSource", "CameraPatch3": "SpinnakerVideoSource", "AudioAmbient": "AudioSource", "Patch1": "UndergroundFeeder", "Patch2": 
"UndergroundFeeder", "Patch3": "UndergroundFeeder", "Nest": "WeightScale", "RfidNest1": "RfidReader", "RfidNest2": "RfidReader", "RfidGate": "RfidReader", "RfidPatch1": "RfidReader", "RfidPatch2": "RfidReader", "RfidPatch3": "RfidReader", "LightCycle": "EnvironmentCondition"} diff --git a/aeon/dj_pipeline/webapps/sciviz/apk_requirements.txt b/aeon/dj_pipeline/webapps/sciviz/apk_requirements.txt index c9d2ebed..5ab05441 100644 --- a/aeon/dj_pipeline/webapps/sciviz/apk_requirements.txt +++ b/aeon/dj_pipeline/webapps/sciviz/apk_requirements.txt @@ -1,2 +1,2 @@ bash -git \ No newline at end of file +git From cc9c92480405f8eaf1cde281b6d74dbe008235a7 Mon Sep 17 00:00:00 2001 From: lochhh Date: Thu, 12 Sep 2024 20:17:30 +0100 Subject: [PATCH 13/36] Ruff autofix --- aeon/analysis/__init__.py | 1 - aeon/dj_pipeline/analysis/block_analysis.py | 12 +++++------- aeon/io/__init__.py | 1 - aeon/qc/__init__.py | 1 - aeon/schema/social_01.py | 2 -- aeon/schema/social_02.py | 6 ------ aeon/schema/social_03.py | 1 - 7 files changed, 5 insertions(+), 19 deletions(-) diff --git a/aeon/analysis/__init__.py b/aeon/analysis/__init__.py index 792d6005..e69de29b 100644 --- a/aeon/analysis/__init__.py +++ b/aeon/analysis/__init__.py @@ -1 +0,0 @@ -# diff --git a/aeon/dj_pipeline/analysis/block_analysis.py b/aeon/dj_pipeline/analysis/block_analysis.py index 19566357..9e4a08a5 100644 --- a/aeon/dj_pipeline/analysis/block_analysis.py +++ b/aeon/dj_pipeline/analysis/block_analysis.py @@ -248,8 +248,7 @@ def make(self, key): ) # update block_end if last timestamp of encoder_df is before the current block_end - if encoder_df.index[-1] < block_end: - block_end = encoder_df.index[-1] + block_end = min(encoder_df.index[-1], block_end) # Subject data # Get all unique subjects that visited the environment over the entire exp; @@ -311,8 +310,7 @@ def make(self, key): ) # update block_end if last timestamp of pos_df is before the current block_end - if pos_df.index[-1] < block_end: - block_end = pos_df.index[-1] + block_end = min(pos_df.index[-1], block_end) if block_end != (Block & key).fetch1("block_end"): Block.update1({**key, "block_end": block_end}) @@ -474,9 +472,9 @@ def make(self, key): ) subject_in_patch = in_patch[subject_name] subject_in_patch_cum_time = subject_in_patch.cumsum().values * dt - all_subj_patch_pref_dict[patch["patch_name"]][subject_name][ - "cum_time" - ] = subject_in_patch_cum_time + all_subj_patch_pref_dict[patch["patch_name"]][subject_name]["cum_time"] = ( + subject_in_patch_cum_time + ) subj_pellets = closest_subjects_pellet_ts[closest_subjects_pellet_ts == subject_name] self.Patch.insert1( key diff --git a/aeon/io/__init__.py b/aeon/io/__init__.py index 792d6005..e69de29b 100644 --- a/aeon/io/__init__.py +++ b/aeon/io/__init__.py @@ -1 +0,0 @@ -# diff --git a/aeon/qc/__init__.py b/aeon/qc/__init__.py index 792d6005..e69de29b 100644 --- a/aeon/qc/__init__.py +++ b/aeon/qc/__init__.py @@ -1 +0,0 @@ -# diff --git a/aeon/schema/social_01.py b/aeon/schema/social_01.py index 4fee2d94..7f6e2ab0 100644 --- a/aeon/schema/social_01.py +++ b/aeon/schema/social_01.py @@ -3,7 +3,6 @@ class RfidEvents(Stream): - def __init__(self, path): path = path.replace("Rfid", "") if path.startswith("Events"): @@ -13,6 +12,5 @@ def __init__(self, path): class Pose(Stream): - def __init__(self, path): super().__init__(_reader.Pose(f"{path}_node-0*")) diff --git a/aeon/schema/social_02.py b/aeon/schema/social_02.py index dd501d72..04946679 100644 --- a/aeon/schema/social_02.py +++ b/aeon/schema/social_02.py @@ -4,7 +4,6 @@ 
class Environment(StreamGroup): - def __init__(self, path): super().__init__(path) @@ -45,25 +44,21 @@ def __init__(self, path): class Pose(Stream): - def __init__(self, path): super().__init__(_reader.Pose(f"{path}_test-node1*")) class WeightRaw(Stream): - def __init__(self, path): super().__init__(_reader.Harp(f"{path}_200_*", ["weight(g)", "stability"])) class WeightFiltered(Stream): - def __init__(self, path): super().__init__(_reader.Harp(f"{path}_202_*", ["weight(g)", "stability"])) class Patch(StreamGroup): - def __init__(self, path): super().__init__(path) @@ -89,6 +84,5 @@ def __init__(self, path): class RfidEvents(Stream): - def __init__(self, path): super().__init__(_reader.Harp(f"{path}_32*", ["rfid"])) diff --git a/aeon/schema/social_03.py b/aeon/schema/social_03.py index 558b39c9..18b05eec 100644 --- a/aeon/schema/social_03.py +++ b/aeon/schema/social_03.py @@ -3,6 +3,5 @@ class Pose(Stream): - def __init__(self, path): super().__init__(_reader.Pose(f"{path}_202_*")) From f225406306c27f803fdcc8bd2ae3a56536c2adf6 Mon Sep 17 00:00:00 2001 From: lochhh Date: Thu, 12 Sep 2024 20:27:57 +0100 Subject: [PATCH 14/36] Fix D103 Missing docstring in public function --- aeon/analysis/utils.py | 1 + aeon/io/reader.py | 2 ++ 2 files changed, 3 insertions(+) diff --git a/aeon/analysis/utils.py b/aeon/analysis/utils.py index 4c229771..89534660 100644 --- a/aeon/analysis/utils.py +++ b/aeon/analysis/utils.py @@ -101,6 +101,7 @@ def rate(events, window, frequency, weight=1, start=None, end=None, smooth=None, def get_events_rates( events, window_len_sec, frequency, unit_len_sec=60, start=None, end=None, smooth=None, center=False ): + """Computes the event rate from a sequence of events over a specified window.""" # events is an array with the time (in seconds) of event occurence # window_len_sec is the size of the window over which the event rate is estimated # unit_len_sec is the length of one sample point diff --git a/aeon/io/reader.py b/aeon/io/reader.py index 0dcae1f2..e48e608d 100644 --- a/aeon/io/reader.py +++ b/aeon/io/reader.py @@ -414,6 +414,7 @@ def get_config_file(cls, config_file_dir: Path, config_file_names: None | list[s def from_dict(data, pattern=None): + """Converts a dictionary to a DotMap object.""" reader_type = data.get("type", None) if reader_type is not None: kwargs = {k: v for k, v in data.items() if k != "type"} @@ -425,6 +426,7 @@ def from_dict(data, pattern=None): def to_dict(dotmap): + """Converts a DotMap object to a dictionary.""" if isinstance(dotmap, Reader): kwargs = {k: v for k, v in vars(dotmap).items() if k not in ["pattern"] and not k.startswith("_")} kwargs["type"] = type(dotmap).__name__ From c7e74f7386067acd98b987fc174abe134f6a1890 Mon Sep 17 00:00:00 2001 From: lochhh Date: Thu, 12 Sep 2024 21:15:12 +0100 Subject: [PATCH 15/36] Fix D415 First line should end with a period, question mark, or exclamation point --- aeon/dj_pipeline/__init__.py | 4 +++- aeon/dj_pipeline/acquisition.py | 14 ++++++------ .../scripts/clone_and_freeze_exp02.py | 10 +++++---- tests/conftest.py | 22 +++++-------------- tests/dj_pipeline/test_tracking.py | 3 +-- 5 files changed, 23 insertions(+), 30 deletions(-) diff --git a/aeon/dj_pipeline/__init__.py b/aeon/dj_pipeline/__init__.py index fc5e6fef..5a6fa1c3 100644 --- a/aeon/dj_pipeline/__init__.py +++ b/aeon/dj_pipeline/__init__.py @@ -31,7 +31,9 @@ def dict_to_uuid(key) -> uuid.UUID: def fetch_stream(query, drop_pk=True): - """Provided a query containing data from a Stream table, + """Fetches data from a Stream table based on a 
query and returns it as a DataFrame. + + Provided a query containing data from a Stream table, fetch and aggregate the data into one DataFrame indexed by "time" """ df = (query & "sample_count > 0").fetch(format="frame").reset_index() diff --git a/aeon/dj_pipeline/acquisition.py b/aeon/dj_pipeline/acquisition.py index 973d7c07..f8048566 100644 --- a/aeon/dj_pipeline/acquisition.py +++ b/aeon/dj_pipeline/acquisition.py @@ -1,18 +1,18 @@ import datetime +import json import pathlib import re + import datajoint as dj import numpy as np import pandas as pd -import json -from aeon.io import api as io_api -from aeon.schema import schemas as aeon_schemas -from aeon.io import reader as io_reader from aeon.analysis import utils as analysis_utils - from aeon.dj_pipeline import get_schema_name, lab, subject from aeon.dj_pipeline.utils import paths +from aeon.io import api as io_api +from aeon.io import reader as io_reader +from aeon.schema import schemas as aeon_schemas logger = dj.logger schema = dj.schema(get_schema_name("acquisition")) @@ -181,7 +181,7 @@ class Epoch(dj.Manual): @classmethod def ingest_epochs(cls, experiment_name): - """Ingest epochs for the specified "experiment_name" """ + """Ingest epochs for the specified ``experiment_name``.""" device_name = _ref_device_mapping.get(experiment_name, "CameraTop") all_chunks, raw_data_dirs = _get_all_chunks(experiment_name, device_name) @@ -604,7 +604,7 @@ def _match_experiment_directory(experiment_name, path, directories): def create_chunk_restriction(experiment_name, start_time, end_time): - """Create a time restriction string for the chunks between the specified "start" and "end" times""" + """Create a time restriction string for the chunks between the specified "start" and "end" times.""" start_restriction = f'"{start_time}" BETWEEN chunk_start AND chunk_end' end_restriction = f'"{end_time}" BETWEEN chunk_start AND chunk_end' start_query = Chunk & {"experiment_name": experiment_name} & start_restriction diff --git a/aeon/dj_pipeline/scripts/clone_and_freeze_exp02.py b/aeon/dj_pipeline/scripts/clone_and_freeze_exp02.py index 859f6e73..740932b2 100644 --- a/aeon/dj_pipeline/scripts/clone_and_freeze_exp02.py +++ b/aeon/dj_pipeline/scripts/clone_and_freeze_exp02.py @@ -1,11 +1,12 @@ -"""Jan 2024 -Cloning and archiving schemas and data for experiment 0.2. +"""Jan 2024: Cloning and archiving schemas and data for experiment 0.2. + The pipeline code associated with this archived data pipeline is here https://github.com/SainsburyWellcomeCentre/aeon_mecha/releases/tag/dj_exp02_stable """ -import os import inspect +import os + import datajoint as dj from datajoint_utilities.dj_data_copy import db_migration from datajoint_utilities.dj_data_copy.pipeline_cloning import ClonedPipeline @@ -58,7 +59,8 @@ def data_copy(restriction, table_block_list, batch_size=None): def validate(): - """Validation of schemas migration + """Validates schemas migration. + 1. for the provided list of schema names - validate all schemas have been migrated 2. for each schema - validate all tables have been migrated 3. for each table, validate all entries have been migrated diff --git a/tests/conftest.py b/tests/conftest.py index 1e1bb12e..3604890f 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,4 +1,6 @@ -"""# run all tests: +"""Global configurations and fixtures for pytest. 
+ +# run all tests: # pytest -sv --cov-report term-missing --cov=aeon_mecha -p no:warnings tests/dj_pipeline # run one test, debug: @@ -19,13 +21,12 @@ def data_dir(): - """Returns test data directory""" + """Returns test data directory.""" return os.path.join(os.path.dirname(os.path.realpath(__file__)), "data") @pytest.fixture(autouse=True, scope="session") def test_params(): - return { "start_ts": "2022-06-22 08:51:10", "end_ts": "2022-06-22 14:00:00", @@ -46,7 +47,7 @@ def test_params(): @pytest.fixture(autouse=True, scope="session") def dj_config(): - """If dj_local_config exists, load""" + """Configures DataJoint connection and loads custom settings.""" dj_config_fp = pathlib.Path("dj_local_conf.json") assert dj_config_fp.exists() dj.config.load(dj_config_fp) @@ -56,16 +57,7 @@ def dj_config(): def load_pipeline(): - - from aeon.dj_pipeline import ( - acquisition, - analysis, - lab, - qc, - report, - subject, - tracking, - ) + from aeon.dj_pipeline import acquisition, analysis, lab, qc, report, subject, tracking return { "subject": subject, @@ -79,7 +71,6 @@ def load_pipeline(): def drop_schema(): - _pipeline = load_pipeline() _pipeline["report"].schema.drop() @@ -95,7 +86,6 @@ def drop_schema(): @pytest.fixture(autouse=True, scope="session") def pipeline(dj_config): - _pipeline = load_pipeline() yield _pipeline diff --git a/tests/dj_pipeline/test_tracking.py b/tests/dj_pipeline/test_tracking.py index 434bc008..973e0741 100644 --- a/tests/dj_pipeline/test_tracking.py +++ b/tests/dj_pipeline/test_tracking.py @@ -12,7 +12,7 @@ def save_test_data(pipeline, test_params): - """Save test dataset fetched from tracking.CameraTracking.Object""" + """Save test dataset fetched from tracking.CameraTracking.Object.""" tracking = pipeline["tracking"] key = tracking.CameraTracking.Object().fetch("KEY")[index] @@ -36,7 +36,6 @@ def save_test_data(pipeline, test_params): @mark.ingestion @mark.tracking def test_camera_tracking_ingestion(test_params, pipeline, camera_tracking_ingestion): - tracking = pipeline["tracking"] assert len(tracking.CameraTracking.Object()) == test_params["camera_tracking_object_count"] From d9d02873d7c34c8c773454d61a118baccffb2169 Mon Sep 17 00:00:00 2001 From: lochhh Date: Thu, 12 Sep 2024 21:20:34 +0100 Subject: [PATCH 16/36] Ignore deprecated PT004 --- pyproject.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/pyproject.toml b/pyproject.toml index 5283a14a..985b84a0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -108,6 +108,7 @@ lint.ignore = [ "E731", "E702", "S101", + "PT004", # Rule `PT004` is deprecated and will be removed in a future release. 
"PT013", "PLR0912", "PLR0913", From c866ab9d1ed4cfc7b039c13983c297123a3ed125 Mon Sep 17 00:00:00 2001 From: lochhh Date: Thu, 12 Sep 2024 21:22:34 +0100 Subject: [PATCH 17/36] Fix D417 Missing argument description in the docstring --- aeon/dj_pipeline/utils/load_metadata.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/aeon/dj_pipeline/utils/load_metadata.py b/aeon/dj_pipeline/utils/load_metadata.py index aceabeec..ce1c2775 100644 --- a/aeon/dj_pipeline/utils/load_metadata.py +++ b/aeon/dj_pipeline/utils/load_metadata.py @@ -115,11 +115,12 @@ def insert_device_types(devices_schema: DotMap, metadata_yml_filepath: Path): streams.Device.insert(new_devices) -def extract_epoch_config(experiment_name: str, devices_schema, metadata_yml_filepath: str) -> dict: +def extract_epoch_config(experiment_name: str, devices_schema: DotMap, metadata_yml_filepath: str) -> dict: """Parse experiment metadata YAML file and extract epoch configuration. Args: experiment_name (str): Name of the experiment. + devices_schema (DotMap): DotMap object (e.g., exp02, octagon01) metadata_yml_filepath (str): path to the metadata YAML file. Returns: From 75af9b82d0709bbb62160e637425ee80b2985b5e Mon Sep 17 00:00:00 2001 From: lochhh Date: Thu, 12 Sep 2024 21:27:04 +0100 Subject: [PATCH 18/36] Ignore E741 check for `h, l, s` assignment --- aeon/analysis/block_plotting.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aeon/analysis/block_plotting.py b/aeon/analysis/block_plotting.py index be86b68f..67ebed32 100644 --- a/aeon/analysis/block_plotting.py +++ b/aeon/analysis/block_plotting.py @@ -32,7 +32,7 @@ def gen_hex_grad(hex_col, vals, min_l=0.3): """Generates an array of hex color values based on a gradient defined by unit-normalized values.""" # Convert hex to rgb to hls - h, l, s = rgb_to_hls(*[int(hex_col.lstrip("#")[i : i + 2], 16) / 255 for i in (0, 2, 4)]) + h, l, s = rgb_to_hls(*[int(hex_col.lstrip("#")[i : i + 2], 16) / 255 for i in (0, 2, 4)]) # noqa: E741 grad = np.empty(shape=(len(vals),), dtype=" Date: Thu, 12 Sep 2024 21:41:06 +0100 Subject: [PATCH 19/36] Use redundant import alias as suggested in F401 --- aeon/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/aeon/__init__.py b/aeon/__init__.py index b59e77aa..2a691c53 100644 --- a/aeon/__init__.py +++ b/aeon/__init__.py @@ -9,5 +9,5 @@ finally: del version, PackageNotFoundError -# Set functions avaialable directly under the 'aeon' top-level namespace -from aeon.io.api import load +# Set functions available directly under the 'aeon' top-level namespace +from aeon.io.api import load as load # noqa: PLC0414 From a5d88a226b74a2f3d205533433828eacfcaa2ed3 Mon Sep 17 00:00:00 2001 From: lochhh Date: Thu, 12 Sep 2024 21:44:41 +0100 Subject: [PATCH 20/36] Re-enable ruff in pre-commit --- .pre-commit-config.yaml | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index ed735be5..95cefb61 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -21,16 +21,16 @@ repos: - id: trailing-whitespace args: [--markdown-linebreak-ext=md] - # - repo: https://github.com/astral-sh/ruff-pre-commit - # rev: v0.6.4 - # hooks: - # # Run the linter with the `--fix` flag. - # - id: ruff - # types_or: [ python, pyi ] - # args: [ --fix ] - # # Run the formatter. 
- # - id: ruff-format - # types_or: [ python, pyi ] + - repo: https://github.com/astral-sh/ruff-pre-commit + rev: v0.6.4 + hooks: + # Run the linter with the `--fix` flag. + - id: ruff + types_or: [ python, pyi ] # Ignore .ipynb files + args: [ --fix ] + # Run the formatter. + - id: ruff-format + types_or: [ python, pyi ] # Ignore .ipynb files # - repo: https://github.com/RobertCraigie/pyright-python # rev: v1.1.380 From 7fe4837a569792e8ab4d90e609a1698dd808f5bd Mon Sep 17 00:00:00 2001 From: lochhh Date: Thu, 12 Sep 2024 21:54:08 +0100 Subject: [PATCH 21/36] Re-enable pyright in pre-commit --- .pre-commit-config.yaml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 95cefb61..3604c34b 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -32,11 +32,11 @@ repos: - id: ruff-format types_or: [ python, pyi ] # Ignore .ipynb files - # - repo: https://github.com/RobertCraigie/pyright-python - # rev: v1.1.380 - # hooks: - # - id: pyright - # args: [--level, error, --project, ./pyproject.toml] + - repo: https://github.com/RobertCraigie/pyright-python + rev: v1.1.380 + hooks: + - id: pyright + args: [--level, error, --project, ./pyproject.toml] # Pytest is expensive, so we show its set-up but leave it commented out. # - repo: local From b0714abaefcfde574d10a88e9470d29609817000 Mon Sep 17 00:00:00 2001 From: lochhh Date: Thu, 12 Sep 2024 22:41:57 +0100 Subject: [PATCH 22/36] Configure ruff to ignore .ipynb files --- .pre-commit-config.yaml | 2 -- pyproject.toml | 1 + 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 3604c34b..21cbac48 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -26,11 +26,9 @@ repos: hooks: # Run the linter with the `--fix` flag. - id: ruff - types_or: [ python, pyi ] # Ignore .ipynb files args: [ --fix ] # Run the formatter. - id: ruff-format - types_or: [ python, pyi ] # Ignore .ipynb files - repo: https://github.com/RobertCraigie/pyright-python rev: v1.1.380 diff --git a/pyproject.toml b/pyproject.toml index 985b84a0..541bf4c6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -118,6 +118,7 @@ extend-exclude = [ ".git", ".github", ".idea", + "*.ipynb", ".vscode", "aeon/dj_pipeline/streams.py", ] From 68e43445afc3cc58fc922ae82a2bce3e069742d2 Mon Sep 17 00:00:00 2001 From: lochhh Date: Thu, 12 Sep 2024 22:56:06 +0100 Subject: [PATCH 23/36] Remove ruff `--config` in build_env_run_tests workflow --- .github/workflows/build_env_run_tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build_env_run_tests.yml b/.github/workflows/build_env_run_tests.yml index 9372a4ca..3a8783f6 100644 --- a/.github/workflows/build_env_run_tests.yml +++ b/.github/workflows/build_env_run_tests.yml @@ -65,7 +65,7 @@ jobs: # Only run codebase checks and tests for ubuntu. - name: ruff if: matrix.os == 'ubuntu-latest' - run: python -m ruff check --config ./pyproject.toml . + run: python -m ruff check . - name: pyright if: matrix.os == 'ubuntu-latest' run: python -m pyright --level error --project ./pyproject.toml . 
From df20e9f386ab58bbdb0daeb53182b33afea989f4 Mon Sep 17 00:00:00 2001 From: glopesdev Date: Wed, 18 Sep 2024 16:57:30 +0100 Subject: [PATCH 24/36] Apply remaining ruff recommendations --- aeon/dj_pipeline/analysis/block_analysis.py | 56 +++++++++---------- aeon/dj_pipeline/populate/worker.py | 6 -- aeon/dj_pipeline/utils/streams_maker.py | 4 +- aeon/io/reader.py | 23 +++++--- .../test_pipeline_instantiation.py | 1 - tests/dj_pipeline/test_qc.py | 1 - tests/io/test_api.py | 7 ++- 7 files changed, 49 insertions(+), 49 deletions(-) diff --git a/aeon/dj_pipeline/analysis/block_analysis.py b/aeon/dj_pipeline/analysis/block_analysis.py index 9e4a08a5..b9baecdd 100644 --- a/aeon/dj_pipeline/analysis/block_analysis.py +++ b/aeon/dj_pipeline/analysis/block_analysis.py @@ -478,15 +478,15 @@ def make(self, key): subj_pellets = closest_subjects_pellet_ts[closest_subjects_pellet_ts == subject_name] self.Patch.insert1( key - | dict( - patch_name=patch["patch_name"], - subject_name=subject_name, - in_patch_timestamps=subject_in_patch.index.values, - in_patch_time=subject_in_patch_cum_time[-1], - pellet_count=len(subj_pellets), - pellet_timestamps=subj_pellets.index.values, - wheel_cumsum_distance_travelled=cum_wheel_dist_subj_df[subject_name].values, - ) + | { + "patch_name": patch["patch_name"], + "subject_name": subject_name, + "in_patch_timestamps": subject_in_patch.index.values, + "in_patch_time": subject_in_patch_cum_time[-1], + "pellet_count": len(subj_pellets), + "pellet_timestamps": subj_pellets.index.values, + "wheel_cumsum_distance_travelled": cum_wheel_dist_subj_df[subject_name].values, + } ) # Now that we have computed all individual patch and subject values, we iterate again through @@ -513,14 +513,14 @@ def make(self, key): self.Preference.insert1( key - | dict( - patch_name=patch_name, - subject_name=subject_name, - cumulative_preference_by_time=cum_pref_time, - cumulative_preference_by_wheel=cum_pref_dist, - final_preference_by_time=cum_pref_time[-1], - final_preference_by_wheel=cum_pref_dist[-1], - ) + | { + "patch_name": patch_name, + "subject_name": subject_name, + "cumulative_preference_by_time": cum_pref_time, + "cumulative_preference_by_wheel": cum_pref_dist, + "final_preference_by_time": cum_pref_time[-1], + "final_preference_by_wheel": cum_pref_dist[-1], + } ) @@ -695,11 +695,11 @@ def make(self, key): x=wheel_ts, y=cum_pref, mode="lines", # + markers", - line=dict( - width=2, - color=subject_colors[subj_i], - dash=patch_markers_linestyles[patch_i], - ), + line={ + "width": 2, + "color": subject_colors[subj_i], + "dash": patch_markers_linestyles[patch_i], + }, name=f"{subj} - {p}: μ: {patch_mean}", ) ) @@ -717,13 +717,13 @@ def make(self, key): x=cur_cum_pel_ct["time"], y=cur_cum_pel_ct["cum_pref"], mode="markers", - marker=dict( - symbol=patch_markers[patch_i], - color=gen_hex_grad( + marker={ + "symbol": patch_markers[patch_i], + "color": gen_hex_grad( subject_colors[-1], cur_cum_pel_ct["norm_thresh_val"] ), - size=8, - ), + "size": 8, + }, showlegend=False, customdata=np.stack((cur_cum_pel_ct["threshold"],), axis=-1), hovertemplate="Threshold: %{customdata[0]:.2f} cm", @@ -735,7 +735,7 @@ def make(self, key): title=f"Cumulative Patch Preference - {title}", xaxis_title="Time", yaxis_title="Pref Index", - yaxis=dict(tickvals=np.arange(0, 1.1, 0.1)), + yaxis={"tickvals": np.arange(0, 1.1, 0.1)}, ) # Insert figures as json-formatted plotly plots diff --git a/aeon/dj_pipeline/populate/worker.py b/aeon/dj_pipeline/populate/worker.py index 6f4095ef..b25a73db 100644 --- 
a/aeon/dj_pipeline/populate/worker.py +++ b/aeon/dj_pipeline/populate/worker.py @@ -44,12 +44,6 @@ def ingest_epochs_chunks(): acquisition.Chunk.ingest_chunks(experiment_name) -def ingest_environment_visits(): - """Extract and insert complete visits for experiments specified in AutomatedExperimentIngestion.""" - experiment_names = AutomatedExperimentIngestion.fetch("experiment_name") - # analysis.ingest_environment_visits(experiment_names) - - # ---- Define worker(s) ---- # configure a worker to process `acquisition`-related tasks acquisition_worker = DataJointWorker( diff --git a/aeon/dj_pipeline/utils/streams_maker.py b/aeon/dj_pipeline/utils/streams_maker.py index 3e5acafc..78e5ebaf 100644 --- a/aeon/dj_pipeline/utils/streams_maker.py +++ b/aeon/dj_pipeline/utils/streams_maker.py @@ -126,8 +126,8 @@ def get_device_stream_template(device_type: str, stream_type: str, streams_modul for col in stream.columns: if col.startswith("_"): continue - col = re.sub(r"\([^)]*\)", "", col) - table_definition += f"{col}: longblob\n " + new_col = re.sub(r"\([^)]*\)", "", col) + table_definition += f"{new_col}: longblob\n " class DeviceDataStream(dj.Imported): definition = table_definition diff --git a/aeon/io/reader.py b/aeon/io/reader.py index 445a0888..ba5c8fa4 100644 --- a/aeon/io/reader.py +++ b/aeon/io/reader.py @@ -11,8 +11,8 @@ from dotmap import DotMap from aeon import util -from aeon.io.api import chunk, chunk_key from aeon.io.api import aeon as aeon_time +from aeon.io.api import chunk, chunk_key _SECONDS_PER_TICK = 32e-6 _payloadtypes = { @@ -188,8 +188,10 @@ def __init__(self, pattern): super().__init__(pattern, columns=["angle", "intensity"]) def read(self, file, downsample=True): - """Reads encoder data from the specified Harp binary file, and optionally downsamples - the frequency to 50Hz. + """Reads encoder data from the specified Harp binary file. + + By default the encoder data is downsampled to 50Hz. Setting downsample to + False or None can be used to force the raw data to be returned. """ data = super().read(file) if downsample is True: @@ -200,7 +202,7 @@ def read(self, file, downsample=True): if first_index is not None: # since data is absolute angular position we decimate by taking first of each bin chunk_origin = chunk(first_index) - data = data.resample('20ms', origin=chunk_origin).first() + data = data.resample("20ms", origin=chunk_origin).first() return data @@ -319,15 +321,17 @@ def read(self, file: Path) -> pd.DataFrame: parts = self.get_bodyparts(config_file) # Using bodyparts, assign column names to Harp register values, and read data in default format. 
+ BONSAI_SLEAP_V2 = 0.2 + BONSAI_SLEAP_V3 = 0.3 try: # Bonsai.Sleap0.2 - bonsai_sleap_v = 0.2 + bonsai_sleap_v = BONSAI_SLEAP_V2 columns = ["identity", "identity_likelihood"] for part in parts: columns.extend([f"{part}_x", f"{part}_y", f"{part}_likelihood"]) self.columns = columns data = super().read(file) except ValueError: # column mismatch; Bonsai.Sleap0.3 - bonsai_sleap_v = 0.3 + bonsai_sleap_v = BONSAI_SLEAP_V3 columns = ["identity"] columns.extend([f"{identity}_likelihood" for identity in identities]) for part in parts: @@ -352,10 +356,13 @@ def read(self, file: Path) -> pd.DataFrame: new_columns = ["identity", "identity_likelihood", "part", "x", "y", "part_likelihood"] new_data = pd.DataFrame(columns=new_columns) for i, part in enumerate(parts): - part_columns = columns[0 : (len(identities) + 1)] if bonsai_sleap_v == 0.3 else columns[0:2] + part_columns = ( + columns[0 : (len(identities) + 1)] if bonsai_sleap_v == BONSAI_SLEAP_V3 else columns[0:2] + ) part_columns.extend([f"{part}_x", f"{part}_y", f"{part}_likelihood"]) part_data = pd.DataFrame(data[part_columns]) - if bonsai_sleap_v == 0.3: # combine all identity_likelihood cols into a single col as dict + if bonsai_sleap_v == BONSAI_SLEAP_V3: + # combine all identity_likelihood cols into a single col as dict part_data["identity_likelihood"] = part_data.apply( lambda row: {identity: row[f"{identity}_likelihood"] for identity in identities}, axis=1 ) diff --git a/tests/dj_pipeline/test_pipeline_instantiation.py b/tests/dj_pipeline/test_pipeline_instantiation.py index 48cfe8ea..cb3b51fb 100644 --- a/tests/dj_pipeline/test_pipeline_instantiation.py +++ b/tests/dj_pipeline/test_pipeline_instantiation.py @@ -3,7 +3,6 @@ @mark.instantiation def test_pipeline_instantiation(pipeline): - assert hasattr(pipeline["acquisition"], "FoodPatchEvent") assert hasattr(pipeline["lab"], "Arena") assert hasattr(pipeline["qc"], "CameraQC") diff --git a/tests/dj_pipeline/test_qc.py b/tests/dj_pipeline/test_qc.py index bfe248fc..9815031e 100644 --- a/tests/dj_pipeline/test_qc.py +++ b/tests/dj_pipeline/test_qc.py @@ -3,7 +3,6 @@ @mark.qc def test_camera_qc_ingestion(test_params, pipeline, camera_qc_ingestion): - qc = pipeline["qc"] assert len(qc.CameraQC()) == test_params["camera_qc_count"] diff --git a/tests/io/test_api.py b/tests/io/test_api.py index 4f3d91d5..095439de 100644 --- a/tests/io/test_api.py +++ b/tests/io/test_api.py @@ -48,6 +48,7 @@ def test_load_nonmonotonic(): @mark.api def test_load_encoder_with_downsampling(): + DOWNSAMPLE_PERIOD = 0.02 data = aeon.load(monotonic_path, exp02.Patch2.Encoder, downsample=True) raw_data = aeon.load(monotonic_path, exp02.Patch2.Encoder, downsample=None) @@ -55,14 +56,14 @@ def test_load_encoder_with_downsampling(): assert len(data) < len(raw_data) # Check that the first timestamp of the downsampled data is within 20ms of the raw data - assert abs(data.index[0] - raw_data.index[0]).total_seconds() <= 0.02 + assert abs(data.index[0] - raw_data.index[0]).total_seconds() <= DOWNSAMPLE_PERIOD # Check that the last timestamp of the downsampled data is within 20ms of the raw data - assert abs(data.index[-1] - raw_data.index[-1]).total_seconds() <= 0.02 + assert abs(data.index[-1] - raw_data.index[-1]).total_seconds() <= DOWNSAMPLE_PERIOD # Check that the minimum difference between consecutive timestamps in the downsampled data # is at least 20ms (50Hz) - assert data.index.to_series().diff().dt.total_seconds().min() >= 0.02 + assert data.index.to_series().diff().dt.total_seconds().min() >= DOWNSAMPLE_PERIOD # Check 
that the timestamps in the downsampled data are strictly increasing assert data.index.is_monotonic_increasing From 6e64c838a6fce0812984a885c4614e34ef53dbcb Mon Sep 17 00:00:00 2001 From: glopesdev Date: Thu, 19 Sep 2024 14:14:23 +0100 Subject: [PATCH 25/36] Exclude venv folder from pyright checks --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 541bf4c6..9bf8b2cf 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -185,6 +185,6 @@ reportShadowedImports = "error" # *Note*: we may want to set all 'ReportOptional*' rules to "none", but leaving 'em default for now venvPath = "." venv = ".venv" -exclude = ["aeon/dj_pipeline/*"] +exclude = ["aeon/dj_pipeline/*", ".venv/*"] [tool.pytest.ini_options] markers = ["api"] From 8d0c03f1fad9372c9295816349cced7906ce3b19 Mon Sep 17 00:00:00 2001 From: glopesdev Date: Thu, 19 Sep 2024 14:14:32 +0100 Subject: [PATCH 26/36] Remove obsolete and unused qc module --- aeon/qc/__init__.py | 0 aeon/qc/video.py | 47 --------------------------------------------- 2 files changed, 47 deletions(-) delete mode 100644 aeon/qc/__init__.py delete mode 100644 aeon/qc/video.py diff --git a/aeon/qc/__init__.py b/aeon/qc/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/aeon/qc/video.py b/aeon/qc/video.py deleted file mode 100644 index 857f94f9..00000000 --- a/aeon/qc/video.py +++ /dev/null @@ -1,47 +0,0 @@ -import os -from pathlib import Path - -import aeon.io.api as aeon - -root = "/ceph/aeon/test2/experiment0.1" -qcroot = "/ceph/aeon/aeon/qc/experiment0.1" -devicenames = [ - "FrameEast", - "FrameGate", - "FrameNorth", - "FramePatch1", - "FramePatch2", - "FrameSouth", - "FrameTop", - "FrameWest", -] - -for device in devicenames: - videochunks = aeon.chunkdata(root, device) - videochunks["epoch"] = videochunks.path.str.rsplit("/", n=3, expand=True)[1] - - stats = [] - frameshifts = [] - for key, period in videochunks.groupby(by="epoch"): - frame_offset = 0 - path = Path(os.path.join(qcroot, key, device)) - path.mkdir(parents=True, exist_ok=True) - for chunk in period.itertuples(): - outpath = Path(chunk.path.replace(root, qcroot)).with_suffix(".parquet") - print(f"[{key}] Analysing {device} {chunk.Index}... 
", end="") - data = aeon.videoreader(chunk.path).reset_index() - deltas = data[data.columns[0:4]].diff() - deltas.columns = ["time_delta", "frame_delta", "hw_counter_delta", "hw_timestamp_delta"] - deltas["frame_offset"] = (deltas.hw_counter_delta - 1).cumsum() + frame_offset - drop_count = deltas.frame_offset.iloc[-1] - max_harp_delta = deltas.time_delta.max().total_seconds() - max_camera_delta = deltas.hw_timestamp_delta.max() / 1e9 # convert nanoseconds to seconds - print( - f"drops: {drop_count - frame_offset} frameOffset: {drop_count} " - + "maxHarpDelta: {max_harp_delta} s maxCameraDelta: {max_camera_delta} s" - ) - stats.append((drop_count, max_harp_delta, max_camera_delta, chunk.path)) - deltas.set_index(data.time, inplace=True) - deltas.to_parquet(outpath) - frameshifts.append(deltas) - frame_offset = drop_count From 97bc21cfd047cc92771c323d763594fa85df758a Mon Sep 17 00:00:00 2001 From: glopesdev Date: Thu, 19 Sep 2024 14:30:31 +0100 Subject: [PATCH 27/36] Apply pyright recommendations --- aeon/analysis/movies.py | 6 +++--- aeon/analysis/plotting.py | 11 +++++------ aeon/analysis/utils.py | 2 +- aeon/io/api.py | 2 +- aeon/io/reader.py | 25 ++++++++++++------------- aeon/io/video.py | 4 ++-- aeon/util.py | 2 +- 7 files changed, 25 insertions(+), 27 deletions(-) diff --git a/aeon/analysis/movies.py b/aeon/analysis/movies.py index 078efad2..3ac3c1e9 100644 --- a/aeon/analysis/movies.py +++ b/aeon/analysis/movies.py @@ -7,7 +7,7 @@ from aeon.io import video -def gridframes(frames, width, height, shape=None): +def gridframes(frames, width, height, shape: None | int | tuple[int, int] = None): """Arranges a set of frames into a grid layout with the specified pixel dimensions and shape. :param list frames: A list of frames to include in the grid layout. @@ -20,7 +20,7 @@ def gridframes(frames, width, height, shape=None): """ if shape is None: shape = len(frames) - if type(shape) not in [list, tuple]: + if isinstance(shape, int): shape = math.ceil(math.sqrt(shape)) shape = (shape, shape) @@ -43,7 +43,7 @@ def gridframes(frames, width, height, shape=None): def averageframes(frames): """Returns the average of the specified collection of frames.""" - return cv2.convertScaleAbs(sum(np.multiply(1 / len(frames), frames))) + return cv2.convertScaleAbs(np.sum(np.multiply(1 / len(frames), frames))) def groupframes(frames, n, fun): diff --git a/aeon/analysis/plotting.py b/aeon/analysis/plotting.py index 39168026..dc6157a7 100644 --- a/aeon/analysis/plotting.py +++ b/aeon/analysis/plotting.py @@ -31,21 +31,20 @@ def heatmap(position, frequency, ax=None, **kwargs): return mesh, cbar -def circle(x, y, radius, fmt=None, ax=None, **kwargs): +def circle(x, y, radius, *args, ax=None, **kwargs): """Plot a circle centered at the given x, y position with the specified radius. :param number x: The x-component of the circle center. :param number y: The y-component of the circle center. :param number radius: The radius of the circle. - :param str, optional fmt: The format used to plot the circle line. :param Axes, optional ax: The Axes on which to draw the circle. 
""" if ax is None: ax = plt.gca() - points = pd.DataFrame(np.linspace(0, 2 * math.pi, 360), columns=["angle"]) + points = pd.DataFrame({"angle": np.linspace(0, 2 * math.pi, 360)}) points["x"] = radius * np.cos(points.angle) + x points["y"] = radius * np.sin(points.angle) + y - ax.plot(points.x, points.y, fmt, **kwargs) + ax.plot(points.x, points.y, *args, **kwargs) def rateplot( @@ -132,10 +131,10 @@ def colorline( if cmap is None: cmap = plt.get_cmap("copper") if norm is None: - norm = plt.Normalize(0.0, 1.0) + norm = colors.Normalize(0.0, 1.0) z = np.asarray(z) points = np.array([x, y]).T.reshape(-1, 1, 2) segments = np.concatenate([points[:-1], points[1:]], axis=1) - lines = LineCollection(segments, array=z, cmap=cmap, norm=norm, **kwargs) + lines = LineCollection(segments, array=z, cmap=cmap, norm=norm, **kwargs) # type: ignore ax.add_collection(lines) return lines diff --git a/aeon/analysis/utils.py b/aeon/analysis/utils.py index 89534660..13fff107 100644 --- a/aeon/analysis/utils.py +++ b/aeon/analysis/utils.py @@ -48,7 +48,7 @@ def visits(data, onset="Enter", offset="Exit"): data = data.reset_index() data_onset = data[data.event == onset] data_offset = data[data.event == offset] - data = pd.merge(data_onset, data_offset, on="id", how="left", suffixes=[lsuffix, rsuffix]) + data = pd.merge(data_onset, data_offset, on="id", how="left", suffixes=(lsuffix, rsuffix)) # valid pairings have the smallest positive duration data["duration"] = data[time_offset] - data[time_onset] diff --git a/aeon/io/api.py b/aeon/io/api.py index ec9c36ae..ae91794c 100644 --- a/aeon/io/api.py +++ b/aeon/io/api.py @@ -103,7 +103,7 @@ def load(root, reader, start=None, end=None, time=None, tolerance=None, epoch=No filetimes = [chunk for (_, chunk), _ in files] files = [file for _, file in files] for key, values in time.groupby(by=chunk): - i = bisect.bisect_left(filetimes, key) + i = bisect.bisect_left(filetimes, key) # type: ignore if i < len(filetimes): frame = reader.read(files[i], **kwargs) _set_index(frame) diff --git a/aeon/io/reader.py b/aeon/io/reader.py index ba5c8fa4..abb6b97e 100644 --- a/aeon/io/reader.py +++ b/aeon/io/reader.py @@ -43,7 +43,7 @@ def __init__(self, pattern, columns, extension): self.columns = columns self.extension = extension - def read(self, _): + def read(self, file): """Reads data from the specified file.""" return pd.DataFrame(columns=self.columns, index=pd.DatetimeIndex([])) @@ -94,7 +94,7 @@ def read(self, file): """Returns path and epoch information for the specified chunk.""" epoch, chunk = chunk_key(file) data = {"path": file, "epoch": epoch} - return pd.DataFrame(data, index=[chunk], columns=self.columns) + return pd.DataFrame(data, index=pd.Series(chunk), columns=self.columns) class Metadata(Reader): @@ -113,7 +113,7 @@ def read(self, file): workflow = metadata.pop("Workflow") commit = metadata.pop("Commit", pd.NA) data = {"workflow": workflow, "commit": commit, "metadata": [DotMap(metadata)]} - return pd.DataFrame(data, index=[time], columns=self.columns) + return pd.DataFrame(data, index=pd.Series(time), columns=self.columns) class Csv(Reader): @@ -353,7 +353,7 @@ def read(self, file: Path) -> pd.DataFrame: data = self.class_int2str(data, config_file) n_parts = len(parts) part_data_list = [pd.DataFrame()] * n_parts - new_columns = ["identity", "identity_likelihood", "part", "x", "y", "part_likelihood"] + new_columns = pd.Series(["identity", "identity_likelihood", "part", "x", "y", "part_likelihood"]) new_data = pd.DataFrame(columns=new_columns) for i, part in 
enumerate(parts): part_columns = ( @@ -379,17 +379,16 @@ def read(self, file: Path) -> pd.DataFrame: @staticmethod def get_class_names(config_file: Path) -> list[str]: """Returns a list of classes from a model's config file.""" - classes = None with open(config_file) as f: config = json.load(f) - if config_file.stem == "confmap_config": # SLEAP - try: - heads = config["model"]["heads"] - classes = util.find_nested_key(heads, "class_vectors")["classes"] - except KeyError as err: - if not classes: - raise KeyError(f"Cannot find class_vectors in {config_file}.") from err - return classes + if config_file.stem != "confmap_config": # SLEAP + raise ValueError(f"The model config file '{config_file}' is not supported.") + + try: + heads = config["model"]["heads"] + return util.find_nested_key(heads, "class_vectors")["classes"] + except KeyError as err: + raise KeyError(f"Cannot find class_vectors in {config_file}.") from err @staticmethod def get_bodyparts(config_file: Path) -> list[str]: diff --git a/aeon/io/video.py b/aeon/io/video.py index cbfa0dc7..26c49827 100644 --- a/aeon/io/video.py +++ b/aeon/io/video.py @@ -15,7 +15,7 @@ def frames(data): index = 0 try: for frameidx, path in zip(data._frame, data._path, strict=False): - if filename != path: + if filename != path or capture is None: if capture is not None: capture.release() capture = cv2.VideoCapture(path) @@ -49,7 +49,7 @@ def export(frames, file, fps, fourcc=None): for frame in frames: if writer is None: if fourcc is None: - fourcc = cv2.VideoWriter_fourcc("m", "p", "4", "v") + fourcc = cv2.VideoWriter_fourcc("m", "p", "4", "v") # type: ignore writer = cv2.VideoWriter(file, fourcc, fps, (frame.shape[1], frame.shape[0])) writer.write(frame) finally: diff --git a/aeon/util.py b/aeon/util.py index 1028abf4..2251eaad 100644 --- a/aeon/util.py +++ b/aeon/util.py @@ -14,7 +14,7 @@ def find_nested_key(obj: dict | list, key: str) -> Any: found = find_nested_key(v, key) if found: return found - elif isinstance(obj, list): + else: for item in obj: found = find_nested_key(item, key) if found: From 6bacc43e93826f9a3ffa8e1c8c9189abc0bf4c14 Mon Sep 17 00:00:00 2001 From: glopesdev Date: Thu, 19 Sep 2024 14:30:45 +0100 Subject: [PATCH 28/36] Disable useLibraryCodeForTypes --- pyproject.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/pyproject.toml b/pyproject.toml index 9bf8b2cf..c42028da 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -163,6 +163,7 @@ extend-exclude = [ convention = "google" [tool.pyright] +useLibraryCodeForTypes = false reportMissingImports = "none" reportImportCycles = "error" reportUnusedImport = "error" From d1180a886fad0f604543c0fd8360afd0018d8323 Mon Sep 17 00:00:00 2001 From: glopesdev Date: Thu, 19 Sep 2024 15:34:43 +0100 Subject: [PATCH 29/36] Remove unused function call --- aeon/dj_pipeline/populate/worker.py | 1 - 1 file changed, 1 deletion(-) diff --git a/aeon/dj_pipeline/populate/worker.py b/aeon/dj_pipeline/populate/worker.py index b25a73db..fc9968d1 100644 --- a/aeon/dj_pipeline/populate/worker.py +++ b/aeon/dj_pipeline/populate/worker.py @@ -56,7 +56,6 @@ def ingest_epochs_chunks(): acquisition_worker(ingest_epochs_chunks) acquisition_worker(acquisition.EpochConfig) acquisition_worker(acquisition.Environment) -# acquisition_worker(ingest_environment_visits) acquisition_worker(block_analysis.BlockDetection) # configure a worker to handle pyrat sync From 23c440f24f92d235a24641e0726f94f9f950debe Mon Sep 17 00:00:00 2001 From: glopesdev Date: Thu, 19 Sep 2024 16:04:17 +0100 Subject: [PATCH 30/36] Ensure all 
roots are Path objects --- aeon/io/api.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aeon/io/api.py b/aeon/io/api.py index ae91794c..5d505ea6 100644 --- a/aeon/io/api.py +++ b/aeon/io/api.py @@ -87,7 +87,7 @@ def load(root, reader, start=None, end=None, time=None, tolerance=None, epoch=No fileset = { chunk_key(fname): fname for path in root - for fname in path.glob(f"{epoch_pattern}/**/{reader.pattern}.{reader.extension}") + for fname in Path(path).glob(f"{epoch_pattern}/**/{reader.pattern}.{reader.extension}") } files = sorted(fileset.items()) From 5dfd4a458d023ef57cf36cc2a919240968fe7ef4 Mon Sep 17 00:00:00 2001 From: glopesdev Date: Thu, 19 Sep 2024 16:17:53 +0100 Subject: [PATCH 31/36] Exclude dj_pipeline tests from online CI --- .github/workflows/build_env_run_tests.yml | 4 +++- tests/{ => dj_pipeline}/conftest.py | 0 2 files changed, 3 insertions(+), 1 deletion(-) rename tests/{ => dj_pipeline}/conftest.py (100%) diff --git a/.github/workflows/build_env_run_tests.yml b/.github/workflows/build_env_run_tests.yml index 3a8783f6..973dbe9d 100644 --- a/.github/workflows/build_env_run_tests.yml +++ b/.github/workflows/build_env_run_tests.yml @@ -66,12 +66,14 @@ jobs: - name: ruff if: matrix.os == 'ubuntu-latest' run: python -m ruff check . + - name: pyright if: matrix.os == 'ubuntu-latest' run: python -m pyright --level error --project ./pyproject.toml . + - name: pytest if: matrix.os == 'ubuntu-latest' - run: python -m pytest tests/ + run: python -m pytest tests/ --ignore=tests/dj_pipeline - name: generate test coverage report if: matrix.os == 'ubuntu-latest' diff --git a/tests/conftest.py b/tests/dj_pipeline/conftest.py similarity index 100% rename from tests/conftest.py rename to tests/dj_pipeline/conftest.py From f557c48b3d5141fe1f7a5e3f9774661be979af96 Mon Sep 17 00:00:00 2001 From: glopesdev Date: Thu, 19 Sep 2024 16:26:59 +0100 Subject: [PATCH 32/36] Exclude dj_pipeline tests from coverage report --- .github/workflows/build_env_run_tests.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build_env_run_tests.yml b/.github/workflows/build_env_run_tests.yml index 973dbe9d..c5a9382f 100644 --- a/.github/workflows/build_env_run_tests.yml +++ b/.github/workflows/build_env_run_tests.yml @@ -78,8 +78,8 @@ jobs: - name: generate test coverage report if: matrix.os == 'ubuntu-latest' run: | - python -m pytest --cov=aeon ./tests/ --cov-report=xml:./tests/test_coverage/test_coverage_report.xml - #python -m pytest --cov=aeon ./tests/ --cov-report=html:./tests/test_coverage/test_coverage_report_html + python -m pytest --cov=aeon --ignore=tests/dj_pipeline ./tests/ --cov-report=xml:./tests/test_coverage/test_coverage_report.xml + #python -m pytest --cov=aeon --ignore=tests/dj_pipeline ./tests/ --cov-report=html:./tests/test_coverage/test_coverage_report_html - name: upload test coverage report to codecov if: matrix.os == 'ubuntu-latest' uses: codecov/codecov-action@v2 From 81bbfa19e54beddb53ffb6740ef2da1797ed9919 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Milagros=20Mar=C3=ADn?= Date: Fri, 20 Sep 2024 08:03:55 +0200 Subject: [PATCH 33/36] Fix macOS wheel build for `datajoint` (Issue #249) (#406) --- .github/workflows/build_env_run_tests.yml | 55 ++++++++++++++++------- env_config/env.yml | 2 +- env_config/env_dev.yml | 13 +++--- env_config/env_gpu.yml | 2 +- env_config/env_macos.yml | 36 +++++++++++++++ pyproject.toml | 2 +- 6 files changed, 83 insertions(+), 27 deletions(-) create mode 100644 env_config/env_macos.yml diff --git 
a/.github/workflows/build_env_run_tests.yml b/.github/workflows/build_env_run_tests.yml index c5a9382f..297994a9 100644 --- a/.github/workflows/build_env_run_tests.yml +++ b/.github/workflows/build_env_run_tests.yml @@ -4,14 +4,14 @@ name: build_env_run_tests on: pull_request: - branches: [ main ] + branches: [main] types: [opened, reopened, synchronize] - workflow_dispatch: # allows running manually from Github's 'Actions' tab + workflow_dispatch: # allows running manually from Github's 'Actions' tab jobs: - build_env_pip_pyproject: # checks only for building env using pip and pyproject.toml - name: Build env using pip and pyproject.toml - runs-on: ubuntu-latest + build_env_pip_pyproject: # checks only for building env using pip and pyproject.toml + name: Build env using pip and pyproject.toml on ${{ matrix.os }} + runs-on: ${{ matrix.os }} if: github.event.pull_request.draft == false strategy: matrix: @@ -20,7 +20,7 @@ jobs: fail-fast: false defaults: run: - shell: bash -l {0} # reset shell for each step + shell: ${{ matrix.os == 'windows-latest' && 'cmd' || 'bash' }} -l {0} # Adjust shell based on OS steps: - name: Checkout code uses: actions/checkout@v2 @@ -31,12 +31,12 @@ jobs: - name: Create venv and install dependencies run: | python -m venv .venv - source .venv/bin/activate + .venv/Scripts/activate || source .venv/bin/activate pip install -e .[dev] pip list - .venv/bin/python -c "import aeon" - - build_env_run_tests: # checks for building env using mamba and runs codebase checks and tests + python -c "import aeon" + + build_env_run_tests: # checks for building env using mamba and runs codebase checks and tests name: Build env and run tests on ${{ matrix.os }} runs-on: ${{ matrix.os }} if: github.event.pull_request.draft == false @@ -47,11 +47,13 @@ jobs: fail-fast: false defaults: run: - shell: bash -l {0} # reset shell for each step + shell: ${{ matrix.os == 'windows-latest' && 'cmd' || 'bash' }} -l {0} steps: - - name: checkout repo + - name: Checkout repo uses: actions/checkout@v2 - - name: set up conda env + + - name: Set up conda env (Linux, Windows) + if: ${{ matrix.os != 'macos-latest' }} uses: conda-incubator/setup-miniconda@v2 with: use-mamba: true @@ -59,10 +61,30 @@ jobs: python-version: ${{ matrix.python-version }} environment-file: ./env_config/env.yml activate-environment: aeon + + - name: Set up conda env (macOS) + if: ${{ matrix.os == 'macos-latest' }} + uses: conda-incubator/setup-miniconda@v2 + with: + use-mamba: true + miniforge-variant: Mambaforge + python-version: ${{ matrix.python-version }} + environment-file: ./env_config/env_macos.yml + activate-environment: aeon + architecture: arm64 + miniconda-version: "latest" + + - name: Install datajoint wheel build with pip flag (macOS) + if: ${{ matrix.os == 'macos-latest' }} + run: | + source $CONDA/bin/activate aeon + pip install --use-pep517 datajoint git+https://github.com/datajoint-company/datajoint-utilities.git + - name: Update conda env with dev reqs + if: ${{ matrix.os != 'macos-latest' }} run: mamba env update -f ./env_config/env_dev.yml - # Only run codebase checks and tests for ubuntu. + # Only run codebase checks and tests for Linux (ubuntu). - name: ruff if: matrix.os == 'ubuntu-latest' run: python -m ruff check . 
@@ -76,12 +98,11 @@ jobs: run: python -m pytest tests/ --ignore=tests/dj_pipeline - name: generate test coverage report - if: matrix.os == 'ubuntu-latest' + if: ${{ matrix.os == 'ubuntu-latest' }} run: | python -m pytest --cov=aeon --ignore=tests/dj_pipeline ./tests/ --cov-report=xml:./tests/test_coverage/test_coverage_report.xml - #python -m pytest --cov=aeon --ignore=tests/dj_pipeline ./tests/ --cov-report=html:./tests/test_coverage/test_coverage_report_html - name: upload test coverage report to codecov - if: matrix.os == 'ubuntu-latest' + if: ${{ matrix.os == 'ubuntu-latest' }} uses: codecov/codecov-action@v2 with: token: ${{ secrets.CODECOV_TOKEN }} diff --git a/env_config/env.yml b/env_config/env.yml index 9a0aeb32..83239f1a 100644 --- a/env_config/env.yml +++ b/env_config/env.yml @@ -1,5 +1,5 @@ # Create env e.g. w/mamba: `mamba env create -q -f env.yml` -# Update exisiting env e.g. w/mamba: `mamba env update -f env.yml` +# Update existing env e.g. w/mamba: `mamba env update -f env.yml` name: aeon channels: diff --git a/env_config/env_dev.yml b/env_config/env_dev.yml index b1e0050f..a758017a 100644 --- a/env_config/env_dev.yml +++ b/env_config/env_dev.yml @@ -1,6 +1,6 @@ # Contains only the dev package requirements. # Create env e.g. w/mamba: `mamba env create -q -f env.yml` -# Update exisiting env e.g. w/mamba: `mamba env update -f env_dev.yml` +# Update existing env e.g. w/mamba: `mamba env update -f env_dev.yml` name: aeon channels: @@ -10,14 +10,13 @@ dependencies: - gh - ipdb - jellyfish - - pre-commit + - pre-commit - pydantic - - pyright - - pytest - - pytest-cov + - pyright + - pytest + - pytest-cov - ruff - - sphinx + - sphinx - tox - pip: - git+https://github.com/Technologicat/pyan.git - diff --git a/env_config/env_gpu.yml b/env_config/env_gpu.yml index ef045e76..f733be25 100644 --- a/env_config/env_gpu.yml +++ b/env_config/env_gpu.yml @@ -1,6 +1,6 @@ # Contains only the gpu package requirements. # Create env e.g. w/mamba: `mamba env create -q -f env.yml` -# Update exisiting env e.g. w/mamba: `mamba env update -f env_gpu.yml` +# Update existing env e.g. w/mamba: `mamba env update -f env_gpu.yml` name: aeon channels: diff --git a/env_config/env_macos.yml b/env_config/env_macos.yml new file mode 100644 index 00000000..42476661 --- /dev/null +++ b/env_config/env_macos.yml @@ -0,0 +1,36 @@ +# Create env e.g. w/mamba: `mamba env create -q -f env.yml` +# Update existing env e.g. 
w/mamba: `mamba env update -f env.yml` +# Update existing env pip installing datajoint and datajoint-utilities: pip install --use-pep517 datajoint git+https://github.com/datajoint-company/datajoint-utilities.git + +name: aeon +channels: + - conda-forge + - defaults +dependencies: + - python>=3.11 + - pip + - blas>=2.0, <3 + - bottleneck>=1.2.1, <2 + - dash + - dotmap + - fastparquet + - graphviz + - ipykernel + - jupyter + - jupyterlab + - matplotlib + - numba>=0.46.0, <1 + - numexpr>=2.6.8, <3 + - numpy>=1.21.0, <2 + - pandas>=1.3 + - plotly + - pyarrow + - pydotplus + - pymysql + - pyyaml + - scikit-learn + - scipy + - seaborn + - xarray>=0.12.3, <1 + - pip: + - opencv-python diff --git a/pyproject.toml b/pyproject.toml index c42028da..658abf5b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -22,8 +22,8 @@ license = { file = "license.md" } readme = "readme.md" dependencies = [ "bottleneck>=1.2.1,<2", - "datajoint-utilities @ git+https://github.com/datajoint-company/datajoint-utilities", "datajoint>=0.13.7", + "datajoint-utilities @ git+https://github.com/datajoint-company/datajoint-utilities", "dotmap", "fastparquet", "graphviz", From a678b8d1d6ab84b45eee7d299b4406ac7792f674 Mon Sep 17 00:00:00 2001 From: glopesdev Date: Fri, 20 Sep 2024 10:59:10 +0100 Subject: [PATCH 34/36] Run CI checks using pip env and pyproject.toml --- .github/workflows/build_env_run_tests.yml | 23 +++++++++-------------- 1 file changed, 9 insertions(+), 14 deletions(-) diff --git a/.github/workflows/build_env_run_tests.yml b/.github/workflows/build_env_run_tests.yml index 297994a9..f62e6a23 100644 --- a/.github/workflows/build_env_run_tests.yml +++ b/.github/workflows/build_env_run_tests.yml @@ -23,9 +23,9 @@ jobs: shell: ${{ matrix.os == 'windows-latest' && 'cmd' || 'bash' }} -l {0} # Adjust shell based on OS steps: - name: Checkout code - uses: actions/checkout@v2 + uses: actions/checkout@v4 - name: Set up Python - uses: actions/setup-python@v2 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} - name: Create venv and install dependencies @@ -50,16 +50,15 @@ jobs: shell: ${{ matrix.os == 'windows-latest' && 'cmd' || 'bash' }} -l {0} steps: - name: Checkout repo - uses: actions/checkout@v2 + uses: actions/checkout@v4 - name: Set up conda env (Linux, Windows) if: ${{ matrix.os != 'macos-latest' }} - uses: conda-incubator/setup-miniconda@v2 + uses: conda-incubator/setup-miniconda@v3 with: use-mamba: true miniforge-variant: Mambaforge python-version: ${{ matrix.python-version }} - environment-file: ./env_config/env.yml activate-environment: aeon - name: Set up conda env (macOS) @@ -69,20 +68,16 @@ jobs: use-mamba: true miniforge-variant: Mambaforge python-version: ${{ matrix.python-version }} - environment-file: ./env_config/env_macos.yml activate-environment: aeon architecture: arm64 miniconda-version: "latest" - name: Install datajoint wheel build with pip flag (macOS) if: ${{ matrix.os == 'macos-latest' }} - run: | - source $CONDA/bin/activate aeon - pip install --use-pep517 datajoint git+https://github.com/datajoint-company/datajoint-utilities.git + run: pip install --use-pep517 datajoint git+https://github.com/datajoint-company/datajoint-utilities.git - - name: Update conda env with dev reqs - if: ${{ matrix.os != 'macos-latest' }} - run: mamba env update -f ./env_config/env_dev.yml + - name: Install pyproject.toml dependencies + run: pip install -e .[dev] # Only run codebase checks and tests for Linux (ubuntu). 
- name: ruff @@ -100,13 +95,13 @@ jobs: - name: generate test coverage report if: ${{ matrix.os == 'ubuntu-latest' }} run: | - python -m pytest --cov=aeon --ignore=tests/dj_pipeline ./tests/ --cov-report=xml:./tests/test_coverage/test_coverage_report.xml + python -m pytest --cov=aeon tests/ --ignore=tests/dj_pipeline --cov-report=xml:tests/test_coverage/test_coverage_report.xml - name: upload test coverage report to codecov if: ${{ matrix.os == 'ubuntu-latest' }} uses: codecov/codecov-action@v2 with: token: ${{ secrets.CODECOV_TOKEN }} - directory: ./tests/test_coverage/ + directory: tests/test_coverage/ files: test_coverage_report.xml fail_ci_if_error: true verbose: true From 21076919a94af54c9840c36c8f293c6cb6560fec Mon Sep 17 00:00:00 2001 From: glopesdev Date: Fri, 20 Sep 2024 11:03:08 +0100 Subject: [PATCH 35/36] Run code checks and tests on all platforms --- .github/workflows/build_env_run_tests.yml | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/.github/workflows/build_env_run_tests.yml b/.github/workflows/build_env_run_tests.yml index f62e6a23..8b6c7242 100644 --- a/.github/workflows/build_env_run_tests.yml +++ b/.github/workflows/build_env_run_tests.yml @@ -79,17 +79,14 @@ jobs: - name: Install pyproject.toml dependencies run: pip install -e .[dev] - # Only run codebase checks and tests for Linux (ubuntu). + # Run codebase checks and tests - name: ruff - if: matrix.os == 'ubuntu-latest' run: python -m ruff check . - name: pyright - if: matrix.os == 'ubuntu-latest' run: python -m pyright --level error --project ./pyproject.toml . - name: pytest - if: matrix.os == 'ubuntu-latest' run: python -m pytest tests/ --ignore=tests/dj_pipeline - name: generate test coverage report From 1de5c2554cb7254c54031370845031c61cb7408b Mon Sep 17 00:00:00 2001 From: Chang Huan Lo Date: Fri, 20 Sep 2024 11:56:21 +0100 Subject: [PATCH 36/36] Activate venv for later steps and remove all conda dependencies (#413) * Persist venv across job steps * Update codecov-action version * Remove `env_config` --- .github/workflows/build_env_run_tests.yml | 64 ++++------------------- env_config/env.yml | 37 ------------- env_config/env_dev.yml | 22 -------- env_config/env_gpu.yml | 11 ---- env_config/env_macos.yml | 36 ------------- 5 files changed, 11 insertions(+), 159 deletions(-) delete mode 100644 env_config/env.yml delete mode 100644 env_config/env_dev.yml delete mode 100644 env_config/env_gpu.yml delete mode 100644 env_config/env_macos.yml diff --git a/.github/workflows/build_env_run_tests.yml b/.github/workflows/build_env_run_tests.yml index 8b6c7242..b166c202 100644 --- a/.github/workflows/build_env_run_tests.yml +++ b/.github/workflows/build_env_run_tests.yml @@ -9,7 +9,7 @@ on: workflow_dispatch: # allows running manually from Github's 'Actions' tab jobs: - build_env_pip_pyproject: # checks only for building env using pip and pyproject.toml + build_env_run_tests: # checks for building env using pyproject.toml and runs codebase checks and tests name: Build env using pip and pyproject.toml on ${{ matrix.os }} runs-on: ${{ matrix.os }} if: github.event.pull_request.draft == false @@ -35,67 +35,25 @@ jobs: pip install -e .[dev] pip list python -c "import aeon" - - build_env_run_tests: # checks for building env using mamba and runs codebase checks and tests - name: Build env and run tests on ${{ matrix.os }} - runs-on: ${{ matrix.os }} - if: github.event.pull_request.draft == false - strategy: - matrix: - os: [ubuntu-latest, windows-latest, macos-latest] - python-version: [3.11] - fail-fast: 
false - defaults: - run: - shell: ${{ matrix.os == 'windows-latest' && 'cmd' || 'bash' }} -l {0} - steps: - - name: Checkout repo - uses: actions/checkout@v4 - - - name: Set up conda env (Linux, Windows) - if: ${{ matrix.os != 'macos-latest' }} - uses: conda-incubator/setup-miniconda@v3 - with: - use-mamba: true - miniforge-variant: Mambaforge - python-version: ${{ matrix.python-version }} - activate-environment: aeon - - - name: Set up conda env (macOS) - if: ${{ matrix.os == 'macos-latest' }} - uses: conda-incubator/setup-miniconda@v2 - with: - use-mamba: true - miniforge-variant: Mambaforge - python-version: ${{ matrix.python-version }} - activate-environment: aeon - architecture: arm64 - miniconda-version: "latest" - - - name: Install datajoint wheel build with pip flag (macOS) - if: ${{ matrix.os == 'macos-latest' }} - run: pip install --use-pep517 datajoint git+https://github.com/datajoint-company/datajoint-utilities.git - - - name: Install pyproject.toml dependencies - run: pip install -e .[dev] - - # Run codebase checks and tests + - name: Activate venv for later steps + run: | + echo "VIRTUAL_ENV=$(pwd)/.venv" >> $GITHUB_ENV + echo "$(pwd)/.venv/bin" >> $GITHUB_PATH # For Unix-like systems + echo "$(pwd)/.venv/Scripts" >> $GITHUB_PATH # For Windows + # Only run codebase checks and tests for Linux (ubuntu). - name: ruff - run: python -m ruff check . - + run: ruff check . - name: pyright - run: python -m pyright --level error --project ./pyproject.toml . - + run: pyright --level error --project ./pyproject.toml . - name: pytest - run: python -m pytest tests/ --ignore=tests/dj_pipeline - + run: pytest tests/ --ignore=tests/dj_pipeline - name: generate test coverage report if: ${{ matrix.os == 'ubuntu-latest' }} run: | python -m pytest --cov=aeon tests/ --ignore=tests/dj_pipeline --cov-report=xml:tests/test_coverage/test_coverage_report.xml - name: upload test coverage report to codecov if: ${{ matrix.os == 'ubuntu-latest' }} - uses: codecov/codecov-action@v2 + uses: codecov/codecov-action@v4 with: token: ${{ secrets.CODECOV_TOKEN }} directory: tests/test_coverage/ diff --git a/env_config/env.yml b/env_config/env.yml deleted file mode 100644 index 83239f1a..00000000 --- a/env_config/env.yml +++ /dev/null @@ -1,37 +0,0 @@ -# Create env e.g. w/mamba: `mamba env create -q -f env.yml` -# Update existing env e.g. w/mamba: `mamba env update -f env.yml` - -name: aeon -channels: - - conda-forge - - defaults -dependencies: - - python>=3.11 - - pip - - blas>=2.0, <3 - - bottleneck>=1.2.1, <2 - - dash - - dotmap - - fastparquet - - graphviz - - ipykernel - - jupyter - - jupyterlab - - matplotlib - - numba>=0.46.0, <1 - - numexpr>=2.6.8, <3 - - numpy>=1.21.0, <2 - - pandas>=1.3 - - plotly - - pyarrow - - pydotplus - - pymysql - - pyyaml - - scikit-learn - - scipy - - seaborn - - xarray>=0.12.3, <1 - - pip: - - datajoint>=0.13.6, <1 - - git+https://github.com/datajoint-company/datajoint-utilities.git - - opencv-python diff --git a/env_config/env_dev.yml b/env_config/env_dev.yml deleted file mode 100644 index a758017a..00000000 --- a/env_config/env_dev.yml +++ /dev/null @@ -1,22 +0,0 @@ -# Contains only the dev package requirements. -# Create env e.g. w/mamba: `mamba env create -q -f env.yml` -# Update existing env e.g. 
w/mamba: `mamba env update -f env_dev.yml` - -name: aeon -channels: - - conda-forge - - defaults -dependencies: - - gh - - ipdb - - jellyfish - - pre-commit - - pydantic - - pyright - - pytest - - pytest-cov - - ruff - - sphinx - - tox - - pip: - - git+https://github.com/Technologicat/pyan.git diff --git a/env_config/env_gpu.yml b/env_config/env_gpu.yml deleted file mode 100644 index f733be25..00000000 --- a/env_config/env_gpu.yml +++ /dev/null @@ -1,11 +0,0 @@ -# Contains only the gpu package requirements. -# Create env e.g. w/mamba: `mamba env create -q -f env.yml` -# Update existing env e.g. w/mamba: `mamba env update -f env_gpu.yml` - -name: aeon -channels: - - conda-forge - - defaults -dependencies: - - cupy - - dask diff --git a/env_config/env_macos.yml b/env_config/env_macos.yml deleted file mode 100644 index 42476661..00000000 --- a/env_config/env_macos.yml +++ /dev/null @@ -1,36 +0,0 @@ -# Create env e.g. w/mamba: `mamba env create -q -f env.yml` -# Update existing env e.g. w/mamba: `mamba env update -f env.yml` -# Update existing env pip installing datajoint and datajoint-utilities: pip install --use-pep517 datajoint git+https://github.com/datajoint-company/datajoint-utilities.git - -name: aeon -channels: - - conda-forge - - defaults -dependencies: - - python>=3.11 - - pip - - blas>=2.0, <3 - - bottleneck>=1.2.1, <2 - - dash - - dotmap - - fastparquet - - graphviz - - ipykernel - - jupyter - - jupyterlab - - matplotlib - - numba>=0.46.0, <1 - - numexpr>=2.6.8, <3 - - numpy>=1.21.0, <2 - - pandas>=1.3 - - plotly - - pyarrow - - pydotplus - - pymysql - - pyyaml - - scikit-learn - - scipy - - seaborn - - xarray>=0.12.3, <1 - - pip: - - opencv-python
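
Note on the venv-persistence pattern in PATCH 36/36: every `run:` step in a GitHub Actions job starts a fresh shell, so sourcing `.venv/bin/activate` inside one step has no effect on the steps that follow. Appending to the special `$GITHUB_ENV` and `$GITHUB_PATH` files is the documented mechanism for propagating an environment variable and a PATH entry to all later steps in the job, which is why the patch replaces per-step activation with a single "Activate venv for later steps" step. A minimal standalone sketch of the same pattern on a hosted runner (step names and the `import aeon` smoke test are illustrative, not copied from the workflow above):

    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v5
        with:
          python-version: "3.11"
      - name: Create venv
        run: python -m venv .venv
      - name: Persist venv for later steps
        run: |
          echo "VIRTUAL_ENV=$(pwd)/.venv" >> "$GITHUB_ENV"
          echo "$(pwd)/.venv/bin" >> "$GITHUB_PATH"      # Unix-like runners
          echo "$(pwd)/.venv/Scripts" >> "$GITHUB_PATH"  # Windows runners
      - name: Install and smoke-test
        run: |
          pip install -e .[dev]
          python -c "import aeon"

Because later steps resolve `python` and `pip` through the updated PATH, no per-step activation is needed; appending a directory that does not exist on a given OS is harmless, which is why both the `bin` and `Scripts` entries can be added unconditionally.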