Release 0.19.4
donishadsmith committed Dec 25, 2024
1 parent 0647267 commit 7b76647
Showing 17 changed files with 1,638 additions and 1,014 deletions.
8 changes: 8 additions & 0 deletions .pre-commit-config.yaml
@@ -17,6 +17,14 @@ repos:
args: [--line-length=120]
files: '(^neurocaps/.*\.py$|.*^tests.*\.py$)'

+   - repo: https://github.com/adamchainz/blacken-docs
+     rev: 1.19.1
+     hooks:
+       - id: blacken-docs
+         args: [--line-length=120]
+         additional_dependencies:
+           - black

- repo: local
hooks:
- id: pytest
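For context, the new blacken-docs hook runs Black over code examples embedded in documentation files and docstrings. A minimal sketch of the effect under the configured 120-character line length (hypothetical snippet, not from this repository):

```python
# Hypothetical docstring fragment as it might look before the hook runs.
BEFORE = """
.. code-block:: python

    x = {'a':1,'b':2}
"""

# blacken-docs rewrites only the embedded snippet, to Black style.
AFTER = """
.. code-block:: python

    x = {"a": 1, "b": 2}
"""
```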
435 changes: 283 additions & 152 deletions CHANGELOG.md

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion README.md
@@ -1,7 +1,7 @@
# neurocaps
[![Latest Version](https://img.shields.io/pypi/v/neurocaps.svg)](https://pypi.python.org/pypi/neurocaps/)
[![Python Versions](https://img.shields.io/pypi/pyversions/neurocaps.svg)](https://pypi.python.org/pypi/neurocaps/)
- [![DOI](https://img.shields.io/badge/DOI-10.5281%2Fzenodo.11642615-teal)](https://doi.org/10.5281/zenodo.14322286)
+ [![DOI](https://img.shields.io/badge/DOI-10.5281%2Fzenodo.11642615-teal)](https://doi.org/10.5281/zenodo.14553662)
[![Github Repository](https://img.shields.io/badge/Source%20Code-neurocaps-purple)](https://github.com/donishadsmith/neurocaps)
[![Test Status](https://github.com/donishadsmith/neurocaps/actions/workflows/testing.yaml/badge.svg)](https://github.com/donishadsmith/neurocaps/actions/workflows/testing.yaml)
[![codecov](https://codecov.io/github/donishadsmith/neurocaps/graph/badge.svg?token=WS2V7I16WF)](https://codecov.io/github/donishadsmith/neurocaps)
751 changes: 516 additions & 235 deletions demos/openneuro_demo.ipynb

Large diffs are not rendered by default.

1,215 changes: 727 additions & 488 deletions demos/simulated_demo.ipynb

Large diffs are not rendered by default.

4 changes: 2 additions & 2 deletions docs/introduction.rst
@@ -9,7 +9,7 @@
:alt: Python Versions

.. image:: https://img.shields.io/badge/DOI-10.5281%2Fzenodo.11642615-teal
-    :target: https://doi.org/10.5281/zenodo.14322286
+    :target: https://doi.org/10.5281/zenodo.14553662
:alt: DOI

.. image:: https://img.shields.io/badge/Source%20Code-neurocaps-purple
@@ -86,7 +86,7 @@ Citing
------
::

- Smith, D. (2024). neurocaps. Zenodo. https://doi.org/10.5281/zenodo.14322286
+ Smith, D. (2024). neurocaps. Zenodo. https://doi.org/10.5281/zenodo.14553662

Usage
-----
2 changes: 1 addition & 1 deletion neurocaps/__init__.py
@@ -3,4 +3,4 @@
__all__ = ["analysis", "extraction"]

# Version in single place
__version__ = "0.19.3.post0"
__version__ = "0.19.4"
4 changes: 2 additions & 2 deletions neurocaps/_utils/analysis/cap2statmap.py
@@ -89,9 +89,9 @@ def _get_target_indices(atlas_file, reference_atlas, resolution_mm, remove_label
atlas = nib.load(atlas_file)

if reference_atlas == "Schaefer":
- reference_atlas_map = datasets.fetch_atlas_schaefer_2018(resolution_mm=resolution_mm)["maps"]
+ reference_atlas_map = datasets.fetch_atlas_schaefer_2018(resolution_mm=resolution_mm, verbose=0)["maps"]
else:
- reference_atlas_map = datasets.fetch_atlas_aal()["maps"]
+ reference_atlas_map = datasets.fetch_atlas_aal(verbose=0)["maps"]

# Resample schaefer to atlas file using nearest interpolation to retain labels
resampled_reference_atlas = image.resample_to_img(
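The two hunks above only add `verbose=0` to silence Nilearn's fetcher output. A minimal sketch of the surrounding logic under the same assumptions (hypothetical atlas path; Schaefer branch shown):

```python
import nibabel as nib
from nilearn import datasets, image

atlas = nib.load("atlas_file.nii.gz")  # hypothetical parcellation image

# verbose=0 suppresses console output from the fetcher; the returned "maps" value is unchanged.
reference_atlas_map = datasets.fetch_atlas_schaefer_2018(resolution_mm=2, verbose=0)["maps"]

# Nearest-neighbour interpolation keeps integer label IDs intact instead of blending them.
resampled = image.resample_to_img(reference_atlas_map, atlas, interpolation="nearest")
```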
8 changes: 2 additions & 6 deletions neurocaps/_utils/check_parcel_approach.py
@@ -58,11 +58,7 @@ def _check_parcel_approach(parcel_approach, call="TimeseriesExtractor"):
parcel_dict["Schaefer"].update({"resolution_mm": 1})

# Get atlas
- fetched_schaefer = datasets.fetch_atlas_schaefer_2018(
-     n_rois=parcel_dict["Schaefer"]["n_rois"],
-     yeo_networks=parcel_dict["Schaefer"]["yeo_networks"],
-     resolution_mm=parcel_dict["Schaefer"]["resolution_mm"],
- )
+ fetched_schaefer = datasets.fetch_atlas_schaefer_2018(**parcel_dict["Schaefer"], verbose=0)

parcel_dict["Schaefer"].update({"maps": fetched_schaefer.maps})
network_name = "7Networks_" if parcel_dict["Schaefer"]["yeo_networks"] == 7 else "17Networks_"
@@ -91,7 +87,7 @@ def _check_parcel_approach(parcel_approach, call="TimeseriesExtractor"):
parcel_dict["AAL"].update({"version": "SPM12"})

# Get atlas
- fetched_aal = datasets.fetch_atlas_aal(version=parcel_dict["AAL"]["version"])
+ fetched_aal = datasets.fetch_atlas_aal(version=parcel_dict["AAL"]["version"], verbose=0)
parcel_dict["AAL"].update({"maps": fetched_aal.maps})
parcel_dict["AAL"].update({"nodes": [label for label in fetched_aal.labels]})

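The Schaefer refactor above swaps an explicit keyword list for dictionary unpacking; both calls are equivalent as long as the dictionary holds exactly the fetcher's keyword arguments. A minimal sketch with a hypothetical settings dict:

```python
from nilearn import datasets

settings = {"n_rois": 400, "yeo_networks": 7, "resolution_mm": 1}

# Old style: each keyword spelled out.
fetched = datasets.fetch_atlas_schaefer_2018(
    n_rois=settings["n_rois"], yeo_networks=settings["yeo_networks"], resolution_mm=settings["resolution_mm"]
)

# New style: ** unpacking passes the same keywords, plus verbose=0 to silence the fetcher.
fetched = datasets.fetch_atlas_schaefer_2018(**settings, verbose=0)
```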
108 changes: 56 additions & 52 deletions neurocaps/analysis/cap.py

Large diffs are not rendered by default.

10 changes: 5 additions & 5 deletions neurocaps/analysis/change_dtype.py
@@ -19,15 +19,15 @@ def change_dtype(
"""
Perform Participant-wise Dtype Conversion.
- Changes the dtypes of each participants numpy array. This function uses the ``.astype()`` method from numpy.
- This function can help reduce memory usage. For example, converting a numpy array from "float64" to "float32" can
+ Changes the dtypes of each participant's NumPy array. This function uses the ``.astype()`` method from NumPy.
+ This function can help reduce memory usage. For example, converting a NumPy array from "float64" to "float32" can
halve the memory required, which is particularly useful when analyzing large datasets on a local machine.
Parameters
----------
subject_timeseries_list: :obj:`list[dict[str, dict[str, np.ndarray]]]` or :obj:`list[os.PathLike]`
A list where each element consist of a dictionary mapping subject IDs to their run IDs and associated
- timeseries (TRs x ROIs) as a numpy array. Can also be a list consisting of paths to pickle files
+ timeseries (TRs x ROIs) as a NumPy array. Can also be a list consisting of paths to pickle files
containing this same structure. The expected structure of each dictionary is as follows:
::
@@ -45,7 +45,7 @@
}
dtype: :obj:`bool` or :obj:`np.floating`
- Target data type (e.g "float32" or ``np.float32``) to convert each participant's numpy arrays into.
+ Target data type (e.g., "float32" or ``np.float32``) to convert each participant's NumPy arrays into.
return_dicts: :obj:`bool`, default=True
If True, returns a single dictionary containing the converted input dictionaries. Keys are named "dict_{0}"
@@ -70,7 +70,7 @@
Warning
-------
- **Floating Point Precision**: While this function allows conversion to any valid numpy dtype, it is recommended to
+ **Floating Point Precision**: While this function allows conversion to any valid NumPy dtype, it is recommended to
use floating-point dtypes. Reducing the dtype could introduce rounding errors that may lower the precision of
subsequent analyses as decimal digits are reduced when lower dtypes are requested. Thus, the lowest recommended
floating-point dtype would be "float32", since it allows for memory usage reduction while limiting rounding errors
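The halving claim in the docstring is easy to verify with NumPy directly; a quick check on a hypothetical TRs x ROIs array:

```python
import numpy as np

timeseries = np.zeros((300, 400), dtype="float64")  # hypothetical 300 TRs x 400 ROIs
converted = timeseries.astype("float32")  # the same .astype() call the function relies on

print(timeseries.nbytes)  # 960000 bytes
print(converted.nbytes)   # 480000 bytes -- exactly half
```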
2 changes: 1 addition & 1 deletion neurocaps/analysis/merge.py
@@ -44,7 +44,7 @@ def merge_dicts(
----------
subject_timeseries_list: :obj:`list[dict[str, dict[str, np.ndarray]]]` or :obj:`list[os.PathLike]`
A list where each element consist of a dictionary mapping subject IDs to their run IDs and associated
- timeseries (TRs x ROIs) as a numpy array. Can also be a list consisting of paths to pickle files
+ timeseries (TRs x ROIs) as a NumPy array. Can also be a list consisting of paths to pickle files
containing this same structure. The expected structure of each dictionary is as follows:
::
2 changes: 1 addition & 1 deletion neurocaps/analysis/standardize.py
@@ -34,7 +34,7 @@ def standardize(
----------
subject_timeseries_list: :obj:`list[dict[str, dict[str, np.ndarray]]]` or :obj:`list[os.PathLike]`
A list where each element consist of a dictionary mapping subject IDs to their run IDs and associated
- timeseries (TRs x ROIs) as a numpy array. Can also be a list consisting of paths to pickle files
+ timeseries (TRs x ROIs) as a NumPy array. Can also be a list consisting of paths to pickle files
containing this same structure. The expected structure of each dictionary is as follows:
::
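`change_dtype`, `merge_dicts`, and `standardize` all accept the same `subject_timeseries_list` structure described in their docstrings; a minimal sketch of one input dictionary (hypothetical subject and run IDs):

```python
import numpy as np

subject_timeseries = {
    "101": {
        "run-1": np.random.rand(100, 400),  # TRs x ROIs
        "run-2": np.random.rand(100, 400),
    },
    "102": {
        "run-1": np.random.rand(100, 400),
    },
}
```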
89 changes: 27 additions & 62 deletions neurocaps/extraction/timeseriesextractor.py
@@ -61,9 +61,12 @@ class TimeseriesExtractor(_TimeseriesExtractorGetter):
- "nodes": A list of node names in the order of the label IDs in the parcellation.
- "regions": The regions or networks in the parcellation.
- Refer to documentation from nilearn's ``datasets.fetch_atlas_schaefer_2018`` and ``datasets.fetch_atlas_aal``
- functions for more information about the "Schaefer" and "AAL" sub-keys. Also, refer to the "Note" section below
- for an explanation of the "Custom" sub-keys.
+ Refer to `Nilearn's Fetch Schaefer Documentation
+ <https://nilearn.github.io/stable/modules/generated/nilearn.datasets.fetch_atlas_schaefer_2018.html#nilearn.datasets.fetch_atlas_schaefer_2018>`_
+ and `Nilearn's Fetch AAL Documentation
+ <https://nilearn.github.io/stable/modules/generated/nilearn.datasets.fetch_atlas_aal.html#nilearn.datasets.fetch_atlas_aal>`_
+ for more information about the "Schaefer" and "AAL" sub-keys. Also, refer to the "Note" section below for an
+ explanation of the "Custom" sub-keys.
standardize: {"zscore_sample", "zscore", "psc", True, False}, default="zscore_sample"
Standardizes the timeseries. Refer to ``nilearn.maskers.NiftiLabelsMasker`` for an explanation of each
@@ -79,7 +82,7 @@ class TimeseriesExtractor(_TimeseriesExtractorGetter):
Filters out signals below the specified cutoff frequency.
fwhm: :obj:`float`, :obj:`int`, or :obj:`None`, default=None
- Applies spatial smoothing to data (in millimeters). Note that using parcellations already averages voxels
+ Applies spatial smoothing to data (in millimeters). Note that using a parcellation already averages voxels
within parcel boundaries, which can improve signal-to-noise ratio (SNR) assuming Gaussian noise
distribution. However, smoothing may also blur parcel boundaries.
@@ -114,10 +117,13 @@ class TimeseriesExtractor(_TimeseriesExtractorGetter):
- "n_after": An integer indicating the number of volumes to scrub after to the flagged volume. Hence,
if frame 5 is flagged and "n_after" is 2, then volumes 5, 6, and 7 are scrubbed.
- "use_sample_mask": A boolean value. If True, a sample mask is generated and passed to the ``sample_mask``
- parameter in nilearn's ``NiftiLabelsMasker`` to censor prior to nuisance regression. Internally,
+ parameter in Nilearn's ``NiftiLabelsMasker`` to censor prior to nuisance regression. Internally,
``clean__extrapolate`` is set to False and passed to ``NiftiLabelsMasker``, which prevents censored
volumes at the end from being interpolated prior to applying the butterworth filter. See
- documentation from ``nilearn.signal_clean`` and ``nilearn.maskers.NiftiLabelsMasker`` for how nilearn
+ `Nilearn's Signal Clean Documentation
+ <https://nilearn.github.io/stable/modules/generated/nilearn.signal.clean.html>`_ and
+ `Nilearn's NiftiLabelsMasker Documentation
+ <https://nilearn.github.io/stable/modules/generated/nilearn.maskers.NiftiLabelsMasker.html>`_ for how Nilearn
handles censored volumes when ``sample_mask`` is used. If this key is set to False, data is only censored
after nuisance regression, which is the default behavior.
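A minimal sketch of the censoring logic described above, with hypothetical FD values; "n_after" behaves as documented, and an analogous "n_before" is assumed here (this is an illustration, not neurocaps' internal code):

```python
import numpy as np

fd = np.array([0.1, 0.2, 0.9, 0.1, 0.1, 1.2, 0.1])  # hypothetical framewise displacement per volume
threshold, n_before, n_after = 0.5, 1, 2

sample_mask = np.ones(fd.size, dtype=bool)  # True = keep, False = censor
for idx in np.where(fd > threshold)[0]:
    sample_mask[max(idx - n_before, 0) : idx + n_after + 1] = False

print(sample_mask)  # [ True False False False False False False]
```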
@@ -149,7 +155,7 @@ class TimeseriesExtractor(_TimeseriesExtractorGetter):
"max" is set to five, then five dummy volumes will be discarded.
dtype: :obj:`str` or "auto", default=None
- The numpy dtype the NIfTI images are converted to when passed to nilearn's ``load_img`` function.
+ The NumPy dtype the NIfTI images are converted to when passed to Nilearn's ``load_img`` function.
Properties
@@ -204,10 +210,10 @@ class initialization using ``self.space = "New Space"`` if the template space ne
extraction.
n_cores: :obj:`int` or :obj:`None`
- Number of cores used for multiprocessing with joblib.
+ Number of cores used for multiprocessing with Joblib.
subject_timeseries: :obj:`dict[str, dict[str, np.ndarray]]` or :obj:`None`
- A dictionary mapping subject IDs to their run IDs and their associated timeseries (TRs x ROIs) as a numpy array.
+ A dictionary mapping subject IDs to their run IDs and their associated timeseries (TRs x ROIs) as a NumPy array.
Can also be a path to a pickle file containing this same structure. If this property needs to be deleted due
to memory issues, ``delattr(self, "_subject_timeseries")`` (version < 0.18.10) or
``del self.subject_timeseries`` (version >= 0.18.10) can be used to delete this property and only have it
@@ -517,7 +523,7 @@ def get_bold(
then ``pipeline_name = "fmriprep/fmriprep-20.0.0"``.
n_cores: :obj:`int` or :obj:`None`, default=None
- The number of cores to use for multiprocessing with joblib. The default backend for joblib is used.
+ The number of cores to use for multiprocessing with Joblib. The "loky" backend is used.
parallel_log_config: :obj:`dict[str, Union[multiprocessing.Manager.Queue, int]]`
Passes a user-defined managed queue and logging level to the internal timeseries extraction function
@@ -531,46 +537,8 @@
- "level": The logging level (e.g. ``logging.INFO``, ``logging.WARNING``). If not specified, the default
level is ``logging.INFO``.
- ::
-
-     import logging
-     from logging.handlers import QueueListener
-     from multiprocessing import Manager
-
-     # Configure root with FileHandler
-     root_logger = logging.getLogger()
-     root_logger.setLevel(logging.INFO)
-     file_handler = logging.FileHandler('neurocaps.log')
-     file_handler.setFormatter(logging.Formatter('%(asctime)s %(name)s [%(levelname)s] %(message)s'))
-     root_logger.addHandler(file_handler)
-
-     if __name__ == "__main__":
-         # Import the TimeseriesExtractor
-         from neurocaps.extraction import TimeseriesExtractor
-
-         # Setup managed queue
-         manager = Manager()
-         queue = manager.Queue()
-
-         # Set up the queue listener
-         listener = QueueListener(queue, *root_logger.handlers)
-
-         # Start listener
-         listener.start()
-
-         extractor = TimeseriesExtractor()
-
-         # Use the `parallel_log_config` parameter to pass queue and the logging level
-         extractor.get_bold(
-             bids_dir="path/to/bids/dir",
-             task="rest",
-             tr=2,
-             n_cores=5,
-             parallel_log_config={"queue": queue, "level": logging.WARNING},
-         )
-
-         # Stop listener
-         listener.stop()
+ Refer to the `neurocaps Logging Documentation <https://neurocaps.readthedocs.io/en/stable/logging.html>`_
+ for a detailed example of setting up this parameter.
.. versionchanged:: 0.18.0 moved from being the last parameter, to being underneath ``n_cores``
@@ -587,14 +555,13 @@
-------
self
- .. versionadded:: 0.19.3
Note
----
**Subject Timeseries Dictionary**: This method stores the extracted timeseries of all subjects
in ``self.subject_timeseries``. The structure is a dictionary mapping subject IDs to their run IDs and
- their associated timeseries (TRs x ROIs) as a numpy array:
+ their associated timeseries (TRs x ROIs) as a NumPy array:
::
@@ -613,18 +580,18 @@ def get_bold(
By default, "run-0", will be used if run IDs are not specified in the NifTI file.
**Parcellation & Nuisance Regression**: For timeseries extraction, nuisance regression, and spatial
- dimensionality reduction using a parcellation, nilearn's ``NiftiLabelsMasker`` function is used. If requested,
+ dimensionality reduction using a parcellation, Nilearn's ``NiftiLabelsMasker`` function is used. If requested,
dummy scans are removed from the NIfTI images and confound dataset prior to timeseries extraction. For volumes
exceeding a specified framewise displacement (FD) threshold, if the "use_sample_mask" key in the
``fd_threshold`` dictionary is set to True, then a boolean sample mask is generated (where False indicates the
- high motion volumes) and passed to the ``sample_mask`` parameter in nilearn's ``NiftiLabelsMasker``. If,
+ high motion volumes) and passed to the ``sample_mask`` parameter in Nilearn's ``NiftiLabelsMasker``. If the
"use_sample_mask" key is False or not specified in the ``fd_threshold`` dictionary, then censoring is done
after nuisance regression, which is the default behavior.
**Extraction of Task Conditions**: when extracting specific conditions, ``int`` to round down for the
beginning scan index ``start_scan = int(onset/tr)`` and ``math.ceil`` is used to round up for the ending scan
index ``end_scan = math.ceil((onset + duration)/tr)``. Filtering a specific condition from the
- timeseries is done after nuisance regression.  Additionally, if the "use_sample_mask" key in the
+ timeseries is done after nuisance regression. Additionally, if the "use_sample_mask" key in the
``fd_threshold`` dictionary is set to True, then the truncated 2D timeseries is temporarily padded to
ensure the correct rows corresponding to the condition are obtained.
"""
@@ -679,7 +646,7 @@ def get_bold(
for subj_id in self._subject_ids
]

- parallel = Parallel(return_as="generator", n_jobs=self._n_cores)
+ parallel = Parallel(return_as="generator", n_jobs=self._n_cores, backend="loky")
outputs = parallel(delayed(_extract_timeseries)(*args) for args in args_list)

for output in outputs:
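The only change here is pinning `backend="loky"` instead of relying on Joblib's default. A minimal sketch of the pattern, assuming joblib >= 1.3 for `return_as="generator"` and a hypothetical worker in place of `_extract_timeseries`:

```python
from joblib import Parallel, delayed

def extract(subject_id):  # hypothetical stand-in for the real worker
    return subject_id.upper()

# loky is process-based, so workers share no state; the generator yields
# results lazily instead of collecting them all into a list first.
parallel = Parallel(return_as="generator", n_jobs=2, backend="loky")
outputs = parallel(delayed(extract)(s) for s in ["sub-01", "sub-02"])

for output in outputs:
    print(output)
```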
@@ -1007,7 +974,6 @@ def timeseries_to_pickle(self, output_dir: Union[str, os.PathLike], filename: Op
-------
self
- .. versionadded:: 0.19.3
"""
if not self.subject_timeseries:
@@ -1086,7 +1052,6 @@ def visualize_bold(
-------
self
- .. versionadded:: 0.19.3
Note
@@ -1164,7 +1129,7 @@ def _get_roi_indices(self, roi_indx, parcellation_name):
if "Custom" in self._parcel_approach and "nodes" not in self._parcel_approach["Custom"]:
_check_parcel_approach(parcel_approach=self._parcel_approach, call="visualize_bold")

- plot_indxs = self._parcel_approach[parcellation_name]["nodes"].index(roi_indx)
+ plot_indxs = list(self._parcel_approach[parcellation_name]["nodes"]).index(roi_indx)
else:
if all([isinstance(indx, int) for indx in roi_indx]):
plot_indxs = np.array(roi_indx)
@@ -1174,7 +1139,7 @@
_check_parcel_approach(parcel_approach=self._parcel_approach, call="visualize_bold")

plot_indxs = np.array(
-     [self._parcel_approach[parcellation_name]["nodes"].index(index) for index in roi_indx]
+     [list(self._parcel_approach[parcellation_name]["nodes"]).index(index) for index in roi_indx]
)
else:
raise ValueError("All elements in `roi_indx` need to be all strings or all integers.")
@@ -1187,8 +1152,8 @@ def _get_region_indices(self, region, parcellation_name):
_check_parcel_approach(parcel_approach=self._parcel_approach, call="visualize_bold")
else:
plot_indxs = np.array(
self._parcel_approach["Custom"]["regions"][region]["lh"]
+ self._parcel_approach["Custom"]["regions"][region]["rh"]
list(self._parcel_approach["Custom"]["regions"][region]["lh"])
+ list(self._parcel_approach["Custom"]["regions"][region]["rh"])
)
else:
plot_indxs = np.array(
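The three hunks above share one root cause: `list.index` exists, but `numpy.ndarray` has no `.index` method, so "nodes" (and the "lh"/"rh" region lists) must be coerced to lists when a parcel approach stores them as arrays. A minimal reproduction:

```python
import numpy as np

nodes = np.array(["Visual", "Motor", "DMN"])  # hypothetical node names stored as an array

# nodes.index("Motor") would raise AttributeError: 'numpy.ndarray' object has no attribute 'index'
plot_indxs = list(nodes).index("Motor")
print(plot_indxs)  # 1
```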
Binary file modified tests/data/HCPex_parcel_approach.pkl