diff --git a/.github/workflows/run_tests_cli.yml b/.github/workflows/run_tests_cli.yml index eb3b15512..c030779bc 100644 --- a/.github/workflows/run_tests_cli.yml +++ b/.github/workflows/run_tests_cli.yml @@ -12,54 +12,40 @@ on: jobs: tests_cli: - runs-on: ubuntu-22.04 - - # only trigger update on upstream repo if: github.repository_owner == 'cpp-lln-lab' - strategy: fail-fast: false matrix: python-version: ['3.8', '3.9', '3.10', '3.11', '3.12'] - steps: - - name: Install dependencies run: | sudo apt-get -y -qq update sudo apt-get -y install octave liboctave-dev - - name: Info - run: | - octave --version - + run: octave --version - uses: actions/setup-node@v4 with: node-version: 18 - - uses: actions/setup-python@v5 name: Set up Python ${{ matrix.python-version }} with: python-version: ${{ matrix.python-version }} - - name: Clone bidspm uses: actions/checkout@v4 with: submodules: recursive fetch-depth: 0 - - name: Install validators run: | make install pip install .[test] - - name: Run tests and generate coverage report run: | coverage erase coverage run --source src -m pytest coverage xml - # - name: Code coverage # uses: codecov/codecov-action@v4 # with: @@ -67,3 +53,67 @@ jobs: # flags: cli # name: codecov-cli # fail_ci_if_error: false + + boutiques: + runs-on: ubuntu-22.04 + if: github.repository_owner == 'cpp-lln-lab' + steps: + - name: Clone bidspm + uses: actions/checkout@v4 + with: + submodules: recursive + fetch-depth: 0 + + - uses: actions/setup-node@v4 + with: + node-version: 18 + - uses: actions/setup-python@v5 + name: Set up Python + with: + python-version: '3.12' + + - name: Install dependencies + run: | + sudo apt-get -y -qq update + sudo apt-get -y install unzip wget git-annex + - name: Install datalad + run: | + python -m pip install --upgrade pip setuptools + pip install datalad + + - name: Get data + run: | + cd demos/openneuro/ + make data_ds000114_verbal + + - name: Install SPM + run: git clone https://github.com/spm/spm12.git --depth 1 + - 
name: Install octave + run: | + sudo apt-get -y -qq update + sudo apt-get -y install \ + octave \ + liboctave-dev\ + octave-common \ + octave-io \ + octave-image \ + octave-signal \ + octave-statistics + - name: Compile SPM + run: | + make -C spm12/src PLATFORM=octave distclean + make -C spm12/src PLATFORM=octave + make -C spm12/src PLATFORM=octave install + octave $OCTFLAGS --eval "addpath(fullfile(pwd, 'spm12')); savepath();" + - name: Info + run: octave --version + + - name: Install + run: | + make install + pip install boutiques + + - name: Run via boutiques + run: | + bosh exec launch --no-container boutiques/bidspm_3.1.1.json boutiques/invocation_smooth.json + bosh exec launch --no-container boutiques/bidspm_3.1.1.json boutiques/invocation_stats.json diff --git a/WIP/boutiques/WIP.md b/WIP/boutiques/WIP.md deleted file mode 100644 index 7d7b85b53..000000000 --- a/WIP/boutiques/WIP.md +++ /dev/null @@ -1,16 +0,0 @@ -# Boutiques descriptor - -This directory contains a -[Boutiques](https://github.com/boutiques/boutiques) descriptor for the -BIDS app and an example of invocation. - -## How to use - -* Install Boutiques: `pip install boutiques` -* Run the example: `bosh ./bids-app-example.json -i ./invocation.json -e -d` - (assumes `ds001` is a valid dataset in the current working - directory.) - -See the BIDS execution BEP fore more info: - -https://docs.google.com/document/d/104HLZedFtx0TaXEUwd7eyWvJUlc0CcSUtCzwjNgmGxE/edit diff --git a/WIP/create_boutiques_descriptor.py b/WIP/create_boutiques_descriptor.py deleted file mode 100644 index 313c7730a..000000000 --- a/WIP/create_boutiques_descriptor.py +++ /dev/null @@ -1,15 +0,0 @@ -import boutiques.creator as bc - -from bidspm.parsers import common_parser - -newDescriptor = bc.CreateDescriptor(common_parser(), execname="bidspm") - -newDescriptor.save("my-new-descriptor.json") - -# args = common_parser.parse_args() -# invoc = newDescriptor.createInvocation(args) - -# # Then, if you want to save them to a file... 
-# import json -# with open('my-inputs.json', 'w') as fhandle: -# fhandle.write(json.dumps(invoc, indent=4)) diff --git a/boutiques/README.md b/boutiques/README.md new file mode 100644 index 000000000..becc0c227 --- /dev/null +++ b/boutiques/README.md @@ -0,0 +1,14 @@ +# Boutiques descriptor + +This directory contains a +[Boutiques](https://github.com/boutiques/boutiques) descriptor for the BIDS app and an example of invocation. + +## How to use + +* Install Boutiques: `pip install boutiques` +* Run the example: + +```bash +bosh exec launch --no-container boutiques/bidspm_3.1.1.json boutiques/invocation_smooth.json +bosh exec launch --no-container boutiques/bidspm_3.1.1.json boutiques/invocation_stats.json +``` diff --git a/boutiques/bidspm_3.1.1.json b/boutiques/bidspm_3.1.1.json new file mode 100644 index 000000000..5f3156f25 --- /dev/null +++ b/boutiques/bidspm_3.1.1.json @@ -0,0 +1,488 @@ +{ + "name": "tool name", + "description": "tool description", + "tool-version": "3.1.1", + "schema-version": "0.5", + "command-line": "bidspm [VERSION] [BIDS_DIR] [OUTPUT_DIR] [ANALYSIS_LEVEL] [COMMAND] [PARTICIPANT_LABEL] [VERBOSITY] [BIDS_FILTER_FILE] [OPTIONS] [BOILERPLATE_ONLY] [ANAT_ONLY] [DUMMY_SCANS] [TASK] [SPACE] [FWHM] [DRY_RUN] [SKIP_VALIDATION] [IGNORE] [ROI_NAME] [ROI_ATLAS] [PREPROC_DIR] [HEMISPHERE] [MODEL_FILE] [KEEP_RESIDUALS] [DESIGN_ONLY] [USE_DUMMY_REGRESSOR] [ROI_BASED] [ROI_DIR] [NODE_NAME] [CONCATENATE] [MODELS_DIR]", + "inputs": [ + { + "name": "version", + "id": "version", + "description": "show program's version number and exit", + "type": "Flag", + "optional": true, + "command-line-flag": "-v", + "value-key": "[VERSION]" + }, + { + "name": "bids_dir", + "id": "bids_dir", + "description": "Fullpath to the directory with the input dataset formatted according to the BIDS standard", + "type": "String", + "optional": false, + "value-key": "[BIDS_DIR]" + }, + { + "name": "output_dir", + "id": "output_dir", + "description": "Fullpath to the directory where 
the output files will be stored", + "type": "String", + "optional": false, + "value-key": "[OUTPUT_DIR]" + }, + { + "name": "analysis_level", + "id": "analysis_level", + "description": "Level of the analysis that will be performed. Multiple participant level analyses can be run independently (in parallel) using the same ``output_dir``", + "type": "String", + "optional": false, + "value-key": "[ANALYSIS_LEVEL]", + "value-choices": [ + "subject", + "dataset" + ] + }, + { + "name": "participant_label", + "id": "participant_label", + "description": "The label(s) of the participant(s) that should be analyzed. The label corresponds to sub- from the BIDS spec (so it does not include \"sub-\"). If this parameter is not provided all subjects should be analyzed. Multiple participants can be specified with a space separated list. Can be a regular expression. Example: ``'01', '03', '08'``", + "type": "String", + "list": true, + "optional": true, + "command-line-flag": "--participant_label", + "value-key": "[PARTICIPANT_LABEL]" + }, + { + "name": "verbosity", + "id": "verbosity", + "description": "Verbosity level", + "type": "Number", + "optional": true, + "default-value": 2, + "command-line-flag": "--verbosity", + "value-key": "[VERBOSITY]", + "value-choices": [ + 0, + 1, + 2, + 3 + ] + }, + { + "name": "bids_filter_file", + "id": "bids_filter_file", + "description": "Fullpath to a JSON file describing custom BIDS input filters", + "type": "String", + "optional": true, + "command-line-flag": "--bids_filter_file", + "value-key": "[BIDS_FILTER_FILE]" + }, + { + "name": "options", + "id": "options", + "description": "Path to JSON file containing bidspm options", + "type": "String", + "optional": true, + "command-line-flag": "--options", + "value-key": "[OPTIONS]" + }, + { + "name": "boilerplate_only", + "id": "boilerplate_only", + "description": "When set to ``true`` this will only generate figures describing the raw data, the methods section boilerplate", + 
"type": "Flag", + "optional": true, + "command-line-flag": "--boilerplate_only", + "value-key": "[BOILERPLATE_ONLY]" + }, + { + "name": "anat_only", + "id": "anat_only", + "description": "If preprocessing should be done only on anatomical data", + "type": "Flag", + "optional": true, + "command-line-flag": "--anat_only", + "value-key": "[ANAT_ONLY]" + }, + { + "name": "dummy_scans", + "id": "dummy_scans", + "description": "Number of dummy scans to remove", + "type": "Number", + "optional": true, + "command-line-flag": "--dummy_scans", + "value-key": "[DUMMY_SCANS]" + }, + { + "name": "task", + "id": "task", + "description": "Tasks of the input data", + "type": "String", + "list": true, + "optional": true, + "command-line-flag": "--task", + "value-key": "[TASK]" + }, + { + "name": "space", + "id": "space", + "description": "Space of the input data", + "type": "String", + "list": true, + "optional": true, + "default-value": [ + "IXI549Space" + ], + "command-line-flag": "--space", + "value-key": "[SPACE]" + }, + { + "name": "fwhm", + "id": "fwhm", + "description": "The full width at half maximum of the gaussian kernel to apply to the preprocessed data or to use as inputs for the statistical analysis", + "type": "Number", + "optional": true, + "default-value": 6.0, + "command-line-flag": "--fwhm", + "value-key": "[FWHM]" + }, + { + "name": "dry_run", + "id": "dry_run", + "description": "When set to ``true`` this will generate and save the SPM batches, but not actually run them", + "type": "Flag", + "optional": true, + "command-line-flag": "--dry_run", + "value-key": "[DRY_RUN]" + }, + { + "name": "skip_validation", + "id": "skip_validation", + "description": "To skip BIDS dataset and BIDS stats model validation", + "type": "Flag", + "optional": true, + "command-line-flag": "--skip_validation", + "value-key": "[SKIP_VALIDATION]" + }, + { + "name": "ignore", + "id": "ignore", + "description": "To specify steps to skip", + "type": "String", + "list": true, + "optional": 
true, + "command-line-flag": "--ignore", + "value-key": "[IGNORE]", + "value-choices": [ + "fieldmaps", + "slicetiming", + "unwarp", + "qa" + ] + }, + { + "name": "roi_name", + "id": "roi_name", + "description": "Name of the roi to create. If the ROI does not exist in the atlas, the list of available ROI will be returned in the error message", + "type": "String", + "list": true, + "optional": true, + "command-line-flag": "--roi_name", + "value-key": "[ROI_NAME]" + }, + { + "name": "roi_atlas", + "id": "roi_atlas", + "description": "Atlas to create the regions of interest from", + "type": "String", + "optional": true, + "command-line-flag": "--roi_atlas", + "value-key": "[ROI_ATLAS]", + "value-choices": [ + "anatomy_toobox", + "wang", + "visfatlas", + "glasser", + "neuromorphometrics", + "hcpex" + ] + }, + { + "name": "preproc_dir", + "id": "preproc_dir", + "description": "Fullpath to the directory with the preprocessed data", + "type": "String", + "optional": true, + "command-line-flag": "--preproc_dir", + "value-key": "[PREPROC_DIR]" + }, + { + "name": "hemisphere", + "id": "hemisphere", + "description": "To specify steps to skip", + "type": "String", + "list": true, + "optional": true, + "command-line-flag": "--hemisphere", + "value-key": "[HEMISPHERE]", + "value-choices": [ + "L", + "R" + ] + }, + { + "name": "model_file", + "id": "model_file", + "description": "Fullpath to BIDS stats model", + "type": "String", + "optional": true, + "command-line-flag": "--model_file", + "value-key": "[MODEL_FILE]" + }, + { + "name": "keep_residuals", + "id": "keep_residuals", + "description": "Keep GLM residuals", + "type": "Flag", + "optional": true, + "command-line-flag": "--keep_residuals", + "value-key": "[KEEP_RESIDUALS]" + }, + { + "name": "design_only", + "id": "design_only", + "description": "To only specify the GLM without estimating it", + "type": "Flag", + "optional": true, + "command-line-flag": "--design_only", + "value-key": "[DESIGN_ONLY]" + }, + { + "name": 
"use_dummy_regressor", + "id": "use_dummy_regressor", + "description": "If true any missing condition will be modelled by a dummy regressor of ``NaN``", + "type": "Flag", + "optional": true, + "command-line-flag": "--use_dummy_regressor", + "value-key": "[USE_DUMMY_REGRESSOR]" + }, + { + "name": "roi_based", + "id": "roi_based", + "description": "Use to run a ROI-based analysis", + "type": "Flag", + "optional": true, + "command-line-flag": "--roi_based", + "value-key": "[ROI_BASED]" + }, + { + "name": "roi_dir", + "id": "roi_dir", + "description": "Fullpath to the directory with the regions of interest", + "type": "String", + "optional": true, + "command-line-flag": "--roi_dir", + "value-key": "[ROI_DIR]" + }, + { + "name": "node_name", + "id": "node_name", + "description": "Model node to run", + "type": "String", + "optional": true, + "command-line-flag": "--node_name", + "value-key": "[NODE_NAME]" + }, + { + "name": "concatenate", + "id": "concatenate", + "description": "To create 4D image of all the beta and contrast images of the conditions of interest included in the run level design matrix", + "type": "Flag", + "optional": true, + "command-line-flag": "--concatenate", + "value-key": "[CONCATENATE]" + }, + { + "name": "models_dir", + "id": "models_dir", + "description": "Fullpath to the directory with the models", + "type": "String", + "optional": true, + "command-line-flag": "--models_dir", + "value-key": "[MODELS_DIR]" + }, + { + "name": "command", + "id": "command", + "description": "Choose a subcommand", + "type": "String", + "optional": false, + "value-key": "[COMMAND]", + "value-choices": [ + "preprocess", + "smooth", + "default_model", + "create_roi", + "stats", + "contrasts", + "results", + "bms" + ], + "value-requires": { + "preprocess": [], + "smooth": [], + "default_model": [], + "create_roi": [ + "roi_name" + ], + "stats": [ + "model_file" + ], + "contrasts": [], + "results": [], + "bms": [ + "models_dir" + ] + }, + "value-disables": { + 
"preprocess": [ + "roi_based", + "keep_residuals", + "node_name", + "model_file", + "preproc_dir", + "hemisphere", + "roi_atlas", + "concatenate", + "models_dir", + "roi_name", + "roi_dir", + "design_only", + "use_dummy_regressor" + ], + "smooth": [ + "roi_based", + "keep_residuals", + "node_name", + "model_file", + "preproc_dir", + "hemisphere", + "roi_atlas", + "concatenate", + "dummy_scans", + "models_dir", + "boilerplate_only", + "roi_name", + "roi_dir", + "design_only", + "use_dummy_regressor", + "skip_validation", + "ignore" + ], + "default_model": [ + "roi_based", + "keep_residuals", + "node_name", + "model_file", + "preproc_dir", + "hemisphere", + "roi_atlas", + "concatenate", + "dummy_scans", + "models_dir", + "fwhm", + "boilerplate_only", + "dry_run", + "roi_name", + "roi_dir", + "design_only", + "use_dummy_regressor", + "anat_only" + ], + "create_roi": [ + "roi_based", + "keep_residuals", + "node_name", + "model_file", + "concatenate", + "dummy_scans", + "models_dir", + "fwhm", + "boilerplate_only", + "task", + "dry_run", + "roi_dir", + "design_only", + "use_dummy_regressor", + "skip_validation", + "anat_only", + "ignore", + "verbosity" + ], + "stats": [ + "hemisphere", + "dummy_scans", + "models_dir", + "anat_only" + ], + "contrasts": [ + "roi_based", + "keep_residuals", + "hemisphere", + "roi_atlas", + "dummy_scans", + "models_dir", + "roi_name", + "participant_label", + "roi_dir", + "design_only", + "use_dummy_regressor", + "skip_validation", + "anat_only", + "ignore" + ], + "results": [ + "roi_based", + "keep_residuals", + "node_name", + "hemisphere", + "concatenate", + "dummy_scans", + "models_dir", + "roi_name", + "roi_dir", + "design_only", + "use_dummy_regressor", + "anat_only", + "ignore" + ], + "bms": [ + "space", + "roi_based", + "keep_residuals", + "node_name", + "model_file", + "preproc_dir", + "hemisphere", + "roi_atlas", + "concatenate", + "dummy_scans", + "boilerplate_only", + "task", + "roi_name", + "roi_dir", + "design_only", + 
"use_dummy_regressor", + "anat_only", + "ignore" + ] + } + } + ], + "tags": {}, + "suggested-resources": { + "cpu-cores": 1, + "ram": 1, + "walltime-estimate": 60 + } +} diff --git a/boutiques/create_boutiques_descriptor.py b/boutiques/create_boutiques_descriptor.py new file mode 100644 index 000000000..ea92260aa --- /dev/null +++ b/boutiques/create_boutiques_descriptor.py @@ -0,0 +1,25 @@ +import json + +import boutiques.creator as bc +from bidspm.parsers import sub_command_parser + +newDescriptor = bc.CreateDescriptor(sub_command_parser(), execname="bidspm") + +newDescriptor.save("bidspm.json") + +cmd = ( + "./demos/openneuro/inputs/ds000114-fmriprep " + "./demos/openneuro/outputs/ds000114/derivatives " + "subject smooth " + "--participant_label 01 02 " + "--fwhm 6 " + "--task overtverbgeneration overtwordrepetition covertverbgeneration " + "--space MNI152NLin2009cAsym" +) + +parser = sub_command_parser() +args = parser.parse_args(cmd.split(" ")) +invoc = newDescriptor.createInvocation(args) + +with open("my-inputs.json", "w") as fhandle: + fhandle.write(json.dumps(invoc, indent=4)) diff --git a/boutiques/invocation_smooth.json b/boutiques/invocation_smooth.json new file mode 100644 index 000000000..887260ec1 --- /dev/null +++ b/boutiques/invocation_smooth.json @@ -0,0 +1,22 @@ +{ + "bids_dir": "./demos/openneuro/inputs/ds000114-fmriprep", + "output_dir": "./demos/openneuro/outputs/ds000114/derivatives", + "analysis_level": "subject", + "command": "smooth", + "participant_label": [ + "01", + "02" + ], + "verbosity": 2, + "task": [ + "overtverbgeneration", + "overtwordrepetition", + "covertverbgeneration" + ], + "space": [ + "MNI152NLin2009cAsym" + ], + "fwhm": 6.0, + "anat_only": false, + "dry_run": false +} diff --git a/boutiques/invocation_stats.json b/boutiques/invocation_stats.json new file mode 100644 index 000000000..e3c1fdb27 --- /dev/null +++ b/boutiques/invocation_stats.json @@ -0,0 +1,17 @@ +{ + "bids_dir": "./demos/openneuro/inputs/ds000114", + 
"output_dir": "./demos/openneuro/outputs/ds000114/derivatives", + "analysis_level": "subject", + "command": "stats", + "participant_label": [ + "01", + "02" + ], + "preproc_dir": "./demos/openneuro/outputs/ds000114/derivatives/bidspm-preproc", + "verbosity": 2, + "fwhm": 6.0, + "model_file": "./demos/openneuro/models/model-ds000114_desc-testRetestVerbal_smdl.json", + "dry_run": false, + "skip_validation": true, + "roi_atlas": "hcpex" +} diff --git a/demos/openneuro/Makefile b/demos/openneuro/Makefile index b540e3c55..e03fb257d 100644 --- a/demos/openneuro/Makefile +++ b/demos/openneuro/Makefile @@ -44,6 +44,14 @@ data_ds000114_fmriprep: cd inputs/ds000114-fmriprep && datalad get sub-0*/ses-*/func/*tsv -J 12 cd inputs/ds000114-fmriprep && datalad get sub-0*/ses-*/func/*json -J 12 +data_ds000114_verbal: data_ds000114_fmriprep + mkdir -p inputs + cd inputs && datalad install ///openneuro/ds000114 + cd inputs/ds000114-fmriprep && datalad get sub-0[1-2]/anat/*MNI*desc-preproc*.nii.gz -J 12 + cd inputs/ds000114-fmriprep && datalad get sub-0[1-2]/ses-*/func/*MNI*_mask.nii.gz -J 12 + cd inputs/ds000114-fmriprep && datalad get sub-0[1-2]/ses-*/func/*verb*MNI*desc-preproc*bold.nii.gz -J 12 + cd inputs/ds000114-fmriprep && datalad get sub-0[1-2]/ses-*/func/*word*MNI*desc-preproc*bold.nii.gz -J 12 + data_ds000114: data_ds000114_fmriprep mkdir -p inputs cd inputs && datalad install ///openneuro/ds000114 diff --git a/demos/openneuro/ds000114_run.m b/demos/openneuro/ds000114_run.m index 6f9df24b0..98c4040a9 100644 --- a/demos/openneuro/ds000114_run.m +++ b/demos/openneuro/ds000114_run.m @@ -10,6 +10,7 @@ SMOOTH = false; TASK = 'verbal'; % 'verbal' / 'linebisection' +FWHM = 6; % The directory where the data are located root_dir = fileparts(mfilename('fullpath')); @@ -50,7 +51,7 @@ 'action', 'smooth', ... 'task', task, ... 'space', space, ... - 'fwhm', 8, ... + 'fwhm', FWHM, ... 'verbosity', 3); %#ok<*UNRCH> end @@ -65,7 +66,7 @@ 'model_file', model_file, ... 
'roi_atlas', 'hcpex', ... 'space', space, ... - 'fwhm', 8, ... + 'fwhm', FWHM, ... 'skip_validation', true, ... 'options', opt, ... 'verbosity', 3); diff --git a/src/bidspm/parsers.py b/src/bidspm/parsers.py index 2256d0412..89dea0df9 100644 --- a/src/bidspm/parsers.py +++ b/src/bidspm/parsers.py @@ -74,7 +74,7 @@ def _base_parser() -> ArgumentParser: parser.add_argument( "analysis_level", help=""" - Level of the analysis that will be performed. + Level of the analysis that will be performed. Multiple participant level analyses can be run independently (in parallel) using the same ``output_dir``. """,