From d2b10c65b8f32b82a51fc121d6a75d60d7b5af94 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Tue, 28 May 2024 21:05:22 +0000
Subject: [PATCH] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 .pre-commit-config.yaml         |  2 +-
 README.md                       | 12 ++++++------
 docs/requirements.txt           |  4 ++--
 pyproject.toml                  |  2 +-
 setup.py                        | 11 +++++------
 src/scrnaseq/list_datasets.py   |  2 +-
 src/scrnaseq/polish_dataset.py  |  2 --
 src/scrnaseq/save_dataset.py    |  2 +-
 src/scrnaseq/search_datasets.py |  5 +----
 src/scrnaseq/utils.py           |  2 +-
 tests/conftest.py               | 11 +++++------
 tests/test_save_dataset.py      |  3 ++-
 12 files changed, 26 insertions(+), 32 deletions(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 85550b3..d7f72c3 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -49,4 +49,4 @@ repos:
 # - repo: https://github.com/codespell-project/codespell
 #   rev: v2.2.5
 #   hooks:
-#   - id: codespell
\ No newline at end of file
+#   - id: codespell
diff --git a/README.md b/README.md
index deee664..cc6807f 100644
--- a/README.md
+++ b/README.md
@@ -97,7 +97,7 @@ print(sce)
 # alternative_experiments(2): ['repeat', 'ERCC']
 # row_pairs(0): []
 # column_pairs(0): []
-# metadata(0): 
+# metadata(0):
 ```
 
 For studies that generate multiple datasets, the dataset of interest must be explicitly requested via the `path` argument:
@@ -114,12 +114,12 @@ print(sce)
 # row_names(20125): ['A1BG', 'A1CF', 'A2M', ..., 'ZZEF1', 'ZZZ3', 'pk']
 # column_data columns(2): ['donor', 'label']
 # column_names(8569): ['human1_lib1.final_cell_0001', 'human1_lib1.final_cell_0002', 'human1_lib1.final_cell_0003', ..., 'human4_lib3.final_cell_0699', 'human4_lib3.final_cell_0700', 'human4_lib3.final_cell_0701']
-# main_experiment_name: 
+# main_experiment_name:
 # reduced_dims(0): []
 # alternative_experiments(0): []
 # row_pairs(0): []
 # column_pairs(0): []
-# metadata(0): 
+# metadata(0):
 ```
 
 By default, array data is loaded as a file-backed `DelayedArray` from the [HDF5Array](https://github.com/BiocPy/HDF5Array) package. Setting `realize_assays=True` and/or `realize_reduced_dims=True` will coerce file-backed arrays to numpy or scipy sparse (csr/csc) objects.
@@ -136,7 +136,7 @@ print(sce)
 # row_names(20125): ['A1BG', 'A1CF', 'A2M', ..., 'ZZEF1', 'ZZZ3', 'pk']
 # column_data columns(2): ['donor', 'label']
 # column_names(8569): ['human1_lib1.final_cell_0001', 'human1_lib1.final_cell_0002', 'human1_lib1.final_cell_0003', ..., 'human4_lib3.final_cell_0699', 'human4_lib3.final_cell_0700', 'human4_lib3.final_cell_0701']
-# main_experiment_name: 
+# main_experiment_name:
 # reduced_dims(0): []
 # alternative_experiments(0): []
 # row_pairs(0): []
@@ -215,12 +215,12 @@ Want to contribute your own dataset to this package? It's easy! Just follow thes
 - A Python file containing the code used to assemble the dataset. This should be added to the [`scripts/`](https://github.com/BiocPy/scRNAseq/tree/master/scripts) directory of this package, in order to provide some record of how the dataset was created.
 
 5. Wait for us to grant temporary upload permissions to your GitHub account.
- 
+
 6. Upload your staging directory to [**gypsum** backend](https://github.com/ArtifactDB/gypsum-worker) with `upload_dataset()`. On the first call to this function, it will automatically prompt you to log into GitHub so that the backend can authenticate you.
 If you are on a system without browser access (e.g., most computing clusters), a [token](https://github.com/settings/tokens) can be manually supplied via `set_access_token()`.
 
 ```python
 from scrnaseq import upload_dataset
- 
+
 upload_dataset(staging_dir, "my_dataset_name", "my_version")
 ```
diff --git a/docs/requirements.txt b/docs/requirements.txt
index 8aed2c8..daecbf1 100644
--- a/docs/requirements.txt
+++ b/docs/requirements.txt
@@ -1,8 +1,8 @@
+furo
 # Requirements file for ReadTheDocs, check .readthedocs.yml.
 # To build the module reference correctly, make sure every external package
 # under `install_requires` in `setup.cfg` is also listed here!
 # sphinx_rtd_theme
 myst-parser[linkify]
 sphinx>=3.2.1
-furo
-sphinx-autodoc-typehints
\ No newline at end of file
+sphinx-autodoc-typehints
diff --git a/pyproject.toml b/pyproject.toml
index b0b8763..0514df9 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -21,4 +21,4 @@ convention = "google"
 "__init__.py" = ["E402", "F401"]
 
 [tool.black]
-force-exclude = "__init__.py"
\ No newline at end of file
+force-exclude = "__init__.py"
diff --git a/setup.py b/setup.py
index 26b2cdf..8279d2c 100644
--- a/setup.py
+++ b/setup.py
@@ -1,11 +1,10 @@
-"""
-    Setup file for scrnaseq.
-    Use setup.cfg to configure your project.
+"""Setup file for scrnaseq. Use setup.cfg to configure your project.
 
-    This file was generated with PyScaffold 4.5.
-    PyScaffold helps you to put up the scaffold of your new Python project.
-    Learn more under: https://pyscaffold.org/
+This file was generated with PyScaffold 4.5.
+PyScaffold helps you to put up the scaffold of your new Python project.
+Learn more under: https://pyscaffold.org/
 """
+
 from setuptools import setup
 
 if __name__ == "__main__":
diff --git a/src/scrnaseq/list_datasets.py b/src/scrnaseq/list_datasets.py
index 8ef72ca..f79cc84 100644
--- a/src/scrnaseq/list_datasets.py
+++ b/src/scrnaseq/list_datasets.py
@@ -67,7 +67,7 @@ def list_datasets(
 
 
 def _format_query_results(results: list, key_names: list):
-    """Format the results from sqlite as a pandas dataframe
+    """Format the results from sqlite as a pandas dataframe.
 
     Key names must be in the exact same order as the query.
     """
diff --git a/src/scrnaseq/polish_dataset.py b/src/scrnaseq/polish_dataset.py
index 04f1fa2..3dff829 100644
--- a/src/scrnaseq/polish_dataset.py
+++ b/src/scrnaseq/polish_dataset.py
@@ -1,8 +1,6 @@
 from typing import Type
-from warnings import warn
 
 import numpy as np
-from delayedarray import DelayedArray
 from scipy import sparse as sp
 from singlecellexperiment import SingleCellExperiment
 from summarizedexperiment import SummarizedExperiment
diff --git a/src/scrnaseq/save_dataset.py b/src/scrnaseq/save_dataset.py
index 317d2fe..edb78c3 100644
--- a/src/scrnaseq/save_dataset.py
+++ b/src/scrnaseq/save_dataset.py
@@ -29,7 +29,7 @@ def save_dataset(x: Any, path, metadata):
         metadata:
             Dictionary containing the metadata for this dataset.
 
-            see the schema returned by 
+            see the schema returned by
             :py:func:`~gypsum_client.fetch_metadata_schema.fetch_metadata_schema`.
 
             Note that the ``applications.takane`` property will be automatically
diff --git a/src/scrnaseq/search_datasets.py b/src/scrnaseq/search_datasets.py
index 97062da..b31a070 100644
--- a/src/scrnaseq/search_datasets.py
+++ b/src/scrnaseq/search_datasets.py
@@ -6,8 +6,6 @@
 from gypsum_client import cache_directory, fetch_metadata_database
 from gypsum_client.search_metadata import (
     GypsumSearchClause,
-    define_text_query,
-    search_metadata_text,
     search_metadata_text_filter,
 )
 
@@ -25,8 +23,7 @@ def search_datasets(
     overwrite: bool = False,
     latest: bool = True,
 ) -> pd.DataFrame:
-    """Search for datasets of interest based on matching text in the
-    associated metadata.
+    """Search for datasets of interest based on matching text in the associated metadata.
 
     This is a wrapper around
     :py:func:`~gypsum_client.search_metadata.search_metadata_text`.
diff --git a/src/scrnaseq/utils.py b/src/scrnaseq/utils.py
index eee5deb..920ad8d 100644
--- a/src/scrnaseq/utils.py
+++ b/src/scrnaseq/utils.py
@@ -94,7 +94,7 @@ def format_object_metadata(x) -> dict:
     """Format object related metadata.
 
     Create object-related metadata to validate against the default
-    schema from 
+    schema from
     :py:func:`~gypsum_client.fetch_metadata_schema.fetch_metadata_schema`.
 
     This is intended for downstream package developers who are auto-generating metadata documents to be validated by
diff --git a/tests/conftest.py b/tests/conftest.py
index 72c29e9..d716147 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -1,10 +1,9 @@
-"""
-    Dummy conftest.py for scrnaseq.
+"""Dummy conftest.py for scrnaseq.
 
-    If you don't know what this is for, just leave it empty.
-    Read more about conftest.py under:
-    - https://docs.pytest.org/en/stable/fixture.html
-    - https://docs.pytest.org/en/stable/writing_plugins.html
+If you don't know what this is for, just leave it empty.
+Read more about conftest.py under:
+- https://docs.pytest.org/en/stable/fixture.html
+- https://docs.pytest.org/en/stable/writing_plugins.html
 """
 
 # import pytest
diff --git a/tests/test_save_dataset.py b/tests/test_save_dataset.py
index 7396f6d..08433a4 100644
--- a/tests/test_save_dataset.py
+++ b/tests/test_save_dataset.py
@@ -82,7 +82,8 @@ def test_save_dataset_anndata():
     assert isinstance(roundtrip.get_assays()["counts"], ReloadedArray)
     assert isinstance(adata.layers["counts"], np.ndarray)
     assert np.array_equal(
-        to_dense_array(roundtrip.get_assays()["counts"]).transpose(), adata.layers["counts"]
+        to_dense_array(roundtrip.get_assays()["counts"]).transpose(),
+        adata.layers["counts"],
     )
 
     # Load and check the metadata
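For context on that last hunk: the `.transpose()` is a layout conversion, not part of the formatting fix. `SingleCellExperiment` assays are stored features-by-cells, while AnnData layers are cells-by-genes, so the reloaded assay must be transposed before comparing against `adata.layers["counts"]`. A minimal numpy-only sketch of the relationship being asserted (toy arrays with hypothetical shapes, not this package's data):

```python
import numpy as np

# Toy stand-ins: a SingleCellExperiment assay is genes x cells,
# while an AnnData layer is cells x genes.
reloaded_assay = np.arange(6).reshape(3, 2)  # 3 genes x 2 cells (hypothetical)
adata_layer = reloaded_assay.T               # 2 cells x 3 genes

# Same comparison pattern as the reformatted assertion above.
assert np.array_equal(reloaded_assay.transpose(), adata_layer)
```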