diff --git a/.github/workflows/pypi-publish.yml b/.github/workflows/pypi-publish.yml
index 1ed6efa..fdf292a 100644
--- a/.github/workflows/pypi-publish.yml
+++ b/.github/workflows/pypi-publish.yml
@@ -12,12 +12,12 @@ jobs:
 
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v4
 
-      - name: Set up Python 3.9
-        uses: actions/setup-python@v2
+      - name: Set up Python 3.11
+        uses: actions/setup-python@v5
         with:
-          python-version: 3.9
+          python-version: 3.11
 
       # build SQLite from source, because I need 3.35<=
       - name: Download SQLite3
diff --git a/.github/workflows/pypi-test.yml b/.github/workflows/pypi-test.yml
index 624ea5b..53bd855 100644
--- a/.github/workflows/pypi-test.yml
+++ b/.github/workflows/pypi-test.yml
@@ -11,13 +11,13 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"]
+        python-version: [ "3.9", "3.10", "3.11", "3.12" ]
     name: Python ${{ matrix.python-version }}
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v4
 
       - name: Setup Python
-        uses: actions/setup-python@v2
+        uses: actions/setup-python@v5
         with:
           python-version: ${{ matrix.python-version }}
           cache: "pip"
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 3c9601c..e60a5f4 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -17,19 +17,19 @@ repos:
   - id: mixed-line-ending
     args: ['--fix=auto']  # replace 'auto' with 'lf' to enforce Linux/Mac line endings or 'crlf' for Windows
 
-- repo: https://github.com/PyCQA/docformatter
-  rev: v1.7.5
-  hooks:
-  - id: docformatter
-    additional_dependencies: [tomli]
-    args: [--in-place, --wrap-descriptions=120, --wrap-summaries=120]
-    # --config, ./pyproject.toml
+# - repo: https://github.com/PyCQA/docformatter
+#   rev: master
+#   hooks:
+#   - id: docformatter
+#     additional_dependencies: [tomli]
+#     args: [--in-place, --wrap-descriptions=120, --wrap-summaries=120]
+#     # --config, ./pyproject.toml
 
-- repo: https://github.com/psf/black
-  rev: 24.8.0
-  hooks:
-  - id: black
-    language_version: python3
+# - repo: https://github.com/psf/black
+#   rev: 24.8.0
+#   hooks:
+#   - id: black
+#     language_version: python3
 
 - repo: https://github.com/astral-sh/ruff-pre-commit
   # Ruff version.
@@ -37,6 +37,7 @@ repos:
   hooks:
   - id: ruff
     args: [--fix, --exit-non-zero-on-fix]
+  - id: ruff-format
 
 ## If like to embrace black styles even in the docs:
 # - repo: https://github.com/asottile/blacken-docs
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 205cc5e..2fd90d9 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,7 +1,10 @@
 # Changelog
 
-## Version 0.1 (development)
+## Version 0.2.0
 
-- Feature A added
-- FIX: nasty bug #1729 fixed
-- add your changes here!
+- chore: Remove Python 3.8 (EOL)
+- precommit: Replace docformatter with ruff's formatter
+
+## Version 0.1.0
+
+- Initial version of the package.
diff --git a/docs/conf.py b/docs/conf.py
index 93a4f42..a4b32f2 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -72,6 +72,7 @@
     "sphinx.ext.ifconfig",
     "sphinx.ext.mathjax",
     "sphinx.ext.napoleon",
+    "sphinx_autodoc_typehints",
 ]
 
 # Add any paths that contain templates here, relative to this directory.
@@ -166,6 +167,14 @@
 
 # If this is True, todo emits a warning for each TODO entries. The default is False.
 todo_emit_warnings = True
 
+autodoc_default_options = {
+    'special-members': True,
+    'undoc-members': False,
+    'exclude-members': '__weakref__, __dict__, __str__, __module__, __init__'
+    }
+
+autosummary_generate = True
+autosummary_imported_members = True
 
 
 # -- Options for HTML output -------------------------------------------------
diff --git a/setup.cfg b/setup.cfg
index b01b945..45da90d 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -41,7 +41,7 @@ package_dir =
     =src
 
 # Require a min/specific Python version (comma-separated conditions)
-python_requires = >=3.8
+python_requires = >=3.9
 
 # Add here dependencies of your project (line-separated), e.g. requests>=2.2,<3.0.
 # Version specifiers like >=2.2,<3.0 avoid problems due to API changes in
diff --git a/src/celldex/fetch_reference.py b/src/celldex/fetch_reference.py
index b3b7a9c..1485c13 100644
--- a/src/celldex/fetch_reference.py
+++ b/src/celldex/fetch_reference.py
@@ -81,12 +81,8 @@ def fetch_reference(
         or one of its subclasses.
     """
 
-    version_path = save_version(
-        package, name, version, cache_dir=cache_dir, overwrite=overwrite
-    )
-    obj_path = (
-        version_path if path is None else os.path.join(version_path, path.rstrip("/"))
-    )
+    version_path = save_version(package, name, version, cache_dir=cache_dir, overwrite=overwrite)
+    obj_path = version_path if path is None else os.path.join(version_path, path.rstrip("/"))
 
     old = alt_read_object_function(celldex_load_object)
 
@@ -147,9 +143,7 @@ def fetch_metadata(
         Dictionary containing metadata for the specified dataset.
     """
     remote_path = "_bioconductor.json" if path is None else f"{path}/_bioconductor.json"
-    local_path = save_file(
-        package, name, version, remote_path, cache_dir=cache_dir, overwrite=overwrite
-    )
+    local_path = save_file(package, name, version, remote_path, cache_dir=cache_dir, overwrite=overwrite)
 
     with open(local_path, "r") as f:
         metadata = json.load(f)
diff --git a/src/celldex/list_references.py b/src/celldex/list_references.py
index 7f8fe26..3d2cc04 100644
--- a/src/celldex/list_references.py
+++ b/src/celldex/list_references.py
@@ -14,9 +14,7 @@
 
 
 @lru_cache
-def list_references(
-    cache_dir: str = cache_directory(), overwrite: bool = False, latest: bool = True
-) -> pd.DataFrame:
+def list_references(cache_dir: str = cache_directory(), overwrite: bool = False, latest: bool = True) -> pd.DataFrame:
     """List all available reference datasets.
 
     Example:
@@ -83,9 +81,7 @@ def _format_query_results(results: list, key_names: list):
 
 
 def _sanitize_query_to_output(results: list, latest: bool, meta_name: str = "meta"):
-    _all_paths = [
-        None if "/" not in p else p.rsplit("/", 1)[0] for p in results["path"]
-    ]
+    _all_paths = [None if "/" not in p else p.rsplit("/", 1)[0] for p in results["path"]]
 
     df = pd.DataFrame(
         {
@@ -105,33 +101,22 @@ def _sanitize_query_to_output(results: list, latest: bool, meta_name: str = "met
     )
     df["title"] = _extract_atomic_from_json(_all_metas, lambda x: x.get("title"))
     df["description"] = _extract_atomic_from_json(_all_metas, lambda x: x.get("title"))
-    df["taxonomy_id"] = _extract_charlist_from_json(
-        _all_metas, lambda x: x.get("taxonomy_id")
-    )
+    df["taxonomy_id"] = _extract_charlist_from_json(_all_metas, lambda x: x.get("taxonomy_id"))
     df["genome"] = _extract_charlist_from_json(_all_metas, lambda x: x.get("genome"))
 
     df["rows"] = _extract_atomic_from_json(
         _all_metas,
-        lambda x: x.get("applications", {})
-        .get("takane", {})
-        .get("summarized_experiment", {})
-        .get("rows"),
+        lambda x: x.get("applications", {}).get("takane", {}).get("summarized_experiment", {}).get("rows"),
     )
 
     df["columns"] = _extract_atomic_from_json(
         _all_metas,
-        lambda x: x.get("applications", {})
-        .get("takane", {})
-        .get("summarized_experiment", {})
-        .get("columns"),
+        lambda x: x.get("applications", {}).get("takane", {}).get("summarized_experiment", {}).get("columns"),
     )
 
     df["assays"] = _extract_charlist_from_json(
         _all_metas,
-        lambda x: x.get("applications", {})
-        .get("takane", {})
-        .get("summarized_experiment", {})
-        .get("assays"),
+        lambda x: x.get("applications", {}).get("takane", {}).get("summarized_experiment", {}).get("assays"),
     )
     df["column_annotations"] = _extract_charlist_from_json(
         _all_metas,
@@ -155,15 +140,9 @@ def _sanitize_query_to_output(results: list, latest: bool, meta_name: str = "met
         .get("alternative_experiments"),
     )
 
-    df["bioconductor_version"] = _extract_atomic_from_json(
-        _all_metas, lambda x: x.get("bioconductor_version")
-    )
-    df["maintainer_name"] = _extract_atomic_from_json(
-        _all_metas, lambda x: x.get("maintainer_name")
-    )
-    df["maintainer_email"] = _extract_atomic_from_json(
-        _all_metas, lambda x: x.get("maintainer_email")
-    )
+    df["bioconductor_version"] = _extract_atomic_from_json(_all_metas, lambda x: x.get("bioconductor_version"))
+    df["maintainer_name"] = _extract_atomic_from_json(_all_metas, lambda x: x.get("maintainer_name"))
+    df["maintainer_email"] = _extract_atomic_from_json(_all_metas, lambda x: x.get("maintainer_email"))
 
     sources = []
     for meta in _all_metas:
@@ -186,9 +165,7 @@ def _sanitize_query_to_output(results: list, latest: bool, meta_name: str = "met
 
 
 def _extract_atomic_from_json(metadata, extract):
-    return [
-        extract(_meta) if extract(_meta) is not None else None for _meta in metadata
-    ]
+    return [extract(_meta) if extract(_meta) is not None else None for _meta in metadata]
 
 
 def _extract_charlist_from_json(metadata, extract):
diff --git a/src/celldex/save_reference.py b/src/celldex/save_reference.py
index 8cbc230..538a93b 100644
--- a/src/celldex/save_reference.py
+++ b/src/celldex/save_reference.py
@@ -91,9 +91,7 @@ def save_reference(x: Any, labels: List[str], path: str, metadata: dict):
         # Save the reference
         celldex.save_reference(sce, cache_dir, meta)
     """
-    raise NotImplementedError(
-        f"'save_dataset' is not supported for objects of class: {type(x)}"
-    )
+    raise NotImplementedError(f"'save_dataset' is not supported for objects of class: {type(x)}")
 
 
 def _save_se(x: SummarizedExperiment, path, metadata):
@@ -112,9 +110,7 @@ def _save_se(x: SummarizedExperiment, path, metadata):
     for _cn in _cols.get_column_names():
         _data = _cols.get_column(_cn)
         if not all(isinstance(y, str) for y in _data):
-            raise ValueError(
-                f"All labels in 'column_data' must be a list of strings; column {_cn} does not."
-            )
+            raise ValueError(f"All labels in 'column_data' must be a list of strings; column {_cn} does not.")
 
     if "logcounts" not in list(x.get_assay_names()):
         raise ValueError("Assay 'logcounts' does not exist.")