Skip to content

Commit

Permalink
Merge pull request #11 from SingleR-inc/py38-eol
Browse files Browse the repository at this point in the history
  • Loading branch information
jkanche authored Dec 11, 2024
2 parents 5b3e66d + 63effeb commit 063db66
Show file tree
Hide file tree
Showing 9 changed files with 52 additions and 72 deletions.
8 changes: 4 additions & 4 deletions .github/workflows/pypi-publish.yml
Original file line number Diff line number Diff line change
Expand Up @@ -12,12 +12,12 @@ jobs:
runs-on: ubuntu-latest

steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v4

- name: Set up Python 3.9
uses: actions/setup-python@v2
- name: Set up Python 3.11
uses: actions/setup-python@v5
with:
python-version: 3.9
python-version: 3.11

# build SQLite from source, because SQLite >= 3.35 is required
- name: Download SQLite3
Expand Down
6 changes: 3 additions & 3 deletions .github/workflows/pypi-test.yml
Original file line number Diff line number Diff line change
Expand Up @@ -11,13 +11,13 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"]
python-version: [ "3.9", "3.10", "3.11", "3.12" ]

name: Python ${{ matrix.python-version }}
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v4
- name: Setup Python
uses: actions/setup-python@v2
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
cache: "pip"
Expand Down
25 changes: 13 additions & 12 deletions .pre-commit-config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -17,26 +17,27 @@ repos:
- id: mixed-line-ending
args: ['--fix=auto'] # replace 'auto' with 'lf' to enforce Linux/Mac line endings or 'crlf' for Windows

- repo: https://github.com/PyCQA/docformatter
rev: v1.7.5
hooks:
- id: docformatter
additional_dependencies: [tomli]
args: [--in-place, --wrap-descriptions=120, --wrap-summaries=120]
# --config, ./pyproject.toml
# - repo: https://github.com/PyCQA/docformatter
# rev: master
# hooks:
# - id: docformatter
# additional_dependencies: [tomli]
# args: [--in-place, --wrap-descriptions=120, --wrap-summaries=120]
# # --config, ./pyproject.toml

- repo: https://github.com/psf/black
rev: 24.8.0
hooks:
- id: black
language_version: python3
# - repo: https://github.com/psf/black
# rev: 24.8.0
# hooks:
# - id: black
# language_version: python3

- repo: https://github.com/astral-sh/ruff-pre-commit
# Ruff version.
rev: v0.6.8
hooks:
- id: ruff
args: [--fix, --exit-non-zero-on-fix]
- id: ruff-format

## If like to embrace black styles even in the docs:
# - repo: https://github.com/asottile/blacken-docs
Expand Down
11 changes: 7 additions & 4 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -1,7 +1,10 @@
# Changelog

## Version 0.1 (development)
## Version 0.2.0

- Feature A added
- FIX: nasty bug #1729 fixed
- add your changes here!
- chore: Remove Python 3.8 (EOL)
- precommit: Replace docformatter with ruff's formatter

## Version 0.1.0

- Initial version of the package.
9 changes: 9 additions & 0 deletions docs/conf.py
Original file line number Diff line number Diff line change
Expand Up @@ -72,6 +72,7 @@
"sphinx.ext.ifconfig",
"sphinx.ext.mathjax",
"sphinx.ext.napoleon",
"sphinx_autodoc_typehints",
]

# Add any paths that contain templates here, relative to this directory.
Expand Down Expand Up @@ -166,6 +167,14 @@
# If this is True, todo emits a warning for each TODO entries. The default is False.
todo_emit_warnings = True

autodoc_default_options = {
'special-members': True,
'undoc-members': False,
'exclude-members': '__weakref__, __dict__, __str__, __module__, __init__'
}

autosummary_generate = True
autosummary_imported_members = True

# -- Options for HTML output -------------------------------------------------

Expand Down
2 changes: 1 addition & 1 deletion setup.cfg
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@ package_dir =
=src

# Require a min/specific Python version (comma-separated conditions)
python_requires = >=3.8
python_requires = >=3.9

# Add here dependencies of your project (line-separated), e.g. requests>=2.2,<3.0.
# Version specifiers like >=2.2,<3.0 avoid problems due to API changes in
Expand Down
12 changes: 3 additions & 9 deletions src/celldex/fetch_reference.py
Original file line number Diff line number Diff line change
Expand Up @@ -81,12 +81,8 @@ def fetch_reference(
or one of its subclasses.
"""

version_path = save_version(
package, name, version, cache_dir=cache_dir, overwrite=overwrite
)
obj_path = (
version_path if path is None else os.path.join(version_path, path.rstrip("/"))
)
version_path = save_version(package, name, version, cache_dir=cache_dir, overwrite=overwrite)
obj_path = version_path if path is None else os.path.join(version_path, path.rstrip("/"))

old = alt_read_object_function(celldex_load_object)

Expand Down Expand Up @@ -147,9 +143,7 @@ def fetch_metadata(
Dictionary containing metadata for the specified dataset.
"""
remote_path = "_bioconductor.json" if path is None else f"{path}/_bioconductor.json"
local_path = save_file(
package, name, version, remote_path, cache_dir=cache_dir, overwrite=overwrite
)
local_path = save_file(package, name, version, remote_path, cache_dir=cache_dir, overwrite=overwrite)

with open(local_path, "r") as f:
metadata = json.load(f)
Expand Down
43 changes: 10 additions & 33 deletions src/celldex/list_references.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,9 +14,7 @@


@lru_cache
def list_references(
cache_dir: str = cache_directory(), overwrite: bool = False, latest: bool = True
) -> pd.DataFrame:
def list_references(cache_dir: str = cache_directory(), overwrite: bool = False, latest: bool = True) -> pd.DataFrame:
"""List all available reference datasets.
Example:
Expand Down Expand Up @@ -83,9 +81,7 @@ def _format_query_results(results: list, key_names: list):


def _sanitize_query_to_output(results: list, latest: bool, meta_name: str = "meta"):
_all_paths = [
None if "/" not in p else p.rsplit("/", 1)[0] for p in results["path"]
]
_all_paths = [None if "/" not in p else p.rsplit("/", 1)[0] for p in results["path"]]

df = pd.DataFrame(
{
Expand All @@ -105,33 +101,22 @@ def _sanitize_query_to_output(results: list, latest: bool, meta_name: str = "met
)
df["title"] = _extract_atomic_from_json(_all_metas, lambda x: x.get("title"))
df["description"] = _extract_atomic_from_json(_all_metas, lambda x: x.get("title"))
df["taxonomy_id"] = _extract_charlist_from_json(
_all_metas, lambda x: x.get("taxonomy_id")
)
df["taxonomy_id"] = _extract_charlist_from_json(_all_metas, lambda x: x.get("taxonomy_id"))
df["genome"] = _extract_charlist_from_json(_all_metas, lambda x: x.get("genome"))

df["rows"] = _extract_atomic_from_json(
_all_metas,
lambda x: x.get("applications", {})
.get("takane", {})
.get("summarized_experiment", {})
.get("rows"),
lambda x: x.get("applications", {}).get("takane", {}).get("summarized_experiment", {}).get("rows"),
)

df["columns"] = _extract_atomic_from_json(
_all_metas,
lambda x: x.get("applications", {})
.get("takane", {})
.get("summarized_experiment", {})
.get("columns"),
lambda x: x.get("applications", {}).get("takane", {}).get("summarized_experiment", {}).get("columns"),
)

df["assays"] = _extract_charlist_from_json(
_all_metas,
lambda x: x.get("applications", {})
.get("takane", {})
.get("summarized_experiment", {})
.get("assays"),
lambda x: x.get("applications", {}).get("takane", {}).get("summarized_experiment", {}).get("assays"),
)
df["column_annotations"] = _extract_charlist_from_json(
_all_metas,
Expand All @@ -155,15 +140,9 @@ def _sanitize_query_to_output(results: list, latest: bool, meta_name: str = "met
.get("alternative_experiments"),
)

df["bioconductor_version"] = _extract_atomic_from_json(
_all_metas, lambda x: x.get("bioconductor_version")
)
df["maintainer_name"] = _extract_atomic_from_json(
_all_metas, lambda x: x.get("maintainer_name")
)
df["maintainer_email"] = _extract_atomic_from_json(
_all_metas, lambda x: x.get("maintainer_email")
)
df["bioconductor_version"] = _extract_atomic_from_json(_all_metas, lambda x: x.get("bioconductor_version"))
df["maintainer_name"] = _extract_atomic_from_json(_all_metas, lambda x: x.get("maintainer_name"))
df["maintainer_email"] = _extract_atomic_from_json(_all_metas, lambda x: x.get("maintainer_email"))

sources = []
for meta in _all_metas:
Expand All @@ -186,9 +165,7 @@ def _sanitize_query_to_output(results: list, latest: bool, meta_name: str = "met


def _extract_atomic_from_json(metadata, extract):
return [
extract(_meta) if extract(_meta) is not None else None for _meta in metadata
]
return [extract(_meta) if extract(_meta) is not None else None for _meta in metadata]


def _extract_charlist_from_json(metadata, extract):
Expand Down
8 changes: 2 additions & 6 deletions src/celldex/save_reference.py
Original file line number Diff line number Diff line change
Expand Up @@ -91,9 +91,7 @@ def save_reference(x: Any, labels: List[str], path: str, metadata: dict):
# Save the reference
celldex.save_reference(sce, cache_dir, meta)
"""
raise NotImplementedError(
f"'save_dataset' is not supported for objects of class: {type(x)}"
)
raise NotImplementedError(f"'save_dataset' is not supported for objects of class: {type(x)}")


def _save_se(x: SummarizedExperiment, path, metadata):
Expand All @@ -112,9 +110,7 @@ def _save_se(x: SummarizedExperiment, path, metadata):
for _cn in _cols.get_column_names():
_data = _cols.get_column(_cn)
if not all(isinstance(y, str) for y in _data):
raise ValueError(
f"All labels in 'column_data' must be a list of strings; column {_cn} does not."
)
raise ValueError(f"All labels in 'column_data' must be a list of strings; column {_cn} does not.")

if "logcounts" not in list(x.get_assay_names()):
raise ValueError("Assay 'logcounts' does not exist.")
Expand Down

0 comments on commit 063db66

Please sign in to comment.