Skip to content

Commit 11cc154

Browse files
authored
Merge pull request #213 from lilab-bcb/compat
Updated for latest anndata. Updated pre-commit
2 parents 2c00357 + d620c40 commit 11cc154

File tree

11 files changed

+56
-420
lines changed

11 files changed

+56
-420
lines changed

.pre-commit-config.yaml

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
repos:
22
- repo: https://github.com/pre-commit/pre-commit-hooks
3-
rev: v4.4.0
3+
rev: v4.6.0
44
hooks:
55
# - id: double-quote-string-fixer # for single quotes: uncomment and add black config “skip-string-normalization”
66
- id: trailing-whitespace
@@ -11,19 +11,19 @@ repos:
1111
- id: docformatter
1212
args: ["--in-place", "--wrap-summaries=100", "--wrap-descriptions=100", "--config=./pyproject.toml"]
1313
- repo: https://github.com/psf/black
14-
rev: 23.9.1
14+
rev: 24.8.0
1515
hooks:
1616
- id: black
1717
- repo: https://github.com/pycqa/isort
18-
rev: 5.12.0
18+
rev: 5.13.2
1919
hooks:
2020
- id: isort
2121
- repo: https://github.com/csachs/pyproject-flake8
22-
rev: v6.1.0
22+
rev: v7.0.0
2323
hooks:
2424
- id: pyproject-flake8
2525
- repo: https://github.com/pre-commit/mirrors-prettier
26-
rev: v3.0.3
26+
rev: v4.0.0-alpha.8
2727
hooks:
2828
- id: prettier
2929
types_or: [css, javascript]

cirrocumulus/abstract_backed_dataset.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -3,10 +3,10 @@
33
import pandas as pd
44
import scipy.sparse
55
from anndata import AnnData
6+
from anndata._core.sparse_dataset import sparse_dataset
67

78
from cirrocumulus.abstract_dataset import AbstractDataset
89
from cirrocumulus.anndata_util import ADATA_LAYERS_UNS_KEY, ADATA_MODULE_UNS_KEY
9-
from cirrocumulus.sparse_dataset import SparseDataset
1010

1111

1212
# string_dtype = h5py.check_string_dtype(dataset.dtype)
@@ -73,8 +73,8 @@ def get_X(self, var_ids, keys, node):
7373
get_item = var_ids.get_indexer_for(keys)
7474

7575
if self.is_group(node):
76-
sparse_dataset = SparseDataset(node) # sparse
77-
X = sparse_dataset[:, get_item]
76+
ds = sparse_dataset(node) # sparse
77+
X = ds[:, get_item]
7878
else: # dense
7979
X = self.slice_dense_array(node, get_item)
8080
var = pd.DataFrame(index=keys)

cirrocumulus/concat.py

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -148,9 +148,11 @@ def concat_spatial(paths: list[str], output_path: str, ncols: int = 2):
148148

149149
def create_parser(description=False):
150150
parser = argparse.ArgumentParser(
151-
description="Concatenate datasets in a grid layout. If all the datasets are spatial datasets, then tissue images are concatenated."
152-
if description
153-
else None
151+
description=(
152+
"Concatenate datasets in a grid layout. If all the datasets are spatial datasets, then tissue images are concatenated."
153+
if description
154+
else None
155+
)
154156
)
155157
parser.add_argument(
156158
"dataset",

cirrocumulus/diff_exp.py

Lines changed: 10 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -137,11 +137,15 @@ def __init__(
137137
scores=scores,
138138
pvals=pvals,
139139
logfoldchanges=foldchanges,
140-
frac_expressed1=frac_expressed_df.loc[group_one].values
141-
if frac_expressed_df is not None
142-
else None,
143-
frac_expressed2=frac_expressed_df.loc[group_two].values
144-
if frac_expressed_df is not None
145-
else None,
140+
frac_expressed1=(
141+
frac_expressed_df.loc[group_one].values
142+
if frac_expressed_df is not None
143+
else None
144+
),
145+
frac_expressed2=(
146+
frac_expressed_df.loc[group_two].values
147+
if frac_expressed_df is not None
148+
else None
149+
),
146150
)
147151
self.pair2results = pair2results

cirrocumulus/local_db_api.py

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -50,8 +50,10 @@ def __init__(self, paths: list[str]):
5050
if url.lower().endswith(".json.gz") or url.lower().endswith(".json"):
5151
import gzip
5252

53-
with gzip.open(fs.open(url)) if url.lower().endswith(".json.gz") else fs.open(
54-
url
53+
with (
54+
gzip.open(fs.open(url))
55+
if url.lower().endswith(".json.gz")
56+
else fs.open(url)
5557
) as f:
5658
d = json.load(f)
5759
if "id" in d:

0 commit comments

Comments (0)