From e4d6733540722a11122a4e55f17a58b91350183f Mon Sep 17 00:00:00 2001 From: Luca Marconato Date: Fri, 6 Feb 2026 14:33:21 +0100 Subject: [PATCH 1/8] fix assert xenium shapes id --- src/spatialdata_io/readers/xenium.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/spatialdata_io/readers/xenium.py b/src/spatialdata_io/readers/xenium.py index 3d3224c6..b216c298 100644 --- a/src/spatialdata_io/readers/xenium.py +++ b/src/spatialdata_io/readers/xenium.py @@ -402,6 +402,8 @@ def filter(self, record: logging.LogRecord) -> bool: def _decode_cell_id_column(cell_id_column: pd.Series) -> pd.Series: if isinstance(cell_id_column.iloc[0], bytes): return cell_id_column.str.decode("utf-8") + if not isinstance(cell_id_column.iloc[0], str): + cell_id_column.index = cell_id_column.index.astype(str) return cell_id_column @@ -448,7 +450,7 @@ def _get_polygons( if version is not None and version < packaging.version.parse("2.0.0"): assert idx is not None assert len(idx) == len(geo_df) - assert index.equals(idx) + assert np.array_equal(index.values, idx.values) else: if np.unique(geo_df.index).size != len(geo_df): warnings.warn( From eae5858882b991af147765009e9f8e2b24fc8a79 Mon Sep 17 00:00:00 2001 From: Luca Marconato Date: Fri, 6 Feb 2026 14:35:50 +0100 Subject: [PATCH 2/8] cleanup --- src/spatialdata_io/readers/xenium.py | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/src/spatialdata_io/readers/xenium.py b/src/spatialdata_io/readers/xenium.py index b216c298..fb306cda 100644 --- a/src/spatialdata_io/readers/xenium.py +++ b/src/spatialdata_io/readers/xenium.py @@ -20,7 +20,6 @@ from dask.dataframe import read_parquet from dask_image.imread import imread from geopandas import GeoDataFrame -from joblib import Parallel, delayed from pyarrow import Table from shapely import GeometryType, Polygon, from_ragged_array from spatialdata import SpatialData @@ -223,18 +222,16 @@ def xenium( # labels. if nucleus_labels: labels["nucleus_labels"], _ = _get_labels_and_indices_mapping( - path, - XeniumKeys.CELLS_ZARR, - specs, + path=path, + specs=specs, mask_index=0, labels_name="nucleus_labels", labels_models_kwargs=labels_models_kwargs, ) if cells_labels: labels["cell_labels"], cell_labels_indices_mapping = _get_labels_and_indices_mapping( - path, - XeniumKeys.CELLS_ZARR, - specs, + path=path, + specs=specs, mask_index=1, labels_name="cell_labels", labels_models_kwargs=labels_models_kwargs, @@ -411,7 +408,7 @@ def _get_polygons( path: Path, file: str, specs: dict[str, Any], - idx: ArrayLike | None = None, + idx: pd.Series | None = None, ) -> GeoDataFrame: # seems to be faster than pd.read_parquet df = pq.read_table(path / file).to_pandas() @@ -466,7 +463,6 @@ def _get_polygons( def _get_labels_and_indices_mapping( path: Path, - file: str, specs: dict[str, Any], mask_index: int, labels_name: str, From f15a04ed4334ba3823360c20ed9ea42ef4f84368 Mon Sep 17 00:00:00 2001 From: Luca Marconato Date: Sat, 7 Feb 2026 11:22:17 +0100 Subject: [PATCH 3/8] attempt fix docs --- docs/conf.py | 1 + pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/conf.py b/docs/conf.py index f9d83c88..c2eeea45 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -127,6 +127,7 @@ # If building the documentation fails because of a missing link that is outside your control, # you can add an exception to this list. 
("py:class", "Path"), + ("py:class", "pathlib._local.Path"), ("py:class", "AnnData"), ("py:class", "SpatialData"), ("py:func", "imageio.imread"), # maybe this can be fixed diff --git a/pyproject.toml b/pyproject.toml index 8c0c1087..79fa7dcd 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -46,7 +46,7 @@ dev = [ "pre-commit" ] doc = [ - "sphinx>=4.5", + "sphinx>=4.5,<9", "sphinx-book-theme>=1.0.0", "myst-nb", "sphinxcontrib-bibtex>=1.0.0", From 9f1f71325b0a8867340c5752f15c2d7be7a6e849 Mon Sep 17 00:00:00 2001 From: Luca Marconato Date: Sat, 7 Feb 2026 12:14:05 +0100 Subject: [PATCH 4/8] xenium: avoid calling get_element_instances() when the info is in the metadata; remove deprecaction warning --- src/spatialdata_io/readers/xenium.py | 71 +++++++++++++++------------- 1 file changed, 37 insertions(+), 34 deletions(-) diff --git a/src/spatialdata_io/readers/xenium.py b/src/spatialdata_io/readers/xenium.py index fb306cda..50192fc3 100644 --- a/src/spatialdata_io/readers/xenium.py +++ b/src/spatialdata_io/readers/xenium.py @@ -24,6 +24,7 @@ from shapely import GeometryType, Polygon, from_ragged_array from spatialdata import SpatialData from spatialdata._core.query.relational_query import get_element_instances +from spatialdata._logging import logger from spatialdata.models import ( Image2DModel, Labels2DModel, @@ -60,7 +61,7 @@ def xenium( *, cells_boundaries: bool = True, nucleus_boundaries: bool = True, - cells_as_circles: bool | None = None, + cells_as_circles: bool = False, cells_labels: bool = True, nucleus_labels: bool = True, transcripts: bool = True, @@ -135,7 +136,7 @@ def xenium( Notes ----- - Old versions. Until spatialdata-io v0.1.3post0: previously, `cells_as_circles` was `True` by default; the table was associated to the + Old versions. Until spatialdata-io v0.6.0: `cells_as_circles` was `True` by default; the table was associated to the circles when `cells_as_circles` was `True`, and the table was associated to the polygons when `cells_as_circles` was `False`; the radii of the circles were computed form the nuclei instead of the cells. @@ -152,14 +153,6 @@ def xenium( ... ) >>> sdata.write("path/to/data.zarr") """ - if cells_as_circles is None: - cells_as_circles = True - warnings.warn( - "The default value of `cells_as_circles` will change to `False` in the next release. 
" - "Please pass `True` explicitly to maintain the current behavior.", - DeprecationWarning, - stacklevel=3, - ) image_models_kwargs, labels_models_kwargs = _initialize_raster_models_kwargs( image_models_kwargs, labels_models_kwargs ) @@ -357,8 +350,8 @@ def filter(self, record: logging.LogRecord) -> bool: return False return True - logger = tifffile.logger() - logger.addFilter(IgnoreSpecificMessage()) + tf_logger = tifffile.logger() + tf_logger.addFilter(IgnoreSpecificMessage()) image_models_kwargs = dict(image_models_kwargs) assert "c_coords" not in image_models_kwargs, ( "The channel names for the morphology focus images are handled internally" @@ -371,7 +364,7 @@ def filter(self, record: logging.LogRecord) -> bool: image_models_kwargs, ) del image_models_kwargs["c_coords"] - logger.removeFilter(IgnoreSpecificMessage()) + tf_logger.removeFilter(IgnoreSpecificMessage()) if table is not None: tables["table"] = table @@ -491,36 +484,46 @@ def _get_labels_and_indices_mapping( cell_id, dataset_suffix = z["cell_id"][...].T cell_id_str = cell_id_str_from_prefix_suffix_uint32(cell_id, dataset_suffix) - # this information will probably be available in the `label_id` column for version > 2.0.0 (see public - # release notes mentioned above) - real_label_index = get_element_instances(labels).values - - # background removal - if real_label_index[0] == 0: - real_label_index = real_label_index[1:] - if version < packaging.version.parse("2.0.0"): - expected_label_index = z["seg_mask_value"][...] - - if not np.array_equal(expected_label_index, real_label_index): - raise ValueError( - "The label indices from the labels differ from the ones from the input data. Please report " - f"this issue. Real label indices: {real_label_index}, expected label indices: " - f"{expected_label_index}." - ) + label_index = z["seg_mask_value"][...] else: - labels_positional_indices = z["polygon_sets"][f"{mask_index}"]["cell_index"][...] - if not np.array_equal(labels_positional_indices, np.arange(len(labels_positional_indices))): - raise ValueError( - "The positional indices of the labels do not match the expected range. Please report this issue." + import time + import tracemalloc + + tracemalloc.start() + + start = time.time() + # For v >= 2.0.0, seg_mask_value is no longer available in the zarr; + # read label_id from the corresponding parquet boundary file instead + boundaries_file = XeniumKeys.NUCLEUS_BOUNDARIES_FILE if mask_index == 0 else XeniumKeys.CELL_BOUNDARIES_FILE + boundary_columns = pq.read_schema(path / boundaries_file).names + if "label_id" in boundary_columns: + boundary_df = pq.read_table(path / boundaries_file, columns=[XeniumKeys.CELL_ID, "label_id"]).to_pandas() + boundary_df[XeniumKeys.CELL_ID] = _decode_cell_id_column(boundary_df[XeniumKeys.CELL_ID]) + # each vertex row repeats the cell_id and label_id; get unique pairs + unique_pairs = boundary_df.drop_duplicates(subset=[XeniumKeys.CELL_ID, "label_id"]) + cell_id_to_label_id = unique_pairs.set_index(XeniumKeys.CELL_ID)["label_id"] + label_index = cell_id_to_label_id.loc[cell_id_str].values + else: + # fallback for dev versions around 2.0.0 that lack both seg_mask_value and label_id + logger.warn( + f"Could not find the labels ids from the metadata for version {version}. Using a fallback (slower) implementation." 
) + label_index = get_element_instances(labels).values + if label_index[0] == 0: + label_index = label_index[1:] + + print(f"reading the indices: {time.time() - start}") + current, peak = tracemalloc.get_traced_memory() + print(f"Current memory usage is {current / 1024**2}MB; Peak was {peak / 1024**2}MB.") + tracemalloc.stop() # labels_index is an uint32, so let's cast to np.int64 to avoid the risk of overflow on some systems indices_mapping = pd.DataFrame( { "region": labels_name, "cell_id": cell_id_str, - "label_index": real_label_index.astype(np.int64), + "label_index": label_index.astype(np.int64), } ) # because AnnData converts the indices to str From 7143d8e87c70c508818ee8591086b5c232d85783 Mon Sep 17 00:00:00 2001 From: Luca Marconato Date: Mon, 9 Feb 2026 10:40:53 +0100 Subject: [PATCH 5/8] cleanup xenium _get_labels_and_indices_mapping() --- src/spatialdata_io/readers/xenium.py | 22 +++++++--------------- 1 file changed, 7 insertions(+), 15 deletions(-) diff --git a/src/spatialdata_io/readers/xenium.py b/src/spatialdata_io/readers/xenium.py index 50192fc3..4e185a0c 100644 --- a/src/spatialdata_io/readers/xenium.py +++ b/src/spatialdata_io/readers/xenium.py @@ -487,21 +487,14 @@ def _get_labels_and_indices_mapping( if version < packaging.version.parse("2.0.0"): label_index = z["seg_mask_value"][...] else: - import time - import tracemalloc - - tracemalloc.start() - - start = time.time() # For v >= 2.0.0, seg_mask_value is no longer available in the zarr; # read label_id from the corresponding parquet boundary file instead boundaries_file = XeniumKeys.NUCLEUS_BOUNDARIES_FILE if mask_index == 0 else XeniumKeys.CELL_BOUNDARIES_FILE boundary_columns = pq.read_schema(path / boundaries_file).names if "label_id" in boundary_columns: boundary_df = pq.read_table(path / boundaries_file, columns=[XeniumKeys.CELL_ID, "label_id"]).to_pandas() - boundary_df[XeniumKeys.CELL_ID] = _decode_cell_id_column(boundary_df[XeniumKeys.CELL_ID]) - # each vertex row repeats the cell_id and label_id; get unique pairs - unique_pairs = boundary_df.drop_duplicates(subset=[XeniumKeys.CELL_ID, "label_id"]) + unique_pairs = boundary_df.drop_duplicates(subset=[XeniumKeys.CELL_ID, "label_id"]).copy() + unique_pairs[XeniumKeys.CELL_ID] = _decode_cell_id_column(unique_pairs[XeniumKeys.CELL_ID]) cell_id_to_label_id = unique_pairs.set_index(XeniumKeys.CELL_ID)["label_id"] label_index = cell_id_to_label_id.loc[cell_id_str].values else: @@ -509,15 +502,14 @@ def _get_labels_and_indices_mapping( logger.warn( f"Could not find the labels ids from the metadata for version {version}. Using a fallback (slower) implementation." 
) - label_index = get_element_instances(labels).values + import dask.config + + with dask.config.set(num_workers=1): + label_index = get_element_instances(labels).values + if label_index[0] == 0: label_index = label_index[1:] - print(f"reading the indices: {time.time() - start}") - current, peak = tracemalloc.get_traced_memory() - print(f"Current memory usage is {current / 1024**2}MB; Peak was {peak / 1024**2}MB.") - tracemalloc.stop() - # labels_index is an uint32, so let's cast to np.int64 to avoid the risk of overflow on some systems indices_mapping = pd.DataFrame( { From baddc04b119747ee806a6b8ed7ebaecd9a5d4527 Mon Sep 17 00:00:00 2001 From: Luca Marconato Date: Wed, 11 Feb 2026 16:13:36 +0100 Subject: [PATCH 6/8] remove num_workers from xenium; move normalize_chunks to spatialdata --- src/spatialdata_io/readers/_utils/_image.py | 59 +-------------------- src/spatialdata_io/readers/generic.py | 9 ++-- src/spatialdata_io/readers/xenium.py | 9 ++-- tests/readers/test_utils_image.py | 39 -------------- 4 files changed, 12 insertions(+), 104 deletions(-) diff --git a/src/spatialdata_io/readers/_utils/_image.py b/src/spatialdata_io/readers/_utils/_image.py index 3784ccca..c5c8d7e3 100644 --- a/src/spatialdata_io/readers/_utils/_image.py +++ b/src/spatialdata_io/readers/_utils/_image.py @@ -7,7 +7,7 @@ from numpy.typing import NDArray from spatialdata.models.models import Chunks_t -__all__ = ["Chunks_t", "_compute_chunks", "_read_chunks", "normalize_chunks"] +__all__ = ["Chunks_t", "_compute_chunks", "_read_chunks"] _Y_IDX = 0 """Index of y coordinate in in chunk coordinate array format: (y, x, height, width)""" @@ -143,60 +143,3 @@ def _read_chunks( for chunk_y in range(coords.shape[0]) ] return chunks - - -def normalize_chunks( - chunks: Chunks_t | None, - axes: Sequence[str], -) -> dict[str, int]: - """Normalize chunk specification to dict format. - - This function converts various chunk formats to a dict mapping dimension names - to chunk sizes. The dict format is preferred because it's explicit about which - dimension gets which chunk size and is compatible with spatialdata. - - Parameters - ---------- - chunks - Chunk specification. Can be: - - None: Uses DEFAULT_CHUNK_SIZE for all axes - - int: Applied to all axes - - tuple[int, ...]: Chunk sizes in order corresponding to axes - - dict: Mapping of axis names to chunk sizes (validated against axes) - axes - Tuple of axis names that defines the expected dimensions (e.g., ('c', 'y', 'x')). - - Returns - ------- - dict[str, int] - Dict mapping axis names to chunk sizes. - - Raises - ------ - ValueError - If chunks format is not supported or incompatible with axes. - """ - if chunks is None: - return dict.fromkeys(axes, DEFAULT_CHUNK_SIZE) - - if isinstance(chunks, int): - return dict.fromkeys(axes, chunks) - - if isinstance(chunks, Mapping): - chunks_dict = dict(chunks) - missing = set(axes) - set(chunks_dict.keys()) - if missing: - raise ValueError(f"chunks dict missing keys for axes {missing}, got: {list(chunks_dict.keys())}") - return {ax: chunks_dict[ax] for ax in axes} - - if isinstance(chunks, tuple): - if len(chunks) != len(axes): - raise ValueError(f"chunks tuple length {len(chunks)} doesn't match axes {axes} (length {len(axes)})") - if not all(isinstance(c, int) for c in chunks): - raise ValueError(f"All elements in chunks tuple must be int, got: {chunks}") - return dict(zip(axes, chunks, strict=True)) - - raise ValueError(f"Unsupported chunks type: {type(chunks)}. 
Expected int, tuple, dict, or None.")
-
-
-##
diff --git a/src/spatialdata_io/readers/generic.py b/src/spatialdata_io/readers/generic.py
index 462989e0..95bcf66c 100644
--- a/src/spatialdata_io/readers/generic.py
+++ b/src/spatialdata_io/readers/generic.py
@@ -7,7 +7,6 @@
 import numpy as np
 import tifffile
 from dask_image.imread import imread
-from geopandas import GeoDataFrame
 from spatialdata._docs import docstring_parameter
 from spatialdata._logging import logger
 from spatialdata.models import Image2DModel, ShapesModel
@@ -23,10 +22,12 @@
 from xarray import DataArray
 
+from spatialdata.models.chunks_utils import normalize_chunks
+
 from spatialdata_io.readers._utils._image import (
+    DEFAULT_CHUNK_SIZE,
     _compute_chunks,
     _read_chunks,
-    normalize_chunks,
 )
 
 VALID_IMAGE_TYPES = [".tif", ".tiff", ".png", ".jpg", ".jpeg"]
@@ -179,7 +180,7 @@ def image(
     chunks: Chunks_t | None = None,
     scale_factors: Sequence[int] | None = None,
 ) -> DataArray:
-    """Reads an image file and returns a parsed Image2D spatial element.
+    """Read an image file and return a parsed Image2D spatial element.
 
     Parameters
     ----------
@@ -207,6 +208,8 @@
     # Map passed data axes to position of dimension
     axes_dim_mapping = {axes: ndim for ndim, axes in enumerate(data_axes)}
 
+    if chunks is None:
+        chunks = DEFAULT_CHUNK_SIZE
     chunks_dict = normalize_chunks(chunks, axes=data_axes)
 
     im = None
diff --git a/src/spatialdata_io/readers/xenium.py b/src/spatialdata_io/readers/xenium.py
index 4e185a0c..d5331a59 100644
--- a/src/spatialdata_io/readers/xenium.py
+++ b/src/spatialdata_io/readers/xenium.py
@@ -469,7 +469,11 @@ def _get_labels_and_indices_mapping(
     z = zarr.open(store, mode="r")
     # get the labels
     masks = da.from_array(z["masks"][f"{mask_index}"])
+    import time
+
+    start = time.time()
     labels = Labels2DModel.parse(masks, dims=("y", "x"), transformations={"global": Identity()}, **labels_models_kwargs)
+    print(f"Labels2DModel.parse(): {time.time() - start}")
 
     # build the matching table
     version = _parse_version_of_xenium_analyzer(specs)
@@ -502,7 +506,7 @@
                 logger.warn(
                     f"Could not find the labels ids from the metadata for version {version}. Using a fallback (slower) implementation."
) - import dask.config - - with dask.config.set(num_workers=1): - label_index = get_element_instances(labels).values + label_index = get_element_instances(labels).values if label_index[0] == 0: label_index = label_index[1:] diff --git a/tests/readers/test_utils_image.py b/tests/readers/test_utils_image.py index c2e08230..b6648556 100644 --- a/tests/readers/test_utils_image.py +++ b/tests/readers/test_utils_image.py @@ -3,11 +3,8 @@ from numpy.typing import NDArray from spatialdata_io.readers._utils._image import ( - DEFAULT_CHUNK_SIZE, - Chunks_t, _compute_chunk_sizes_positions, _compute_chunks, - normalize_chunks, ) @@ -65,39 +62,3 @@ def test_compute_chunks( tiles = _compute_chunks(dimensions, chunk_size) assert (tiles == result).all() - - -@pytest.mark.parametrize( - "chunks, axes, expected", - [ - # 2D (y, x) - (None, ("y", "x"), {"y": DEFAULT_CHUNK_SIZE, "x": DEFAULT_CHUNK_SIZE}), - (256, ("y", "x"), {"y": 256, "x": 256}), - ((200, 100), ("x", "y"), {"y": 100, "x": 200}), - ({"y": 300, "x": 400}, ("x", "y"), {"y": 300, "x": 400}), - # 2D with channel (c, y, x) - (None, ("c", "y", "x"), {"c": DEFAULT_CHUNK_SIZE, "y": DEFAULT_CHUNK_SIZE, "x": DEFAULT_CHUNK_SIZE}), - (256, ("c", "y", "x"), {"c": 256, "y": 256, "x": 256}), - ((1, 100, 200), ("c", "y", "x"), {"c": 1, "y": 100, "x": 200}), - ({"c": 1, "y": 300, "x": 400}, ("c", "y", "x"), {"c": 1, "y": 300, "x": 400}), - # 3D (z, y, x) - ((10, 100, 200), ("z", "y", "x"), {"z": 10, "y": 100, "x": 200}), - ({"z": 10, "y": 300, "x": 400}, ("z", "y", "x"), {"z": 10, "y": 300, "x": 400}), - ], -) -def test_normalize_chunks_valid(chunks: Chunks_t, axes: tuple[str, ...], expected: dict[str, int]) -> None: - assert normalize_chunks(chunks, axes=axes) == expected - - -@pytest.mark.parametrize( - "chunks, axes, match", - [ - ({"y": 100}, ("y", "x"), "missing keys for axes"), - ((1, 2, 3), ("y", "x"), "doesn't match axes"), - ((1.5, 2), ("y", "x"), "must be int"), - ("invalid", ("y", "x"), "Unsupported chunks type"), - ], -) -def test_normalize_chunks_errors(chunks: Chunks_t, axes: tuple[str, ...], match: str) -> None: - with pytest.raises(ValueError, match=match): - normalize_chunks(chunks, axes=axes) From 799616716245eef549475a0729b02d65c0964ea1 Mon Sep 17 00:00:00 2001 From: Luca Marconato Date: Wed, 11 Feb 2026 16:16:31 +0100 Subject: [PATCH 7/8] remove print --- src/spatialdata_io/readers/xenium.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/src/spatialdata_io/readers/xenium.py b/src/spatialdata_io/readers/xenium.py index d5331a59..152e644e 100644 --- a/src/spatialdata_io/readers/xenium.py +++ b/src/spatialdata_io/readers/xenium.py @@ -469,11 +469,7 @@ def _get_labels_and_indices_mapping( z = zarr.open(store, mode="r") # get the labels masks = da.from_array(z["masks"][f"{mask_index}"]) - import time - - start = time.time() labels = Labels2DModel.parse(masks, dims=("y", "x"), transformations={"global": Identity()}, **labels_models_kwargs) - print(f"Labels2DModel.parse(): {time.time() - start}") # build the matching table version = _parse_version_of_xenium_analyzer(specs) From b7f2318e679a1a63ed202bd5d9593d99bd78b45e Mon Sep 17 00:00:00 2001 From: Luca Marconato Date: Wed, 11 Feb 2026 16:38:36 +0100 Subject: [PATCH 8/8] update deps --- pyproject.toml | 42 +++++++++++++++++++++--------------------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 79fa7dcd..6c1e6095 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,18 +5,18 @@ requires = ["hatchling", "hatch-vcs"] [project] name = 
"spatialdata-io" -dynamic= [ - "version" # allow version to be set by git tags +dynamic = [ + "version" # allow version to be set by git tags ] description = "SpatialData IO for common techs" readme = "README.md" requires-python = ">=3.11" -license = {file = "LICENSE"} +license = { file = "LICENSE" } authors = [ - {name = "scverse"}, + { name = "scverse" }, ] maintainers = [ - {name = "scverse", email = "scverse@scverse.scverse"}, + { name = "scverse", email = "scverse@scverse.scverse" }, ] urls.Documentation = "https://spatialdata-io.readthedocs.io/" urls.Source = "https://github.com/scverse/spatialdata-io" @@ -26,7 +26,7 @@ dependencies = [ "click", "numpy", "scanpy", - "spatialdata>=0.2.6", + "spatialdata>=0.7.3a0", "scikit-image", "h5py", "joblib", @@ -67,7 +67,7 @@ test = [ # update: readthedocs doens't seem to try to install pre-releases even if when trying to install the pre optional-dependency. For # the moment, if needed, let's add the latest pre-release explicitly here. pre = [ - "spatialdata>=0.4.0rc0" + "spatialdata>=0.7.3a0" ] [tool.coverage.run] @@ -80,7 +80,7 @@ omit = [ testpaths = ["tests"] xfail_strict = true addopts = [ - "--import-mode=importlib", # allow using test files with same name + "--import-mode=importlib", # allow using test files with same name ] [tool.ruff] @@ -95,19 +95,19 @@ exclude = [ "setup.py", ] lint.select = [ - "F", # Errors detected by Pyflakes - "E", # Error detected by Pycodestyle - "W", # Warning detected by Pycodestyle - "I", # isort - "D", # pydocstyle - "B", # flake8-bugbear - "TID", # flake8-tidy-imports - "C4", # flake8-comprehensions - "BLE", # flake8-blind-except - "UP", # pyupgrade - "RUF100", # Report unused noqa directives - "TCH", # Typing imports - "NPY", # Numpy specific rules + "F", # Errors detected by Pyflakes + "E", # Error detected by Pycodestyle + "W", # Warning detected by Pycodestyle + "I", # isort + "D", # pydocstyle + "B", # flake8-bugbear + "TID", # flake8-tidy-imports + "C4", # flake8-comprehensions + "BLE", # flake8-blind-except + "UP", # pyupgrade + "RUF100", # Report unused noqa directives + "TCH", # Typing imports + "NPY", # Numpy specific rules # "PTH", # Use pathlib # "S" # Security ]