diff --git a/segmentation/ome-zarr-autosegmentation-plugin/.bumpversion.cfg b/segmentation/ome-zarr-autosegmentation-plugin/.bumpversion.cfg
new file mode 100644
index 000000000..b3f972743
--- /dev/null
+++ b/segmentation/ome-zarr-autosegmentation-plugin/.bumpversion.cfg
@@ -0,0 +1,29 @@
+[bumpversion]
+current_version = 0.1.0
+commit = False
+tag = False
+parse = (?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)(\-(?P<release>[a-z]+)(?P<dev>\d+))?
+serialize =
+    {major}.{minor}.{patch}-{release}{dev}
+    {major}.{minor}.{patch}
+
+[bumpversion:part:release]
+optional_value = _
+first_value = dev
+values =
+    dev
+    _
+
+[bumpversion:part:dev]
+
+[bumpversion:file:pyproject.toml]
+search = version = "{current_version}"
+replace = version = "{new_version}"
+
+[bumpversion:file:VERSION]
+
+[bumpversion:file:README.md]
+
+[bumpversion:file:plugin.json]
+
+[bumpversion:file:src/polus/images/segmentation/ome_zarr_autosegmentation/__init__.py]
diff --git a/segmentation/ome-zarr-autosegmentation-plugin/.dockerignore b/segmentation/ome-zarr-autosegmentation-plugin/.dockerignore
new file mode 100644
index 000000000..7c603f814
--- /dev/null
+++ b/segmentation/ome-zarr-autosegmentation-plugin/.dockerignore
@@ -0,0 +1,4 @@
+.venv
+out
+tests
+__pycache__
diff --git a/segmentation/ome-zarr-autosegmentation-plugin/.gitignore b/segmentation/ome-zarr-autosegmentation-plugin/.gitignore
new file mode 100644
index 000000000..d3b40938c
--- /dev/null
+++ b/segmentation/ome-zarr-autosegmentation-plugin/.gitignore
@@ -0,0 +1,8 @@
+poetry.lock
+uv.lock
+test_datasets/**
+**/models/**
+autogenerated/**
+cachedir/**
+provenance/**
+output_job.json
\ No newline at end of file
diff --git a/segmentation/ome-zarr-autosegmentation-plugin/.python-version b/segmentation/ome-zarr-autosegmentation-plugin/.python-version
new file mode 100644
index 000000000..2c0733315
--- /dev/null
+++ b/segmentation/ome-zarr-autosegmentation-plugin/.python-version
@@ -0,0 +1 @@
+3.11
diff --git a/segmentation/ome-zarr-autosegmentation-plugin/CHANGELOG.md b/segmentation/ome-zarr-autosegmentation-plugin/CHANGELOG.md
new file mode 100644
index 000000000..b67793f7a
--- /dev/null
+++ b/segmentation/ome-zarr-autosegmentation-plugin/CHANGELOG.md
@@ -0,0 +1,5 @@
+# CHANGELOG
+
+## 0.1.0
+
+Initial release.
diff --git a/segmentation/ome-zarr-autosegmentation-plugin/Dockerfile b/segmentation/ome-zarr-autosegmentation-plugin/Dockerfile
new file mode 100644
index 000000000..2bb047c5e
--- /dev/null
+++ b/segmentation/ome-zarr-autosegmentation-plugin/Dockerfile
@@ -0,0 +1,35 @@
+# Build stage
+FROM python:3.11-slim AS builder
+
+RUN apt-get update && apt-get install -y \
+    gcc \
+    g++ \
+    python3-dev \
+    && apt-get autoremove -y \
+    && apt-get clean \
+    && rm -rf /var/lib/apt/lists/*
+
+# Install uv
+COPY --from=ghcr.io/astral-sh/uv:latest /uv /uvx /bin/
+
+WORKDIR /app
+
+COPY pyproject.toml ./
+
+RUN uv pip install --system -e .
+
+COPY . .
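+
+# The full source tree is copied after the dependency install above so that
+# source-only changes can reuse the cached install layer.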
+
+# Final stage
+FROM python:3.11-slim
+
+WORKDIR /app
+
+COPY --from=builder /app /app
+
+COPY --from=builder /usr/local/lib/python3.11/site-packages /usr/local/lib/python3.11/site-packages
+
+WORKDIR /app/src
+
+ENTRYPOINT ["python3", "-m", "polus.images.segmentation.ome_zarr_autosegmentation"]
+CMD ["--help"]
diff --git a/segmentation/ome-zarr-autosegmentation-plugin/README.md b/segmentation/ome-zarr-autosegmentation-plugin/README.md
new file mode 100644
index 000000000..d9704f836
--- /dev/null
+++ b/segmentation/ome-zarr-autosegmentation-plugin/README.md
@@ -0,0 +1,28 @@
+# ome_zarr_autosegmentation (0.1.0)
+
+Automatically segments OME-Zarr datasets slice by slice using SAM2's automatic
+mask generator, writing the labels back as an OME-Zarr collection together with
+a Neuroglancer-compatible segment properties file and a multiscale pyramid.
+
+## Building
+
+To build the Docker image for this plugin, run `./build-docker.sh`.
+Download the SAM2 checkpoint you want to use from
+`https://github.com/facebookresearch/sam2?tab=readme-ov-file#download-checkpoints`
+and place it in a `models/` directory; the plugin loads
+`models/sam2.1_hiera_small.pt` relative to its working directory.
+
+## Install WIPP Plugin
+
+If WIPP is running, navigate to the plugins page and add a new plugin. Paste the
+contents of `plugin.json` into the pop-up window and submit.
+
+## Options
+
+This plugin takes 2 input arguments and 1 output argument:
+
+| Name    | Description                                  | I/O    | Type       | Default |
+|---------|----------------------------------------------|--------|------------|---------|
+| inpDir  | Input dataset to be processed by this plugin | Input  | collection | n/a     |
+| preview | Generate an output preview                   | Input  | boolean    | False   |
+| outDir  | Output collection                            | Output | collection | n/a     |
diff --git a/segmentation/ome-zarr-autosegmentation-plugin/VERSION b/segmentation/ome-zarr-autosegmentation-plugin/VERSION
new file mode 100644
index 000000000..6e8bf73aa
--- /dev/null
+++ b/segmentation/ome-zarr-autosegmentation-plugin/VERSION
@@ -0,0 +1 @@
+0.1.0
diff --git a/segmentation/ome-zarr-autosegmentation-plugin/build-docker.sh b/segmentation/ome-zarr-autosegmentation-plugin/build-docker.sh
new file mode 100755
index 000000000..6b77f22d4
--- /dev/null
+++ b/segmentation/ome-zarr-autosegmentation-plugin/build-docker.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+
+version=$(<VERSION)
+docker build . -t polusai/ome-zarr-autosegmentation-plugin:${version}
diff --git a/segmentation/ome-zarr-autosegmentation-plugin/pyproject.toml b/segmentation/ome-zarr-autosegmentation-plugin/pyproject.toml
new file mode 100644
--- /dev/null
+++ b/segmentation/ome-zarr-autosegmentation-plugin/pyproject.toml
+[project]
+name = "ome-zarr-autosegmentation"
+version = "0.1.0"
+requires-python = ">=3.11"
+dependencies = [
+    "torch>=2.1.0",
+    "torchvision>=0.16.0",
+    "greenlet>=3.1.1",
+    "ome-zarr>=0.9.0",
+    "bfio>=2.3.6",
+    "sam2>=1.1.0",
+    "ngff-zarr[dask-image,tensorstore]>=0.12.2",
+    "argolid>=0.0.6",
+    "typer>=0.15.1",
+]
+
+[dependency-groups]
+dev = [
+    "ruff>=0.8.0",
+]
+
+[tool.ruff.lint]
+extend-select = ["I"]
diff --git a/segmentation/ome-zarr-autosegmentation-plugin/run-plugin.sh b/segmentation/ome-zarr-autosegmentation-plugin/run-plugin.sh
new file mode 100755
index 000000000..f09d908d3
--- /dev/null
+++ b/segmentation/ome-zarr-autosegmentation-plugin/run-plugin.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+version=$(<VERSION)
+data_path=$(readlink --canonicalize ./data)
+
+# Inputs
+inpDir=/data/input
+
+# Output paths
+outDir=/data/output
+
+# Show the help options
+# docker run polusai/ome-zarr-autosegmentation-plugin:${version}
+
+docker run -v ${data_path}:/data \
+            polusai/ome-zarr-autosegmentation-plugin:${version} \
+            --inpDir ${inpDir} --outDir ${outDir}
diff --git a/segmentation/ome-zarr-autosegmentation-plugin/src/polus/images/segmentation/ome_zarr_autosegmentation/__main__.py b/segmentation/ome-zarr-autosegmentation-plugin/src/polus/images/segmentation/ome_zarr_autosegmentation/__main__.py
new file mode 100644
--- /dev/null
+++ b/segmentation/ome-zarr-autosegmentation-plugin/src/polus/images/segmentation/ome_zarr_autosegmentation/__main__.py
+"""Package entrypoint for ome_zarr_autosegmentation."""
+
+import json
+import logging
+from pathlib import Path
+
+import typer
+
+from polus.images.segmentation.ome_zarr_autosegmentation.autosegmentation import (
+    autosegmentation,
+)
+
+logging.basicConfig(
+    format="%(asctime)s - %(name)-8s - %(levelname)-8s - %(message)s",
+    datefmt="%d-%b-%y %H:%M:%S",
+)
+logger = logging.getLogger("polus.images.segmentation.ome_zarr_autosegmentation")
+
+app = typer.Typer(help="ome_zarr_autosegmentation.")
+
+
+def generate_preview(inp_dir: Path, out_dir: Path) -> None:
+    """Generate preview of the plugin outputs."""
+
+    preview = {}
+
+    with Path.open(out_dir / "preview.json", "w") as fw:
+        json.dump(preview, fw, indent=2)
+
+
+@app.command()
+def main(
+    inp_dir: Path = typer.Option(
+        ...,
+        "--inpDir",
+        "-i",
+        help="Input directory to be processed.",
+        exists=True,
+        readable=True,
+        file_okay=False,
+        resolve_path=True,
+    ),
+    out_dir: Path = typer.Option(
+        ...,
+        "--outDir",
+        "-o",
+        help="Output directory.",
+        exists=False,
+        writable=True,
+        file_okay=False,
+        resolve_path=True,
+    ),
+    preview: bool = typer.Option(
+        False,
+        "--preview",
+        "-v",
+        help="Preview of expected outputs (dry-run)",
+        show_default=False,
+    ),
+):
+    """ome_zarr_autosegmentation."""
+    logger.info(f"inpDir: {inp_dir}")
+    logger.info(f"outDir: {out_dir}")
+
+    if preview:
+        logger.info(f"generating preview data in: {out_dir}")
+        generate_preview(inp_dir, out_dir)
+        return
+
+    autosegmentation(inp_dir, out_dir)
+
+
+if __name__ == "__main__":
+    app()
diff --git a/segmentation/ome-zarr-autosegmentation-plugin/src/polus/images/segmentation/ome_zarr_autosegmentation/autosegmentation.py b/segmentation/ome-zarr-autosegmentation-plugin/src/polus/images/segmentation/ome_zarr_autosegmentation/autosegmentation.py
new file mode 100644
index 000000000..60c6dfbe9
--- /dev/null
+++ b/segmentation/ome-zarr-autosegmentation-plugin/src/polus/images/segmentation/ome_zarr_autosegmentation/autosegmentation.py
@@ -0,0 +1,427 @@
+import json
+import os
+import pathlib
+import shutil
+from typing import Dict, List, Tuple
+
+import dask.array as da
+import numpy as np
+import torch
+import zarr
+from argolid import PyramidGenerator3D
+from ngff_zarr import (
+    from_ngff_zarr,
+)
+from PIL import Image
+from sam2.automatic_mask_generator import SAM2AutomaticMaskGenerator
+from sam2.build_sam import build_sam2
+
+os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+
+CHUNK_SIZE = 1024  # Size of each chunk (1024×1024 pixels)
+CHUNK_OVERLAP = 100  # Overlap between chunks (100 pixels on each side)
+NUM_WORKERS = 1  # SAM2 automatic mask generation does not support batching
+
+
+def create_segment_properties_info():
+    """
+    Create an empty segment properties info file compatible with the Neuroglancer format.
+    """
+    segment_properties = {
+        "@type": "neuroglancer_segment_properties",
+        "inline": {
+            "ids": [],
+            "properties": [
+                {"id": "class", "type": "label", "values": []},
+                {"id": "description", "type": "description", "values": []},
+                {
+                    "id": "tags",
+                    "type": "tags",
+                    "tags": [
+                        "verified",
+                        "reviewed",
+                        "uncertain",
+                        "automated",
+                        "manual",
+                        "incomplete",
+                        "merged",
+                        "split",
+                    ],
+                    "tag_descriptions": [
+                        "Verified by expert annotation",
+                        "Reviewed but may need additional verification",
+                        "Confidence in classification is low",
+                        "Generated by automated pipeline",
+                        "Manually annotated",
+                        "Segmentation may be incomplete",
+                        "Result of merge operation",
+                        "Result of split operation",
+                    ],
+                    "values": [],
+                },
+            ],
+        },
+    }
+
+    return segment_properties
+
+
+def get_device():
+    """Get the appropriate device for the current system."""
+    if torch.backends.mps.is_available():
+        return torch.device("mps")
+    elif torch.cuda.is_available():
+        return torch.device("cuda")
+    return torch.device("cpu")
+
+
+def init_sam2_predictor(config_path, checkpoint_path, image_shape=None):
+    device = get_device()
+    model = build_sam2(config_path, checkpoint_path, device=str(device))
+
+    return SAM2AutomaticMaskGenerator(model)
+
+
+def normalize_to_uint8(array):
+    if array.dtype != np.uint8:
+        array_min = array.min()
+        array_max = array.max()
+        if array_max > array_min:
+            array = ((array - array_min) * 255 / (array_max - array_min)).astype(
+                np.uint8
+            )
+        else:
+            array = np.zeros_like(array, dtype=np.uint8)
+    return array
+
+
+def process_chunk(chunk, predictor):
+    """
+    Process a single chunk of an image and generate segmentation mask.
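+
+    The chunk is normalized to uint8, converted to a PIL image, and run
+    through the SAM2 automatic mask generator.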
+
+    Args:
+        chunk: Chunk of image data
+        predictor: SAM2 predictor
+
+    Returns:
+        Segmentation mask for the chunk
+    """
+    chunk = normalize_to_uint8(chunk)
+
+    img = Image.fromarray(chunk)
+
+    return generate_segmentation_mask(predictor, img)
+
+
+def generate_segmentation_mask(predictor, image):
+    img_array = np.array(image)
+
+    if len(img_array.shape) == 2 or (
+        len(img_array.shape) == 3 and img_array.shape[2] == 1
+    ):
+        # Stack the single channel three times to create RGB
+        img_array = np.stack([img_array] * 3, axis=-1)
+
+    if len(img_array.shape) != 3 or img_array.shape[2] != 3:
+        raise ValueError(f"Unexpected image shape: {img_array.shape}")
+
+    with torch.inference_mode():
+        masks = predictor.generate(img_array)
+
+    height, width = img_array.shape[:2]
+    label_image = np.zeros((height, width), dtype=np.uint16)
+
+    for idx, mask in enumerate(masks, start=1):
+        label_image[mask["segmentation"]] = idx
+
+    return label_image
+
+
+def initialize_job(
+    output_path: pathlib.Path,
+) -> Tuple[Dict, bool, List[int]]:
+    """
+    Create directories and set up the progress file.
+    """
+    progress_file = output_path / "progress.json"
+    resuming = progress_file.exists()
+
+    if resuming:
+        print("Found existing progress file. Attempting to resume segmentation.")
+        with open(progress_file, "r") as f:
+            progress = json.load(f)
+        processed_slices = progress.get("processed_slices", [])
+    else:
+        print("Starting new segmentation job")
+        output_path.mkdir(parents=True, exist_ok=True)
+        processed_slices = []
+        progress = {"processed_slices": processed_slices, "total_slices": 0}
+        with open(progress_file, "w") as f:
+            json.dump(progress, f)
+
+    return progress, resuming, processed_slices
+
+
+def initialize_zarr_store(
+    output_path: pathlib.Path, original_dataset_path: pathlib.Path, dtype=np.uint16
+) -> zarr.Group:
+    """
+    Copy the original dataset to the output location, where it is modified in place later.
+    """
+    print(f"Copying original dataset from {original_dataset_path} to {output_path}")
+
+    # Copy the original dataset to the output location
+    if original_dataset_path != output_path:
+        for item in original_dataset_path.iterdir():
+            s = original_dataset_path / item.name
+            d = output_path / item.name
+            if os.path.isdir(s):
+                shutil.copytree(s, d, dirs_exist_ok=True)
+            else:
+                shutil.copy2(s, d)
+
+    # Initialize segment properties
+    segment_properties = create_segment_properties_info()
+    info_path = output_path / "info"
+    with open(info_path, "w") as f:
+        json.dump(segment_properties, f, indent=2)
+
+    # Open the zarr store at the output location
+    store = zarr.DirectoryStore(str(output_path), dimension_separator="/")
+    root = zarr.group(store)
+
+    return root
+
+
+def update_segment_properties(
+    output_path: pathlib.Path, segmentation: np.ndarray
+) -> None:
+    """
+    Update segment properties file with new segments from a segmentation mask.
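+
+    New label IDs found in the mask are appended to the Neuroglancer segment
+    properties "info" file and tagged as "automated".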
+
+    Args:
+        output_path: Path to the output directory
+        segmentation: Segmentation mask with new segments
+    """
+    info_path = output_path / "info"
+    with open(info_path, "r") as f:
+        segment_properties = json.load(f)
+
+    # Add any new segments from this slice
+    unique_ids = np.unique(segmentation)
+    unique_ids = unique_ids[unique_ids > 0]
+
+    str_ids = [str(int(seg_id)) for seg_id in unique_ids]
+    current_str_ids = segment_properties["inline"]["ids"]
+
+    # Add any new IDs not already in the properties
+    new_ids = [seg_id for seg_id in str_ids if seg_id not in current_str_ids]
+    if new_ids:
+        for seg_id in new_ids:
+            segment_properties["inline"]["ids"].append(seg_id)
+            segment_properties["inline"]["properties"][0]["values"].append(
+                f"segment {seg_id}"
+            )
+            segment_properties["inline"]["properties"][1]["values"].append(
+                f"Automatically generated segment {seg_id}"
+            )
+            segment_properties["inline"]["properties"][2]["values"].append(
+                [3]
+            )  # Index of the "automated" tag
+
+    # Write updated properties
+    with open(info_path, "w") as f:
+        json.dump(segment_properties, f, indent=2)
+
+
+def update_progress(
+    output_path: pathlib.Path,
+    progress: Dict,
+    processed_slices: List[int],
+    slice_index: int,
+) -> None:
+    """
+    Update the progress file with the newly processed slice.
+
+    Args:
+        output_path: Path to the output directory
+        progress: Progress dictionary
+        processed_slices: List of processed slice indices
+        slice_index: Index of the newly processed slice
+    """
+    if slice_index not in processed_slices:
+        processed_slices.append(slice_index)
+        progress["processed_slices"] = processed_slices
+        progress_file = output_path / "progress.json"
+        with open(progress_file, "w") as f:
+            json.dump(progress, f)
+
+
+def process_slice(
+    volume: np.ndarray,
+    slice_index: int,
+    predictor: SAM2AutomaticMaskGenerator,
+    zarr_root: zarr.Group,
+    ndim: int,
+    output_path: pathlib.Path,
+    progress: Dict,
+    processed_slices: List[int],
+    num_slices: int,
+) -> None:
+    """
+    Process a single slice and update zarr store.
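+
+    Small slices are segmented in one pass; larger slices are processed in
+    overlapping chunks with dask map_overlap. The result is written back to
+    the zarr store immediately so progress survives interruptions.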
+
+    Args:
+        volume: Volume data
+        slice_index: Index of the slice to process
+        predictor: SAM2 predictor
+        zarr_root: Zarr root group
+        ndim: Number of dimensions
+        output_path: Path to the output directory
+        progress: Progress dictionary
+        processed_slices: List of processed slice indices
+        num_slices: Total number of slices
+    """
+    if slice_index in processed_slices:
+        print(f"Skipping already processed slice {slice_index}/{num_slices}")
+        return
+
+    # Get the slice and compute
+    slice_data = volume[slice_index]
+    # Check if the slice is small enough to process directly
+    if slice_data.shape[0] <= CHUNK_SIZE and slice_data.shape[1] <= CHUNK_SIZE:
+        slice_data = np.array(slice_data)
+        slice_data = normalize_to_uint8(slice_data)
+        img = Image.fromarray(slice_data)
+        segmentation = generate_segmentation_mask(predictor, img)
+    else:
+        # For large images, use dask map_overlap to process in chunks
+        print(
+            f"Processing slice {slice_index} in {CHUNK_SIZE}×{CHUNK_SIZE} chunks "
+            f"with {CHUNK_OVERLAP} pixel overlap"
+        )
+
+        # Use map_overlap to process the image in chunks with overlap
+        chunked_result = da.map_overlap(
+            process_chunk,
+            slice_data,
+            depth={0: CHUNK_OVERLAP, 1: CHUNK_OVERLAP},
+            boundary="reflect",
+            dtype=np.uint16,
+            chunks=(CHUNK_SIZE, CHUNK_SIZE),
+            predictor=predictor,
+        )
+
+        # Compute the final result
+        segmentation = chunked_result.compute(scheduler="synchronous")
+
+    zarr_array = zarr_root["0"]
+    z = zarr.open(zarr_array.store, path=zarr_array.path, mode="r+")
+
+    # Update the specific slice in the zarr array to save progress
+    if ndim == 5:  # (T, C, Z, Y, X)
+        z[0, 0, slice_index] = segmentation
+    else:  # (C, Z, Y, X)
+        z[0, slice_index] = segmentation
+
+    # Update segment properties
+    update_segment_properties(output_path, segmentation)
+
+    # Update progress
+    update_progress(output_path, progress, processed_slices, slice_index)
+
+    # Print progress
+    if slice_index % 10 == 0 or slice_index == num_slices - 1:
+        print(
+            f"Processed slice {slice_index}/{num_slices} "
+            f"({len(processed_slices)}/{num_slices} complete)"
+        )
+
+
+def autosegment_dataset(input_dir: pathlib.Path, output_path: pathlib.Path):
+    progress, resuming, processed_slices = initialize_job(output_path)
+
+    multiscales = from_ngff_zarr(input_dir)
+    image_data = multiscales.images[0]  # Get the highest resolution image
+
+    ndim = len(image_data.data.shape)
+    print(f"Dataset shape: {image_data.data.shape}")
+    print(f"Data chunks: {image_data.data.chunks}")
+
+    # Extract the correct volume based on dimensionality
+    if ndim == 5:  # Typically (T, C, Z, Y, X)
+        print("5D dataset detected (T, C, Z, Y, X)")
+        volume = image_data.data[0, 0]
+        slice_shape = (volume[0].shape[0], volume[0].shape[1])
+    elif ndim == 4:  # Typically (C, Z, Y, X)
+        print("4D dataset detected (C, Z, Y, X)")
+        volume = image_data.data[0]
+        slice_shape = (volume[0].shape[0], volume[0].shape[1])
+    else:
+        raise ValueError(f"Unexpected number of dimensions: {ndim}")
+
+    num_slices = volume.shape[0]
+    print(f"Processing {num_slices} Z-slices from channel 0")
+
+    if not resuming or progress.get("total_slices", 0) != num_slices:
+        progress["total_slices"] = num_slices
+        with open(output_path / "progress.json", "w") as f:
+            json.dump(progress, f)
+
+    sam2_predictor = init_sam2_predictor(
+        "configs/sam2.1/sam2.1_hiera_s.yaml",
+        "models/sam2.1_hiera_small.pt",
+        image_shape=slice_shape,
+    )
+
+    if not resuming:
+        root = initialize_zarr_store(
+            output_path,
+            input_dir,
+        )
+    else:
+        store = zarr.DirectoryStore(str(output_path), dimension_separator="/")
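+        # Re-open the store that was copied on the first run; slices already
+        # written before the interruption are preserved.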
+        root = zarr.group(store)
+
+    for z in range(num_slices):
+        process_slice(
+            volume,
+            z,
+            sam2_predictor,
+            root,
+            ndim,
+            output_path,
+            progress,
+            processed_slices,
+            num_slices,
+        )
+
+    zarr_loc_dir = str(output_path)
+    base_scale_key = 0
+    num_levels = len(multiscales.images)
+
+    pyramid_gen = PyramidGenerator3D(zarr_loc_dir, base_scale_key)
+    pyramid_gen.generate_pyramid(num_levels)
+
+    # Set permissions recursively on the output directory
+    print(f"Setting permissions on {output_path}")
+    for root_dir, dirs, files in os.walk(str(output_path)):
+        for dir_name in dirs:
+            dir_path = os.path.join(root_dir, dir_name)
+            os.chmod(dir_path, 0o755)
+
+        # Set 0755 (-rwxr-xr-x) permissions on files
+        for file_name in files:
+            file_path = os.path.join(root_dir, file_name)
+            os.chmod(file_path, 0o755)
+
+
+def autosegmentation(input_dir: pathlib.Path, output_dir: pathlib.Path):
+    """ome_zarr_autosegmentation.
+
+    Args:
+        input_dir: input directory to process
+        output_dir: output ome-zarr directory
+    Returns:
+        None
+    """
+    autosegment_dataset(input_dir, output_dir)
diff --git a/segmentation/ome-zarr-autosegmentation-plugin/tests/__init__.py b/segmentation/ome-zarr-autosegmentation-plugin/tests/__init__.py
new file mode 100644
index 000000000..e665adfe7
--- /dev/null
+++ b/segmentation/ome-zarr-autosegmentation-plugin/tests/__init__.py
@@ -0,0 +1 @@
+"""Tests for ome_zarr_autosegmentation."""
diff --git a/segmentation/ome-zarr-autosegmentation-plugin/tests/conftest.py b/segmentation/ome-zarr-autosegmentation-plugin/tests/conftest.py
new file mode 100644
index 000000000..fd0c32168
--- /dev/null
+++ b/segmentation/ome-zarr-autosegmentation-plugin/tests/conftest.py
@@ -0,0 +1,147 @@
+"""Test fixtures.
+
+Set up all data used in tests.
+"""
+import itertools
+import shutil
+import tempfile
+from pathlib import Path
+
+import numpy as np
+import pytest
+from bfio import BioReader, BioWriter
+
+
+def pytest_addoption(parser: pytest.Parser) -> None:
+    """Add options to pytest."""
+    parser.addoption(
+        "--downloads",
+        action="store_true",
+        dest="downloads",
+        default=False,
+        help="run tests that download large data files",
+    )
+    parser.addoption(
+        "--slow",
+        action="store_true",
+        dest="slow",
+        default=False,
+        help="run slow tests",
+    )
+
+
+IMAGE_SIZES = [(1024 * (2**i), 1024 * (2**i)) for i in range(1, 2)]
+LARGE_IMAGE_SIZES = [(1024 * (2**i), 1024 * (2**i)) for i in range(4, 5)]
+PIXEL_TYPES = [np.uint8, float]
+PARAMS = [
+    (image_size, pixel_type)
+    for image_size, pixel_type in itertools.product(IMAGE_SIZES, PIXEL_TYPES)
+]
+LARGE_DATASET_PARAMS = [
+    (image_size, pixel_type)
+    for image_size, pixel_type in itertools.product(LARGE_IMAGE_SIZES, PIXEL_TYPES)
+]
+
+
+FixtureReturnType = tuple[
+    Path,  # input dir
+    Path,  # output dir
+    Path,  # ground truth dir
+    Path,  # input image path
+    Path,  # ground truth path
+]
+
+
+@pytest.fixture(params=PARAMS)
+def generate_test_data(request: pytest.FixtureRequest) -> FixtureReturnType:
+    """Generate staging temporary directories with test data and ground truth."""
+    # collect test params
+    image_size, pixel_type = request.param
+    test_data = _generate_test_data(image_size, pixel_type)
+    yield from test_data
+
+
+@pytest.fixture(params=LARGE_DATASET_PARAMS)
+def generate_large_test_data(request: pytest.FixtureRequest) -> FixtureReturnType:
+    """Generate staging temporary directories with test data and ground truth."""
+    # collect test params
+    image_size, pixel_type = request.param
+    test_data = _generate_test_data(image_size, pixel_type)
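+    # _generate_test_data is a generator; "yield from" it so its cleanup
+    # (shutil.rmtree) runs at fixture teardown.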
+    yield from test_data
+
+
+def _generate_test_data(
+    image_size: tuple[int, int], pixel_type: type
+) -> FixtureReturnType:
+    """Generate staging temporary directories with test data and ground truth."""
+    image_x, image_y = image_size
+
+    # staging area
+    data_dir = Path(tempfile.mkdtemp(suffix="_data_dir"))
+    inp_dir = data_dir.joinpath("inp_dir")
+    inp_dir.mkdir(exist_ok=True)
+    out_dir = data_dir.joinpath("out_dir")
+    out_dir.mkdir(exist_ok=True)
+    ground_truth_dir = data_dir.joinpath("ground_truth_dir")
+    ground_truth_dir.mkdir(exist_ok=True)
+
+    # generate image and ground_truth
+    img_path = inp_dir.joinpath("img.ome.tif")
+    image = gen_2D_image(img_path, image_x, image_y, pixel_type)
+    ground_truth_path = ground_truth_dir.joinpath("ground_truth.ome.tif")
+    gen_ground_truth(img_path, ground_truth_path)
+
+    yield inp_dir, out_dir, ground_truth_dir, img_path, ground_truth_path
+
+    shutil.rmtree(data_dir)
+
+
+def gen_2D_image(img_path, image_x, image_y, pixel_type):
+    """Generate a random 2D square image."""
+    if np.issubdtype(pixel_type, np.floating):
+        rng = np.random.default_rng()
+        image = rng.uniform(0.0, 1.0, size=(image_y, image_x)).astype(pixel_type)
+    else:
+        image = np.random.randint(0, 255, size=(image_y, image_x), dtype=pixel_type)
+
+    with BioWriter(img_path) as writer:
+        (y, x) = image.shape
+        writer.Y = y
+        writer.X = x
+        writer.Z = 1
+        writer.C = 1
+        writer.T = 1
+        writer.dtype = image.dtype
+        writer[:] = image[:]
+
+    return image
+
+
+def gen_ground_truth(img_path: Path, ground_truth_path: Path):
+    """Generate ground truth from the image data.
+
+    Here we generate a simple binary mask.
+    """
+    with BioReader(img_path) as reader:
+        with BioWriter(ground_truth_path, metadata=reader.metadata) as writer:
+            ground_truth = np.asarray(reader[:] != 0)
+            writer[:] = ground_truth
+
+    return ground_truth
\ No newline at end of file
diff --git a/segmentation/ome-zarr-autosegmentation-plugin/tests/test_cli.py b/segmentation/ome-zarr-autosegmentation-plugin/tests/test_cli.py
new file mode 100644
index 000000000..6a580a43a
--- /dev/null
+++ b/segmentation/ome-zarr-autosegmentation-plugin/tests/test_cli.py
@@ -0,0 +1,96 @@
+"""Testing the Command Line Tool."""
+
+import faulthandler
+import json
+from pathlib import Path
+
+from typer.testing import CliRunner
+
+from polus.images.segmentation.ome_zarr_autosegmentation.__main__ import app
+
+from .conftest import FixtureReturnType
+
+faulthandler.enable()
+
+
+def test_cli(generate_test_data: FixtureReturnType) -> None:  # noqa
+    """Test the command line."""
+    inp_dir, out_dir, ground_truth_dir, img_path, ground_truth_path = generate_test_data  # noqa
+
+    runner = CliRunner()
+
+    result = runner.invoke(
+        app,
+        [
+            "--inpDir",
+            str(inp_dir),
+            "--outDir",
+            str(out_dir),
+        ],
+    )
+
+    assert result.exit_code == 0
+
+
+def test_cli_short(generate_test_data: FixtureReturnType):  # noqa
+    """Test the command line."""
+    runner = CliRunner()
+
+    inp_dir, out_dir, _, _, _ = generate_test_data  # noqa
+
+    result = runner.invoke(
+        app,
+        [
+            "-i",
+            str(inp_dir),
+            "-o",
+            str(out_dir),
+        ],
+    )
+
+    assert result.exit_code == 0
+
+
+def test_cli_preview(generate_test_data: FixtureReturnType):  # noqa
+    """Test the preview option."""
+    runner = CliRunner()
+
+    inp_dir, out_dir, _, _, _ = generate_test_data  # noqa
+
+    result = runner.invoke(
+        app,
+        [
+            "--inpDir",
+            str(inp_dir),
+            "--outDir",
+            str(out_dir),
+            "--preview",
+        ],
+    )
+
+    assert result.exit_code == 0
+
+    with Path.open(out_dir / "preview.json") as file:
"preview.json") as file: + plugin_json = json.load(file) + + # verify we generate the preview file + assert plugin_json == {} + + +def test_cli_bad_input(generate_test_data : FixtureReturnType): # noqa + """Test bad inputs.""" + runner = CliRunner() + + inp_dir, out_dir, _, _, _ = generate_test_data #noqa + # replace with a bad path + inp_dir = "/does_not_exists" + + result = runner.invoke( + app, + [ + "--inpDir", + inp_dir, + "--outDir", + out_dir, + ], + ) + + assert result.exc_info[0] is SystemExit diff --git a/segmentation/ome-zarr-autosegmentation-plugin/tests/test_ome_zarr_autosegmentation.py b/segmentation/ome-zarr-autosegmentation-plugin/tests/test_ome_zarr_autosegmentation.py new file mode 100644 index 000000000..29e017936 --- /dev/null +++ b/segmentation/ome-zarr-autosegmentation-plugin/tests/test_ome_zarr_autosegmentation.py @@ -0,0 +1,22 @@ +"""Tests for ome_zarr_autosegmentation.""" + +import pytest +from polus.plugins.images.segmentation.ome_zarr_autosegmentation.ome_zarr_autosegmentation import ( + ome_zarr_autosegmentation, +) +from .conftest import FixtureReturnType + + +def test_ome_zarr_autosegmentation(generate_test_data : FixtureReturnType): + """Test ome_zarr_autosegmentation.""" + inp_dir, out_dir, ground_truth_dir, img_path, ground_truth_path = generate_test_data + filepattern = ".*" + assert ome_zarr_autosegmentation(inp_dir, filepattern, out_dir) == None + + +@pytest.mark.skipif("not config.getoption('slow')") +def test_ome_zarr_autosegmentation(generate_large_test_data : FixtureReturnType): + """Test ome_zarr_autosegmentation.""" + inp_dir, out_dir, ground_truth_dir, img_path, ground_truth_path = generate_large_test_data + filepattern = ".*" + assert ome_zarr_autosegmentation(inp_dir, filepattern, out_dir) == None \ No newline at end of file