
bioimageio.core.contrib #316

Closed
wants to merge 24 commits into from
16 changes: 16 additions & 0 deletions .github/workflows/build.yml
@@ -142,3 +142,19 @@ jobs:
run: |
conda install -n base -c conda-forge conda-build pip -y
conda build -c conda-forge conda-recipe

generate_workflow_rdfs: # todo: move to contrib repo
runs-on: ubuntu-latest
needs: test
steps:
- uses: actions/checkout@v2
- name: Set up Python 3.9
uses: actions/setup-python@v2
with:
python-version: 3.9
- name: Install dependencies
# todo: use mamba envs
run: |
pip install . dask
- name: Generate workflow RDFs
run: python scripts/generate_workflow_rdfs.py contrib_a
13 changes: 7 additions & 6 deletions .gitignore
@@ -1,8 +1,9 @@
build/
dist/
.idea/
*.egg-info/
cache
**/tmp
.tox/
*.egg-info/
*.pyc
.idea/
.mypy_cache/
.tox/
build/
cache
dist/
2 changes: 1 addition & 1 deletion bioimageio/core/VERSION
@@ -1,3 +1,3 @@
{
"version": "0.5.7"
"version": "0.5.8"
}
8 changes: 3 additions & 5 deletions bioimageio/core/__main__.py
@@ -4,16 +4,14 @@
import sys
import warnings
from glob import glob

from pathlib import Path
from pprint import pformat, pprint
from pprint import pformat
from typing import List, Optional

import typer

from bioimageio.core import __version__, prediction, commands, resource_tests, load_raw_resource_description
from bioimageio.core import __version__, commands, prediction, resource_tests
from bioimageio.core.common import TestSummary
from bioimageio.core.prediction_pipeline import get_weight_formats
from bioimageio.spec.__main__ import app, help_version as help_version_spec
from bioimageio.spec.model.raw_nodes import WeightsFormat

@@ -244,7 +242,7 @@ def predict_images(
tiling = json.loads(tiling.replace("'", '"'))
assert isinstance(tiling, dict)

# this is a weird typer bug: default devices are empty tuple although they should be None
# this is a weird typer bug: default devices are empty tuple, although they should be None
if len(devices) == 0:
devices = None
prediction.predict_images(
8 changes: 4 additions & 4 deletions bioimageio/core/build_spec/build_model.py
@@ -14,7 +14,7 @@
import bioimageio.spec.model as model_spec
from bioimageio.core import export_resource_package, load_raw_resource_description
from bioimageio.core.resource_io.nodes import URI
from bioimageio.spec.shared.raw_nodes import ImportableModule, ImportableSourceFile
from bioimageio.spec.shared.raw_nodes import CallableFromModule, CallableFromSourceFile
from bioimageio.spec.shared import resolve_local_source, resolve_source

try:
@@ -58,12 +58,12 @@ def _get_pytorch_state_dict_weight_kwargs(architecture, model_kwargs, root):
# note: path itself might include : for absolute paths in windows
*arch_file_parts, callable_name = architecture.replace("::", ":").split(":")
arch_file = _ensure_local(":".join(arch_file_parts), root)
arch = ImportableSourceFile(callable_name, arch_file)
arch = CallableFromSourceFile(callable_name, arch_file)
arch_hash = _get_hash(root / arch.source_file)
weight_kwargs["architecture_sha256"] = arch_hash
else:
arch = spec.shared.fields.ImportableSource().deserialize(architecture)
assert isinstance(arch, ImportableModule)
arch = spec.shared.fields.CallableSource().deserialize(architecture)
assert isinstance(arch, CallableFromModule)

weight_kwargs["architecture"] = arch
return weight_kwargs, tmp_archtecture
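For orientation: the `::`/`:` handling above splits an architecture source string into a file path and a callable name while keeping Windows drive letters intact. A minimal illustrative helper mirroring that logic (an assumption for exposition, not code from this PR):

# hypothetical helper mirroring the parsing shown above
def split_architecture(architecture: str):
    # "C:/models/arch.py::MyNet" -> ("C:/models/arch.py", "MyNet")
    *arch_file_parts, callable_name = architecture.replace("::", ":").split(":")
    return ":".join(arch_file_parts), callable_name

assert split_architecture("C:/models/arch.py::MyNet") == ("C:/models/arch.py", "MyNet")
assert split_architecture("unet/model.py:UNet2d") == ("unet/model.py", "UNet2d")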
30 changes: 20 additions & 10 deletions bioimageio/core/image_helper.py
@@ -13,7 +13,7 @@
#


def transform_input_image(image: np.ndarray, tensor_axes: str, image_axes: Optional[str] = None):
def transform_input_image(image: np.ndarray, tensor_axes: Sequence[str], image_axes: Optional[str] = None):
"""Transform input image into output tensor with desired axes.

Args:
@@ -35,7 +35,16 @@ def transform_input_image(image: np.ndarray, tensor_axes: str, image_axes: Optional[str] = None):
image_axes = "bczyx"
else:
raise ValueError(f"Invalid number of image dimensions: {ndim}")
tensor = DataArray(image, dims=tuple(image_axes))

# instead of 'b' we might want 'batch', etc...
axis_letter_map = {
letter: name
for letter, name in {"b": "batch", "c": "channel", "i": "index", "t": "time"}.items()
if name in tensor_axes # only do this mapping if the full name is in the desired tensor_axes
}
image_axes = tuple(axis_letter_map.get(a, a) for a in image_axes)

tensor = DataArray(image, dims=image_axes)
# expand the missing image axes
missing_axes = tuple(set(tensor_axes) - set(image_axes))
tensor = tensor.expand_dims(dim=missing_axes)
@@ -75,9 +84,10 @@ def transform_output_tensor(tensor: np.ndarray, tensor_axes: str, output_axes: str):


def to_channel_last(image):
chan_id = image.dims.index("c")
c = "c" if "c" in image.dims else "channel"
chan_id = image.dims.index(c)
if chan_id != image.ndim - 1:
target_axes = tuple(ax for ax in image.dims if ax != "c") + ("c",)
target_axes = tuple(ax for ax in image.dims if ax != c) + (c,)
image = image.transpose(*target_axes)
return image

@@ -95,27 +105,27 @@ def load_image(in_path, axes: Sequence[str]) -> DataArray:
is_volume = "z" in axes
im = imageio.volread(in_path) if is_volume else imageio.imread(in_path)
im = transform_input_image(im, axes)
return DataArray(im, dims=axes)
return DataArray(im, dims=tuple(axes))


def load_tensors(sources, tensor_specs: List[Union[InputTensor, OutputTensor]]) -> List[DataArray]:
return [load_image(s, sspec.axes) for s, sspec in zip(sources, tensor_specs)]


def save_image(out_path, image):
ext = os.path.splitext(out_path)[1]
def save_image(out_path: os.PathLike, image):
ext = os.path.splitext(str(out_path))[1]
if ext == ".npy":
np.save(out_path, image)
np.save(str(out_path), image)
else:
is_volume = "z" in image.dims

# squeeze batch or channel axes if they are singletons
squeeze = {ax: 0 if (ax in ("b", "c", "batch", "channel") and sh == 1) else slice(None) for ax, sh in zip(image.dims, image.shape)}
image = image[squeeze]

if "b" in image.dims:
if "b" in image.dims or "batch" in image.dims:
raise RuntimeError(f"Cannot save prediction with batchsize > 1 as {ext}-file")
if "c" in image.dims: # image formats need channel last
if "c" in image.dims or "channel" in image.dims: # image formats need channel last
image = to_channel_last(image)

save_function = imageio.volsave if is_volume else imageio.imsave
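Taken together, the image_helper changes let tensor specs use full axis names ("batch", "channel") alongside the single-letter convention. A hypothetical usage sketch (assuming the elided dimension branch maps a 2D array to image_axes "yx"; not part of the diff):

import numpy as np

raw = np.zeros((256, 256))  # plain 2D image, interpreted as "yx"
tensor = transform_input_image(raw, tensor_axes=("batch", "channel", "y", "x"))
# "b"/"c" are expanded as singleton dimensions under their full names
assert set(tensor.dims) == {"batch", "channel", "y", "x"}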
16 changes: 8 additions & 8 deletions bioimageio/core/prediction_pipeline/_combined_processing.py
@@ -28,30 +28,30 @@ class ProcessingInfo:

class CombinedProcessing:
def __init__(self, combine_tensors: Dict[TensorName, ProcessingInfo]):
self._procs = []
self.procs = []
known = dict(KNOWN_PROCESSING["pre"])
known.update(KNOWN_PROCESSING["post"])

# ensure all tensors have correct data type before any processing
for tensor_name, info in combine_tensors.items():
if info.assert_dtype_before is not None:
self._procs.append(AssertDtype(tensor_name=tensor_name, dtype=info.assert_dtype_before))
self.procs.append(AssertDtype(tensor_name=tensor_name, dtype=info.assert_dtype_before))

if info.ensure_dtype_before is not None:
self._procs.append(EnsureDtype(tensor_name=tensor_name, dtype=info.ensure_dtype_before))
self.procs.append(EnsureDtype(tensor_name=tensor_name, dtype=info.ensure_dtype_before))

for tensor_name, info in combine_tensors.items():
for step in info.steps:
self._procs.append(known[step.name](tensor_name=tensor_name, **step.kwargs))
self.procs.append(known[step.name](tensor_name=tensor_name, **step.kwargs))

if info.assert_dtype_after is not None:
self._procs.append(AssertDtype(tensor_name=tensor_name, dtype=info.assert_dtype_after))
self.procs.append(AssertDtype(tensor_name=tensor_name, dtype=info.assert_dtype_after))

# ensure tensor has correct data type right after its processing
if info.ensure_dtype_after is not None:
self._procs.append(EnsureDtype(tensor_name=tensor_name, dtype=info.ensure_dtype_after))
self.procs.append(EnsureDtype(tensor_name=tensor_name, dtype=info.ensure_dtype_after))

self.required_measures: RequiredMeasures = self._collect_required_measures(self._procs)
self.required_measures: RequiredMeasures = self._collect_required_measures(self.procs)
self.tensor_names = list(combine_tensors)

@classmethod
@@ -85,7 +85,7 @@ def from_tensor_specs(cls, tensor_specs: List[Union[nodes.InputTensor, nodes.OutputTensor]]):
return inst

def apply(self, sample: Sample, computed_measures: ComputedMeasures) -> None:
for proc in self._procs:
for proc in self.procs:
proc.set_computed_measures(computed_measures)
sample[proc.tensor_name] = proc.apply(sample[proc.tensor_name])

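Renaming `_procs` to `procs` makes the assembled chain part of the public surface. One plausible downstream use, assumed here for illustration (the PR does not show a caller):

# hypothetical introspection of the now-public processing chain
pre = CombinedProcessing.from_tensor_specs(model_inputs)  # `model_inputs` is illustrative
for proc in pre.procs:
    print(type(proc).__name__, "on tensor", proc.tensor_name)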
@@ -55,7 +55,7 @@ def _unload(self) -> None:
def get_nn_instance(model_node: nodes.Model, **kwargs):
weight_spec = model_node.weights.get("pytorch_state_dict")
assert weight_spec is not None
assert isinstance(weight_spec.architecture, nodes.ImportedSource)
assert isinstance(weight_spec.architecture, nodes.ImportedCallable)
model_kwargs = weight_spec.kwargs
joined_kwargs = {} if model_kwargs is missing else dict(model_kwargs)
joined_kwargs.update(kwargs)
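The elided tail of `get_nn_instance` presumably instantiates the network from the resolved callable; a sketch of the assumed behavior, based only on the kwargs merging visible above:

# minimal sketch (assumption, not the file's actual tail)
def _instantiate(weight_spec, joined_kwargs):
    return weight_spec.architecture(**joined_kwargs)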
13 changes: 8 additions & 5 deletions bioimageio/core/prediction_pipeline/_prediction_pipeline.py
@@ -106,8 +106,12 @@ def __init__(
self._output_specs = bioimageio_model.outputs
else:
assert isinstance(bioimageio_model, raw_nodes.Model)
self._input_specs = [resolve_raw_node(s, nodes) for s in bioimageio_model.inputs]
self._output_specs = [resolve_raw_node(s, nodes) for s in bioimageio_model.outputs]
self._input_specs = [
resolve_raw_node(s, nodes, root_path=bioimageio_model.root_path) for s in bioimageio_model.inputs
]
self._output_specs = [
resolve_raw_node(s, nodes, root_path=bioimageio_model.root_path) for s in bioimageio_model.outputs
]

self._preprocessing = preprocessing
self._postprocessing = postprocessing
@@ -207,11 +211,10 @@ def create_prediction_pipeline(
if isinstance(bioimageio_model, nodes.Model):
ipts = bioimageio_model.inputs
outs = bioimageio_model.outputs

else:
assert isinstance(bioimageio_model, raw_nodes.Model)
ipts = [resolve_raw_node(s, nodes) for s in bioimageio_model.inputs]
outs = [resolve_raw_node(s, nodes) for s in bioimageio_model.outputs]
ipts = [resolve_raw_node(s, nodes, root_path=bioimageio_model.root_path) for s in bioimageio_model.inputs]
outs = [resolve_raw_node(s, nodes, root_path=bioimageio_model.root_path) for s in bioimageio_model.outputs]

preprocessing = CombinedProcessing.from_tensor_specs(ipts)

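Passing `root_path` matters because a raw model description can reference files relative to its package root; without it, relative sources in the input/output specs fail to resolve. A hedged usage sketch (the path is illustrative):

from bioimageio.core import load_raw_resource_description
from bioimageio.core.prediction_pipeline import create_prediction_pipeline

raw_model = load_raw_resource_description("path/to/model/rdf.yaml")  # illustrative path
# relative sources inside inputs/outputs now resolve against raw_model.root_path
pp = create_prediction_pipeline(bioimageio_model=raw_model)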
5 changes: 2 additions & 3 deletions bioimageio/core/prediction_pipeline/_processing.py
@@ -2,9 +2,8 @@
see https://github.com/bioimage-io/spec-bioimage-io/blob/gh-pages/preprocessing_spec_latest.md
and https://github.com/bioimage-io/spec-bioimage-io/blob/gh-pages/postprocessing_spec_latest.md
"""
import numbers
from dataclasses import InitVar, dataclass, field, fields
from typing import List, Mapping, Optional, Sequence, Tuple, Type, Union
from dataclasses import dataclass, field, fields
from typing import Mapping, Optional, Sequence, Tuple, Type, Union

import numpy
import numpy as np