diff --git a/README.md b/README.md
index 333b5fab..71349572 100644
--- a/README.md
+++ b/README.md
@@ -375,6 +375,24 @@ The model specification and its validation tools can be found at
-
+
+ Module Index
+ Contents

Submodules

API Documentation

@@ -137,6 +158,9 @@

API Documentation

  • dump_description
  • + enable_determinism
  • load_dataset_description
  • @@ -347,7 +371,7 @@

    API Documentation

    -
    bioimageio.core 0.6.10
    +
    bioimageio.core 0.7.0
built with pdoc

Get started
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
source          https://uk1s3.embassy.ebi.ac.uk/public-datasets/bioimage.io/powerful-chipmunk/1/files/rdf.yaml
format version  model 0.4.10
- bioimageio.spec 0.5.3post4
+ bioimageio.spec 0.5.3post4
bioimageio.core 0.6.8

@@ -412,7 +436,7 @@

    Get started

━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
✔️ initialized ModelDescr to describe model 0.4.10
- ✔️ bioimageio.spec format validation model 0.4.10
+ ✔️ bioimageio.spec format validation model 0.4.10
🔍 context.perform_io_checks       True
🔍 context.root                    https://uk1s3.embassy.ebi.ac.uk/public-datasets/bioimage.io/powerful-chipmunk/1/files
🔍 context.known_files.weights.pt  3bd9c518c8473f1e35abb7624f82f3aa92f1015e66fb1f6a9d08444e1f2f5698

@@ -442,7 +466,7 @@

    Get started

━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
source          https://uk1s3.embassy.ebi.ac.uk/public-datasets/bioimage.io/impartial-shrimp/1.1/files/rdf.yaml
format version  model 0.5.3
- bioimageio.spec 0.5.3.2
+ bioimageio.spec 0.5.3.2
bioimageio.core 0.6.9

@@ -451,7 +475,7 @@

    Get started

✔️ initialized ModelDescr to describe model 0.5.3
- ✔️ bioimageio.spec format validation model 0.5.3
+ ✔️ bioimageio.spec format validation model 0.5.3
🔍 context.perform_io_checks  False
🔍 context.warning_level      error

@@ -742,7 +766,7 @@

    🐍 Use in Python

bioimageio.core is a python package that implements prediction with bioimageio models including standardized pre- and postprocessing operations.
-These models are described by---and can be loaded with---the bioimageio.spec package.

    +These models are described by---and can be loaded with---the bioimageio.spec package.

    In addition bioimageio.core provides functionality to convert model weight formats.
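A minimal usage sketch (it assumes network access to the example model URL shown in the validation output above, and that `test_model` accepts a loaded description and returns a printable test report):

```python
from bioimageio.core import load_model_description, test_model

# example model from the docs above; any rdf.yaml path, URL or zip package works
model = load_model_description(
    "https://uk1s3.embassy.ebi.ac.uk/public-datasets/bioimage.io/powerful-chipmunk/1/files/rdf.yaml"
)
print(model.name)

# run the model's bundled test inputs through inference and report the outcome
summary = test_model(model)
print(summary)
```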

    @@ -756,7 +780,7 @@

    🐍 Use in Python

    Logging level

    -

    bioimageio.spec and bioimageio.core use loguru for logging, hence the logging level +

    bioimageio.spec and bioimageio.core use loguru for logging, hence the logging level may be controlled with the LOGURU_LEVEL environment variable.
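For example (a sketch; the variable must be set before loguru is first imported):

```python
import os

# suppress log messages below WARNING from bioimageio.spec and bioimageio.core
os.environ["LOGURU_LEVEL"] = "WARNING"

import bioimageio.core  # noqa: E402  (imported after setting the variable)
```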

    Model Specification

    @@ -765,6 +789,34 @@

    Model Specification

    Changelog

    +

    0.7.0

+ • breaking:
+   • bioimageio CLI now has implicit boolean flags
+ • non-breaking:
+   • use new ValidationDetail.recommended_env in ValidationSummary
+   • improve get_io_sample_block_metas()
+     • now works for sufficiently large, but not exactly shaped inputs
+   • update to support zipfile.ZipFile object with bioimageio.spec==0.5.3.5
+   • add io helpers resolve and resolve_and_extract
+   • added enable_determinism function and determinism input argument for testing with seeded
+     random generators and optionally (determinism=="full") instructing DL frameworks to use
+     deterministic algorithms.

    0.6.10

+ • fix #423

    0.6.9

      @@ -830,7 +882,7 @@

      0.6.1

      0.6.0

- • add compatibility with new bioimageio.spec 0.5 (0.5.2post1)
+ • add compatibility with new bioimageio.spec 0.5 (0.5.2post1)
      • improve interfaces
      @@ -845,78 +897,117 @@

      0.5.10

      -
       1"""
      - 2.. include:: ../../README.md
      - 3"""
      - 4
      - 5from bioimageio.core.stat_measures import Stat
      - 6from bioimageio.spec import (
      - 7    build_description,
      - 8    dump_description,
      - 9    load_dataset_description,
      -10    load_description,
      -11    load_description_and_validate_format_only,
      -12    load_model_description,
      -13    save_bioimageio_package,
      -14    save_bioimageio_package_as_folder,
      -15    save_bioimageio_yaml_only,
      -16    validate_format,
      -17)
      -18
      -19from . import digest_spec
      -20from ._prediction_pipeline import PredictionPipeline, create_prediction_pipeline
      -21from ._resource_tests import load_description_and_test, test_description, test_model
      -22from ._settings import settings
      -23from .axis import Axis, AxisId
      -24from .block_meta import BlockMeta
      -25from .common import MemberId
      -26from .prediction import predict, predict_many
      -27from .sample import Sample
      -28from .stat_calculators import compute_dataset_measures
      -29from .tensor import Tensor
      -30from .utils import VERSION
      -31
      -32__version__ = VERSION
      -33
      -34
      -35# aliases
      -36test_resource = test_description
      -37load_resource = load_description
      -38load_model = load_model_description
      -39
      -40__all__ = [
      -41    "__version__",
      -42    "Axis",
      -43    "AxisId",
      -44    "BlockMeta",
      -45    "build_description",
      -46    "compute_dataset_measures",
      -47    "create_prediction_pipeline",
      -48    "digest_spec",
      -49    "dump_description",
      -50    "load_dataset_description",
      -51    "load_description_and_test",
      -52    "load_description_and_validate_format_only",
      -53    "load_description",
      -54    "load_model_description",
      -55    "load_model",
      -56    "load_resource",
      -57    "MemberId",
      -58    "predict_many",
      -59    "predict",
      -60    "PredictionPipeline",
      -61    "Sample",
      -62    "save_bioimageio_package_as_folder",
      -63    "save_bioimageio_package",
      -64    "save_bioimageio_yaml_only",
      -65    "settings",
      -66    "Stat",
      -67    "Tensor",
      -68    "test_description",
      -69    "test_model",
      -70    "test_resource",
      -71    "validate_format",
      -72]
      +                        
        1"""
      +  2.. include:: ../../README.md
      +  3"""
      +  4
      +  5from bioimageio.spec import (
      +  6    build_description,
      +  7    dump_description,
      +  8    load_dataset_description,
      +  9    load_description,
      + 10    load_description_and_validate_format_only,
      + 11    load_model_description,
      + 12    save_bioimageio_package,
      + 13    save_bioimageio_package_as_folder,
      + 14    save_bioimageio_yaml_only,
      + 15    validate_format,
      + 16)
      + 17
      + 18from . import (
      + 19    axis,
      + 20    block_meta,
      + 21    cli,
      + 22    commands,
      + 23    common,
      + 24    digest_spec,
      + 25    io,
      + 26    model_adapters,
      + 27    prediction,
      + 28    proc_ops,
      + 29    proc_setup,
      + 30    sample,
      + 31    stat_calculators,
      + 32    stat_measures,
      + 33    tensor,
      + 34)
      + 35from ._prediction_pipeline import PredictionPipeline, create_prediction_pipeline
      + 36from ._resource_tests import (
      + 37    enable_determinism,
      + 38    load_description_and_test,
      + 39    test_description,
      + 40    test_model,
      + 41)
      + 42from ._settings import settings
      + 43from .axis import Axis, AxisId
      + 44from .block_meta import BlockMeta
      + 45from .common import MemberId
      + 46from .prediction import predict, predict_many
      + 47from .sample import Sample
      + 48from .stat_calculators import compute_dataset_measures
      + 49from .stat_measures import Stat
      + 50from .tensor import Tensor
      + 51from .utils import VERSION
      + 52
      + 53__version__ = VERSION
      + 54
      + 55
      + 56# aliases
      + 57test_resource = test_description
      + 58"""alias of `test_description`"""
      + 59load_resource = load_description
      + 60"""alias of `load_description`"""
      + 61load_model = load_model_description
      + 62"""alias of `load_model_description`"""
      + 63
      + 64__all__ = [
      + 65    "__version__",
      + 66    "axis",
      + 67    "Axis",
      + 68    "AxisId",
      + 69    "block_meta",
      + 70    "BlockMeta",
      + 71    "build_description",
      + 72    "cli",
      + 73    "commands",
      + 74    "common",
      + 75    "compute_dataset_measures",
      + 76    "create_prediction_pipeline",
      + 77    "digest_spec",
      + 78    "dump_description",
      + 79    "enable_determinism",
      + 80    "io",
      + 81    "load_dataset_description",
      + 82    "load_description_and_test",
      + 83    "load_description_and_validate_format_only",
      + 84    "load_description",
      + 85    "load_model_description",
      + 86    "load_model",
      + 87    "load_resource",
      + 88    "MemberId",
      + 89    "model_adapters",
      + 90    "predict_many",
      + 91    "predict",
      + 92    "prediction",
      + 93    "PredictionPipeline",
      + 94    "proc_ops",
      + 95    "proc_setup",
      + 96    "sample",
      + 97    "Sample",
      + 98    "save_bioimageio_package_as_folder",
      + 99    "save_bioimageio_package",
      +100    "save_bioimageio_yaml_only",
      +101    "settings",
      +102    "stat_calculators",
      +103    "stat_measures",
      +104    "Stat",
      +105    "tensor",
      +106    "Tensor",
      +107    "test_description",
      +108    "test_model",
      +109    "test_resource",
      +110    "validate_format",
      +111]
       
      @@ -924,7 +1015,7 @@

      0.5.10

      __version__ = -'0.6.10' +'0.7.0'
      @@ -970,7 +1061,7 @@

      0.5.10

- Axis( id: bioimageio.spec.model.v0_5.AxisId, type: Literal['batch', 'channel', 'index', 'space', 'time'])
+ Axis( id: AxisId, type: Literal['batch', 'channel', 'index', 'space', 'time'])
      @@ -981,7 +1072,7 @@

      0.5.10

- id: bioimageio.spec.model.v0_5.AxisId
+ id: AxisId
      @@ -1007,7 +1098,7 @@

      0.5.10

      @classmethod
      def - create( cls, axis: Union[bioimageio.spec.model.v0_5.AxisId, Literal['b', 'i', 't', 'c', 'z', 'y', 'x'], Annotated[Union[bioimageio.spec.model.v0_5.BatchAxis, bioimageio.spec.model.v0_5.ChannelAxis, bioimageio.spec.model.v0_5.IndexInputAxis, bioimageio.spec.model.v0_5.TimeInputAxis, bioimageio.spec.model.v0_5.SpaceInputAxis], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.model.v0_5.BatchAxis, bioimageio.spec.model.v0_5.ChannelAxis, bioimageio.spec.model.v0_5.IndexOutputAxis, Annotated[Union[Annotated[bioimageio.spec.model.v0_5.TimeOutputAxis, Tag(tag='wo_halo')], Annotated[bioimageio.spec.model.v0_5.TimeOutputAxisWithHalo, Tag(tag='with_halo')]], Discriminator(discriminator=<function _get_halo_axis_discriminator_value>, custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[Annotated[bioimageio.spec.model.v0_5.SpaceOutputAxis, Tag(tag='wo_halo')], Annotated[bioimageio.spec.model.v0_5.SpaceOutputAxisWithHalo, Tag(tag='with_halo')]], Discriminator(discriminator=<function _get_halo_axis_discriminator_value>, custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Axis]) -> Axis: + create( cls, axis: Union[AxisId, Literal['b', 'i', 't', 'c', 'z', 'y', 'x'], Annotated[Union[bioimageio.spec.model.v0_5.BatchAxis, bioimageio.spec.model.v0_5.ChannelAxis, bioimageio.spec.model.v0_5.IndexInputAxis, bioimageio.spec.model.v0_5.TimeInputAxis, bioimageio.spec.model.v0_5.SpaceInputAxis], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.model.v0_5.BatchAxis, bioimageio.spec.model.v0_5.ChannelAxis, bioimageio.spec.model.v0_5.IndexOutputAxis, Annotated[Union[Annotated[bioimageio.spec.model.v0_5.TimeOutputAxis, Tag(tag='wo_halo')], Annotated[bioimageio.spec.model.v0_5.TimeOutputAxisWithHalo, Tag(tag='with_halo')]], Discriminator(discriminator=<function _get_halo_axis_discriminator_value>, custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[Annotated[bioimageio.spec.model.v0_5.SpaceOutputAxis, Tag(tag='wo_halo')], Annotated[bioimageio.spec.model.v0_5.SpaceOutputAxisWithHalo, Tag(tag='with_halo')]], Discriminator(discriminator=<function _get_halo_axis_discriminator_value>, custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Axis]) -> Axis: @@ -1043,10 +1134,10 @@

      0.5.10

      -
      198class AxisId(LowerCaseIdentifier):
      -199    root_model: ClassVar[Type[RootModel[Any]]] = RootModel[
      -200        Annotated[LowerCaseIdentifierAnno, MaxLen(16)]
      -201    ]
      +            
      199class AxisId(LowerCaseIdentifier):
      +200    root_model: ClassVar[Type[RootModel[Any]]] = RootModel[
      +201        Annotated[LowerCaseIdentifierAnno, MaxLen(16)]
      +202    ]
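A small usage sketch based on the Axis and AxisId signatures above (the axis ids are hypothetical):

```python
from bioimageio.core import Axis, AxisId

# AxisId is a constrained, lower-case identifier (at most 16 characters)
x = AxisId("x")
space_axis = Axis(id=x, type="space")

# Axis.create also accepts single-letter shorthands such as "b", "c", "z", "y", "x"
batch_axis = Axis.create("b")
```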
       
      @@ -1333,7 +1424,7 @@

      0.5.10

- BlockMeta( sample_shape: Mapping[bioimageio.spec.model.v0_5.AxisId, int], inner_slice: Mapping[bioimageio.spec.model.v0_5.AxisId, bioimageio.core.common.SliceInfo], halo: Mapping[bioimageio.spec.model.v0_5.AxisId, bioimageio.core.common.Halo], block_index: int, blocks_in_sample: int)
+ BlockMeta( sample_shape: Mapping[AxisId, int], inner_slice: Mapping[AxisId, bioimageio.core.common.SliceInfo], halo: Mapping[AxisId, bioimageio.core.common.Halo], block_index: int, blocks_in_sample: int)
      @@ -1344,7 +1435,7 @@

      0.5.10

- sample_shape: Mapping[bioimageio.spec.model.v0_5.AxisId, int]
+ sample_shape: Mapping[AxisId, int]
      @@ -1357,7 +1448,7 @@

      0.5.10

- inner_slice: Mapping[bioimageio.spec.model.v0_5.AxisId, bioimageio.core.common.SliceInfo]
+ inner_slice: Mapping[AxisId, bioimageio.core.common.SliceInfo]
      @@ -1370,7 +1461,7 @@

      0.5.10

- halo: Mapping[bioimageio.spec.model.v0_5.AxisId, bioimageio.core.common.Halo]
+ halo: Mapping[AxisId, bioimageio.core.common.Halo]
      @@ -1410,7 +1501,7 @@

      0.5.10

      - shape: Mapping[bioimageio.spec.model.v0_5.AxisId, int] + shape: Mapping[AxisId, int] @@ -1436,7 +1527,7 @@

      0.5.10

      - padding: Mapping[bioimageio.spec.model.v0_5.AxisId, bioimageio.core.common.PadWidth] + padding: Mapping[AxisId, bioimageio.core.common.PadWidth] @@ -1477,7 +1568,7 @@

      0.5.10

      - outer_slice: Mapping[bioimageio.spec.model.v0_5.AxisId, bioimageio.core.common.SliceInfo] + outer_slice: Mapping[AxisId, bioimageio.core.common.SliceInfo] @@ -1519,7 +1610,7 @@

      0.5.10

      - inner_shape: Mapping[bioimageio.spec.model.v0_5.AxisId, int] + inner_shape: Mapping[AxisId, int] @@ -1540,7 +1631,7 @@

      0.5.10

      - local_slice: Mapping[bioimageio.spec.model.v0_5.AxisId, bioimageio.core.common.SliceInfo] + local_slice: Mapping[AxisId, bioimageio.core.common.SliceInfo] @@ -1569,7 +1660,7 @@

      0.5.10

      - dims: Collection[bioimageio.spec.model.v0_5.AxisId] + dims: Collection[AxisId] @@ -1587,7 +1678,7 @@

      0.5.10

      - tagged_shape: Mapping[bioimageio.spec.model.v0_5.AxisId, int] + tagged_shape: Mapping[AxisId, int] @@ -1640,7 +1731,7 @@

      0.5.10

      def - get_transformed( self, new_axes: Mapping[bioimageio.spec.model.v0_5.AxisId, Union[bioimageio.core.block_meta.LinearAxisTransform, int]]) -> Self: + get_transformed( self, new_axes: Mapping[AxisId, Union[bioimageio.core.block_meta.LinearAxisTransform, int]]) -> Self: @@ -1692,41 +1783,41 @@

      0.5.10

      def - build_description( content: Dict[str, YamlValue], /, *, context: Optional[bioimageio.spec._internal.validation_context.ValidationContext] = None, format_version: Union[Literal['latest', 'discover'], str] = 'discover') -> Union[Annotated[Union[Annotated[Union[bioimageio.spec.application.v0_2.ApplicationDescr, bioimageio.spec.application.v0_3.ApplicationDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.dataset.v0_2.DatasetDescr, bioimageio.spec.dataset.v0_3.DatasetDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.model.v0_4.ModelDescr, bioimageio.spec.model.v0_5.ModelDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.notebook.v0_2.NotebookDescr, bioimageio.spec.notebook.v0_3.NotebookDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.generic.v0_2.GenericDescr, bioimageio.spec.generic.v0_3.GenericDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], bioimageio.spec._internal.common_nodes.InvalidDescr]: + build_description( content: Dict[str, YamlValue], /, *, context: Optional[bioimageio.spec.ValidationContext] = None, format_version: Union[Literal['latest', 'discover'], str] = 'discover') -> Union[Annotated[Union[Annotated[Union[bioimageio.spec.application.v0_2.ApplicationDescr, bioimageio.spec.ApplicationDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.dataset.v0_2.DatasetDescr, bioimageio.spec.DatasetDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.model.v0_4.ModelDescr, bioimageio.spec.ModelDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.NotebookDescr, bioimageio.spec.NotebookDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.generic.v0_2.GenericDescr, bioimageio.spec.GenericDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], bioimageio.spec.InvalidDescr]:
      -
      128def build_description(
      -129    content: BioimageioYamlContent,
      -130    /,
      -131    *,
      -132    context: Optional[ValidationContext] = None,
      -133    format_version: Union[FormatVersionPlaceholder, str] = DISCOVER,
      -134) -> Union[ResourceDescr, InvalidDescr]:
      -135    """build a bioimage.io resource description from an RDF's content.
      -136
      -137    Use `load_description` if you want to build a resource description from an rdf.yaml
      -138    or bioimage.io zip-package.
      -139
      -140    Args:
      -141        content: loaded rdf.yaml file (loaded with YAML, not bioimageio.spec)
      -142        context: validation context to use during validation
      -143        format_version: (optional) use this argument to load the resource and
      -144                        convert its metadata to a higher format_version
      -145
      -146    Returns:
      -147        An object holding all metadata of the bioimage.io resource
      -148
      -149    """
      +            
      130def build_description(
      +131    content: BioimageioYamlContent,
      +132    /,
      +133    *,
      +134    context: Optional[ValidationContext] = None,
      +135    format_version: Union[FormatVersionPlaceholder, str] = DISCOVER,
      +136) -> Union[ResourceDescr, InvalidDescr]:
      +137    """build a bioimage.io resource description from an RDF's content.
      +138
      +139    Use `load_description` if you want to build a resource description from an rdf.yaml
      +140    or bioimage.io zip-package.
      +141
      +142    Args:
      +143        content: loaded rdf.yaml file (loaded with YAML, not bioimageio.spec)
      +144        context: validation context to use during validation
      +145        format_version: (optional) use this argument to load the resource and
      +146                        convert its metadata to a higher format_version
      +147
      +148    Returns:
      +149        An object holding all metadata of the bioimage.io resource
       150
      -151    return build_description_impl(
      -152        content,
      -153        context=context,
      -154        format_version=format_version,
      -155        get_rd_class=_get_rd_class,
      -156    )
      +151    """
      +152
      +153    return build_description_impl(
      +154        content,
      +155        context=context,
      +156        format_version=format_version,
      +157        get_rd_class=_get_rd_class,
      +158    )
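A usage sketch following the listing above (it assumes PyYAML for loading the raw rdf.yaml content; the file path is hypothetical):

```python
import yaml  # assumption: PyYAML; any YAML loader yielding plain dicts works

from bioimageio.core import build_description

# content must be the raw rdf.yaml content loaded with YAML, not with bioimageio.spec
with open("rdf.yaml", encoding="utf-8") as f:
    content = yaml.safe_load(f)

rd = build_description(content)  # returns a resource description or an InvalidDescr
```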
       
      @@ -1738,7 +1829,7 @@

      0.5.10

      Arguments:
- • content: loaded rdf.yaml file (loaded with YAML, not bioimageio.spec)
+ • content: loaded rdf.yaml file (loaded with YAML, not bioimageio.spec)
      • context: validation context to use during validation
      • format_version: (optional) use this argument to load the resource and convert its metadata to a higher format_version
      • @@ -1758,7 +1849,7 @@
        Returns:
        def - compute_dataset_measures( measures: Iterable[Annotated[Union[bioimageio.core.stat_measures.DatasetMean, bioimageio.core.stat_measures.DatasetStd, bioimageio.core.stat_measures.DatasetVar, bioimageio.core.stat_measures.DatasetPercentile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], dataset: Iterable[Sample]) -> Dict[Annotated[Union[bioimageio.core.stat_measures.DatasetMean, bioimageio.core.stat_measures.DatasetStd, bioimageio.core.stat_measures.DatasetVar, bioimageio.core.stat_measures.DatasetPercentile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Union[float, Annotated[Tensor, BeforeValidator(func=<function tensor_custom_before_validator at 0x7f52744531a0>, json_schema_input_type=PydanticUndefined), PlainSerializer(func=<function tensor_custom_serializer at 0x7f52658a8ea0>, return_type=PydanticUndefined, when_used='always')]]]: + compute_dataset_measures( measures: Iterable[Annotated[Union[bioimageio.core.stat_measures.DatasetMean, bioimageio.core.stat_measures.DatasetStd, bioimageio.core.stat_measures.DatasetVar, bioimageio.core.stat_measures.DatasetPercentile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], dataset: Iterable[Sample]) -> Dict[Annotated[Union[bioimageio.core.stat_measures.DatasetMean, bioimageio.core.stat_measures.DatasetStd, bioimageio.core.stat_measures.DatasetVar, bioimageio.core.stat_measures.DatasetPercentile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Union[float, Annotated[Tensor, BeforeValidator(func=<function tensor_custom_before_validator at 0x7f5372ef6840>, json_schema_input_type=PydanticUndefined), PlainSerializer(func=<function tensor_custom_serializer at 0x7f5372ef6a20>, return_type=PydanticUndefined, when_used='always')]]]: @@ -1794,7 +1885,7 @@
        Returns:
        def - create_prediction_pipeline( bioimageio_model: Annotated[Union[bioimageio.spec.model.v0_4.ModelDescr, bioimageio.spec.model.v0_5.ModelDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], *, devices: Optional[Sequence[str]] = None, weight_format: Optional[Literal['keras_hdf5', 'onnx', 'pytorch_state_dict', 'tensorflow_js', 'tensorflow_saved_model_bundle', 'torchscript']] = None, weights_format: Optional[Literal['keras_hdf5', 'onnx', 'pytorch_state_dict', 'tensorflow_js', 'tensorflow_saved_model_bundle', 'torchscript']] = None, dataset_for_initial_statistics: Iterable[Union[Sample, Sequence[Tensor]]] = (), keep_updating_initial_dataset_statistics: bool = False, fixed_dataset_statistics: Mapping[Annotated[Union[bioimageio.core.stat_measures.DatasetMean, bioimageio.core.stat_measures.DatasetStd, bioimageio.core.stat_measures.DatasetVar, bioimageio.core.stat_measures.DatasetPercentile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Union[float, Annotated[Tensor, BeforeValidator(func=<function tensor_custom_before_validator>, json_schema_input_type=PydanticUndefined), PlainSerializer(func=<function tensor_custom_serializer>, return_type=PydanticUndefined, when_used='always')]]] = mappingproxy({}), model_adapter: Optional[bioimageio.core.model_adapters._model_adapter.ModelAdapter] = None, ns: Union[int, Mapping[Tuple[bioimageio.spec.model.v0_5.TensorId, bioimageio.spec.model.v0_5.AxisId], int]] = 10, **deprecated_kwargs: Any) -> PredictionPipeline: + create_prediction_pipeline( bioimageio_model: Annotated[Union[bioimageio.spec.model.v0_4.ModelDescr, bioimageio.spec.ModelDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], *, devices: Optional[Sequence[str]] = None, weight_format: Optional[Literal['keras_hdf5', 'onnx', 'pytorch_state_dict', 'tensorflow_js', 'tensorflow_saved_model_bundle', 'torchscript']] = None, weights_format: Optional[Literal['keras_hdf5', 'onnx', 'pytorch_state_dict', 'tensorflow_js', 'tensorflow_saved_model_bundle', 'torchscript']] = None, dataset_for_initial_statistics: Iterable[Union[Sample, Sequence[Tensor]]] = (), keep_updating_initial_dataset_statistics: bool = False, fixed_dataset_statistics: Mapping[Annotated[Union[bioimageio.core.stat_measures.DatasetMean, bioimageio.core.stat_measures.DatasetStd, bioimageio.core.stat_measures.DatasetVar, bioimageio.core.stat_measures.DatasetPercentile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Union[float, Annotated[Tensor, BeforeValidator(func=<function tensor_custom_before_validator>, json_schema_input_type=PydanticUndefined), PlainSerializer(func=<function tensor_custom_serializer>, return_type=PydanticUndefined, when_used='always')]]] = mappingproxy({}), model_adapter: Optional[bioimageio.core.model_adapters.ModelAdapter] = None, ns: Union[int, Mapping[Tuple[bioimageio.spec.model.v0_5.TensorId, AxisId], int]] = 10, **deprecated_kwargs: Any) -> PredictionPipeline: @@ -1885,7 +1976,7 @@
        Returns:
        def - dump_description( rd: Union[Annotated[Union[Annotated[Union[bioimageio.spec.application.v0_2.ApplicationDescr, bioimageio.spec.application.v0_3.ApplicationDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.dataset.v0_2.DatasetDescr, bioimageio.spec.dataset.v0_3.DatasetDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.model.v0_4.ModelDescr, bioimageio.spec.model.v0_5.ModelDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.notebook.v0_2.NotebookDescr, bioimageio.spec.notebook.v0_3.NotebookDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.generic.v0_2.GenericDescr, bioimageio.spec.generic.v0_3.GenericDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], bioimageio.spec._internal.common_nodes.InvalidDescr], exclude_unset: bool = True) -> Dict[str, YamlValue]: + dump_description( rd: Union[Annotated[Union[Annotated[Union[bioimageio.spec.application.v0_2.ApplicationDescr, bioimageio.spec.ApplicationDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.dataset.v0_2.DatasetDescr, bioimageio.spec.DatasetDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.model.v0_4.ModelDescr, bioimageio.spec.ModelDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.NotebookDescr, bioimageio.spec.NotebookDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.generic.v0_2.GenericDescr, bioimageio.spec.GenericDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], bioimageio.spec.InvalidDescr], exclude_unset: bool = True) -> Dict[str, YamlValue]: @@ -1903,36 +1994,135 @@
        Returns:
+
+ def enable_determinism(mode: Literal['seed_only', 'full']):
+
      36def enable_determinism(mode: Literal["seed_only", "full"]):
      +37    """Seed and configure ML frameworks for maximum reproducibility.
      +38    May degrade performance. Only recommended for testing reproducibility!
      +39
      +40    Seed any random generators and (if **mode**=="full") request ML frameworks to use
      +41    deterministic algorithms.
      +42    Notes:
      +43        - **mode** == "full"  might degrade performance and throw exceptions.
      +44        - Subsequent inference calls might still differ. Call before each function
      +45          (sequence) that is expected to be reproducible.
      +46        - Degraded performance: Use for testing reproducibility only!
      +47        - Recipes:
      +48            - [PyTorch](https://pytorch.org/docs/stable/notes/randomness.html)
      +49            - [Keras](https://keras.io/examples/keras_recipes/reproducibility_recipes/)
      +50            - [NumPy](https://numpy.org/doc/2.0/reference/random/generated/numpy.random.seed.html)
      +51    """
      +52    try:
      +53        try:
      +54            import numpy.random
      +55        except ImportError:
      +56            pass
      +57        else:
      +58            numpy.random.seed(0)
      +59    except Exception as e:
      +60        logger.debug(str(e))
      +61
      +62    try:
      +63        try:
      +64            import torch
      +65        except ImportError:
      +66            pass
      +67        else:
      +68            _ = torch.manual_seed(0)
      +69            torch.use_deterministic_algorithms(mode == "full")
      +70    except Exception as e:
      +71        logger.debug(str(e))
      +72
      +73    try:
      +74        try:
      +75            import keras
      +76        except ImportError:
      +77            pass
      +78        else:
      +79            keras.utils.set_random_seed(0)
      +80    except Exception as e:
      +81        logger.debug(str(e))
      +82
      +83    try:
      +84        try:
      +85            import tensorflow as tf  # pyright: ignore[reportMissingImports]
      +86        except ImportError:
      +87            pass
      +88        else:
      +89            tf.random.seed(0)
      +90            if mode == "full":
      +91                tf.config.experimental.enable_op_determinism()
      +92            # TODO: find possibility to switch it off again??
      +93    except Exception as e:
      +94        logger.debug(str(e))
      +
      + + +

+ Seed and configure ML frameworks for maximum reproducibility.
+ May degrade performance. Only recommended for testing reproducibility!

      + +

+ Seed any random generators and (if mode=="full") request ML frameworks to use
+ deterministic algorithms.

      + +
+ Notes:
+   • mode == "full" might degrade performance and throw exceptions.
+   • Subsequent inference calls might still differ. Call before each function
+     (sequence) that is expected to be reproducible.
+   • Degraded performance: Use for testing reproducibility only!
+   • Recipes:
+     • PyTorch
+     • Keras
+     • NumPy
      +
      + +
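A usage sketch of the new function:

```python
from bioimageio.core import enable_determinism

# seed numpy/torch/keras/tensorflow; "full" additionally requests deterministic algorithms
enable_determinism("seed_only")

# ... run the inference calls that should be reproducible ...

# call again before each run that is expected to be reproducible
enable_determinism("seed_only")
```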
      def - load_dataset_description( source: Union[Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])], str, Annotated[pydantic_core._pydantic_core.Url, UrlConstraints(max_length=2083, allowed_schemes=['http', 'https'], host_required=None, default_host=None, default_port=None, default_path=None)]], /, *, format_version: Union[Literal['discover'], Literal['latest'], str] = 'discover', perform_io_checks: bool = True, known_files: Optional[Dict[str, bioimageio.spec._internal.io_basics.Sha256]] = None) -> Annotated[Union[bioimageio.spec.dataset.v0_2.DatasetDescr, bioimageio.spec.dataset.v0_3.DatasetDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)]: + load_dataset_description( source: Union[Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])], str, Annotated[pydantic_core._pydantic_core.Url, UrlConstraints(max_length=2083, allowed_schemes=['http', 'https'], host_required=None, default_host=None, default_port=None, default_path=None)], zipfile.ZipFile], /, *, format_version: Union[Literal['discover'], Literal['latest'], str] = 'discover', perform_io_checks: bool = True, known_files: Optional[Dict[str, bioimageio.spec._internal.io_basics.Sha256]] = None) -> Annotated[Union[bioimageio.spec.dataset.v0_2.DatasetDescr, bioimageio.spec.DatasetDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)]:
      -
       95def load_dataset_description(
      - 96    source: PermissiveFileSource,
      - 97    /,
      - 98    *,
      - 99    format_version: Union[Literal["discover"], Literal["latest"], str] = DISCOVER,
      -100    perform_io_checks: bool = settings.perform_io_checks,
      -101    known_files: Optional[Dict[str, Sha256]] = None,
      -102) -> AnyDatasetDescr:
      -103    """same as `load_description`, but addtionally ensures that the loaded
      -104    description is valid and of type 'dataset'.
      -105    """
      -106    rd = load_description(
      -107        source,
      -108        format_version=format_version,
      -109        perform_io_checks=perform_io_checks,
      -110        known_files=known_files,
      -111    )
      -112    return ensure_description_is_dataset(rd)
      +            
       98def load_dataset_description(
      + 99    source: Union[PermissiveFileSource, ZipFile],
      +100    /,
      +101    *,
      +102    format_version: Union[Literal["discover"], Literal["latest"], str] = DISCOVER,
      +103    perform_io_checks: bool = settings.perform_io_checks,
      +104    known_files: Optional[Dict[str, Sha256]] = None,
      +105) -> AnyDatasetDescr:
      +106    """same as `load_description`, but addtionally ensures that the loaded
      +107    description is valid and of type 'dataset'.
      +108    """
      +109    rd = load_description(
      +110        source,
      +111        format_version=format_version,
      +112        perform_io_checks=perform_io_checks,
      +113        known_files=known_files,
      +114    )
      +115    return ensure_description_is_dataset(rd)
       
      @@ -1947,67 +2137,82 @@
      Returns:
      def - load_description_and_test( source: Union[Annotated[Union[Annotated[Union[bioimageio.spec.application.v0_2.ApplicationDescr, bioimageio.spec.application.v0_3.ApplicationDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.dataset.v0_2.DatasetDescr, bioimageio.spec.dataset.v0_3.DatasetDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.model.v0_4.ModelDescr, bioimageio.spec.model.v0_5.ModelDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.notebook.v0_2.NotebookDescr, bioimageio.spec.notebook.v0_3.NotebookDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.generic.v0_2.GenericDescr, bioimageio.spec.generic.v0_3.GenericDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])], str, Annotated[pydantic_core._pydantic_core.Url, UrlConstraints(max_length=2083, allowed_schemes=['http', 'https'], host_required=None, default_host=None, default_port=None, default_path=None)], Dict[str, YamlValue]], *, format_version: Union[Literal['discover', 'latest'], str] = 'discover', weight_format: Optional[Literal['keras_hdf5', 'onnx', 'pytorch_state_dict', 'tensorflow_js', 'tensorflow_saved_model_bundle', 'torchscript']] = None, devices: Optional[Sequence[str]] = None, absolute_tolerance: float = 0.00015, relative_tolerance: float = 0.0001, decimal: Optional[int] = None, expected_type: Optional[str] = None) -> Union[Annotated[Union[Annotated[Union[bioimageio.spec.application.v0_2.ApplicationDescr, bioimageio.spec.application.v0_3.ApplicationDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.dataset.v0_2.DatasetDescr, bioimageio.spec.dataset.v0_3.DatasetDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.model.v0_4.ModelDescr, bioimageio.spec.model.v0_5.ModelDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.notebook.v0_2.NotebookDescr, bioimageio.spec.notebook.v0_3.NotebookDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.generic.v0_2.GenericDescr, bioimageio.spec.generic.v0_3.GenericDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], 
bioimageio.spec._internal.common_nodes.InvalidDescr]: + load_description_and_test( source: Union[Annotated[Union[Annotated[Union[bioimageio.spec.application.v0_2.ApplicationDescr, bioimageio.spec.ApplicationDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.dataset.v0_2.DatasetDescr, bioimageio.spec.DatasetDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.model.v0_4.ModelDescr, bioimageio.spec.ModelDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.NotebookDescr, bioimageio.spec.NotebookDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.generic.v0_2.GenericDescr, bioimageio.spec.GenericDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])], str, Annotated[pydantic_core._pydantic_core.Url, UrlConstraints(max_length=2083, allowed_schemes=['http', 'https'], host_required=None, default_host=None, default_port=None, default_path=None)], Dict[str, YamlValue]], *, format_version: Union[Literal['discover', 'latest'], str] = 'discover', weight_format: Optional[Literal['keras_hdf5', 'onnx', 'pytorch_state_dict', 'tensorflow_js', 'tensorflow_saved_model_bundle', 'torchscript']] = None, devices: Optional[Sequence[str]] = None, absolute_tolerance: float = 0.00015, relative_tolerance: float = 0.0001, decimal: Optional[int] = None, determinism: Literal['seed_only', 'full'] = 'seed_only', expected_type: Optional[str] = None) -> Union[Annotated[Union[Annotated[Union[bioimageio.spec.application.v0_2.ApplicationDescr, bioimageio.spec.ApplicationDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.dataset.v0_2.DatasetDescr, bioimageio.spec.DatasetDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.model.v0_4.ModelDescr, bioimageio.spec.ModelDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.NotebookDescr, bioimageio.spec.NotebookDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.generic.v0_2.GenericDescr, bioimageio.spec.GenericDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], bioimageio.spec.InvalidDescr]:
      -
       82def load_description_and_test(
      - 83    source: Union[ResourceDescr, PermissiveFileSource, BioimageioYamlContent],
      - 84    *,
      - 85    format_version: Union[Literal["discover", "latest"], str] = "discover",
      - 86    weight_format: Optional[WeightsFormat] = None,
      - 87    devices: Optional[Sequence[str]] = None,
      - 88    absolute_tolerance: float = 1.5e-4,
      - 89    relative_tolerance: float = 1e-4,
      - 90    decimal: Optional[int] = None,
      - 91    expected_type: Optional[str] = None,
      - 92) -> Union[ResourceDescr, InvalidDescr]:
      - 93    """Test RDF dynamically, e.g. model inference of test inputs"""
      - 94    # NOTE: `decimal` is a legacy argument and is handled in `_test_model_inference`
      - 95    if (
      - 96        isinstance(source, ResourceDescrBase)
      - 97        and format_version != "discover"
      - 98        and source.format_version != format_version
      - 99    ):
      -100        warnings.warn(
      -101            f"deserializing source to ensure we validate and test using format {format_version}"
      -102        )
      -103        source = dump_description(source)
      -104
      -105    if isinstance(source, ResourceDescrBase):
      -106        rd = source
      -107    elif isinstance(source, dict):
      -108        rd = build_description(source, format_version=format_version)
      -109    else:
      -110        rd = load_description(source, format_version=format_version)
      -111
      -112    rd.validation_summary.env.append(
      -113        InstalledPackage(name="bioimageio.core", version=VERSION)
      -114    )
      -115
      -116    if expected_type is not None:
      -117        _test_expected_resource_type(rd, expected_type)
      -118
      -119    if isinstance(rd, (v0_4.ModelDescr, v0_5.ModelDescr)):
      -120        if weight_format is None:
      -121            weight_formats: List[WeightsFormat] = [
      -122                w for w, we in rd.weights if we is not None
      -123            ]  # pyright: ignore[reportAssignmentType]
      -124        else:
      -125            weight_formats = [weight_format]
      -126        for w in weight_formats:
      -127            _test_model_inference(
      -128                rd, w, devices, absolute_tolerance, relative_tolerance, decimal
      -129            )
      -130            if not isinstance(rd, v0_4.ModelDescr):
      -131                _test_model_inference_parametrized(rd, w, devices)
      -132
      -133    # TODO: add execution of jupyter notebooks
      -134    # TODO: add more tests
      -135
      -136    return rd
      +            
      147def load_description_and_test(
      +148    source: Union[ResourceDescr, PermissiveFileSource, BioimageioYamlContent],
      +149    *,
      +150    format_version: Union[Literal["discover", "latest"], str] = "discover",
      +151    weight_format: Optional[WeightsFormat] = None,
      +152    devices: Optional[Sequence[str]] = None,
      +153    absolute_tolerance: float = 1.5e-4,
      +154    relative_tolerance: float = 1e-4,
      +155    decimal: Optional[int] = None,
      +156    determinism: Literal["seed_only", "full"] = "seed_only",
      +157    expected_type: Optional[str] = None,
      +158) -> Union[ResourceDescr, InvalidDescr]:
      +159    """Test RDF dynamically, e.g. model inference of test inputs"""
      +160    if (
      +161        isinstance(source, ResourceDescrBase)
      +162        and format_version != "discover"
      +163        and source.format_version != format_version
      +164    ):
      +165        warnings.warn(
      +166            f"deserializing source to ensure we validate and test using format {format_version}"
      +167        )
      +168        source = dump_description(source)
      +169
      +170    if isinstance(source, ResourceDescrBase):
      +171        rd = source
      +172    elif isinstance(source, dict):
      +173        rd = build_description(source, format_version=format_version)
      +174    else:
      +175        rd = load_description(source, format_version=format_version)
      +176
      +177    rd.validation_summary.env.add(
      +178        InstalledPackage(name="bioimageio.core", version=VERSION)
      +179    )
      +180
      +181    if expected_type is not None:
      +182        _test_expected_resource_type(rd, expected_type)
      +183
      +184    if isinstance(rd, (v0_4.ModelDescr, v0_5.ModelDescr)):
      +185        if weight_format is None:
      +186            weight_formats: List[WeightsFormat] = [
      +187                w for w, we in rd.weights if we is not None
      +188            ]  # pyright: ignore[reportAssignmentType]
      +189        else:
      +190            weight_formats = [weight_format]
      +191
      +192        if decimal is None:
      +193            atol = absolute_tolerance
      +194            rtol = relative_tolerance
      +195        else:
      +196            warnings.warn(
      +197                "The argument `decimal` has been deprecated in favour of"
      +198                + " `relative_tolerance` and `absolute_tolerance`, with different"
      +199                + " validation logic, using `numpy.testing.assert_allclose, see"
      +200                + " 'https://numpy.org/doc/stable/reference/generated/"
      +201                + " numpy.testing.assert_allclose.html'. Passing a value for `decimal`"
      +202                + " will cause validation to revert to the old behaviour."
      +203            )
      +204            atol = 1.5 * 10 ** (-decimal)
      +205            rtol = 0
      +206
      +207        enable_determinism(determinism)
      +208        for w in weight_formats:
      +209            _test_model_inference(rd, w, devices, atol, rtol)
      +210            if not isinstance(rd, v0_4.ModelDescr):
      +211                _test_model_inference_parametrized(rd, w, devices)
      +212
      +213    # TODO: add execution of jupyter notebooks
      +214    # TODO: add more tests
      +215
      +216    return rd
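A sketch of the new determinism argument (the example model URL is taken from the validation output earlier in this document):

```python
from bioimageio.core import load_description_and_test

rd = load_description_and_test(
    "https://uk1s3.embassy.ebi.ac.uk/public-datasets/bioimage.io/impartial-shrimp/1.1/files/rdf.yaml",
    determinism="full",  # also request deterministic framework algorithms
)
print(rd.validation_summary)
```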
       
      @@ -2021,45 +2226,45 @@
      Returns:
      def - load_description_and_validate_format_only( source: Union[Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])], str, Annotated[pydantic_core._pydantic_core.Url, UrlConstraints(max_length=2083, allowed_schemes=['http', 'https'], host_required=None, default_host=None, default_port=None, default_path=None)]], /, *, format_version: Union[Literal['discover'], Literal['latest'], str] = 'discover', perform_io_checks: bool = True, known_files: Optional[Dict[str, bioimageio.spec._internal.io_basics.Sha256]] = None) -> bioimageio.spec.summary.ValidationSummary: + load_description_and_validate_format_only( source: Union[Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])], str, Annotated[pydantic_core._pydantic_core.Url, UrlConstraints(max_length=2083, allowed_schemes=['http', 'https'], host_required=None, default_host=None, default_port=None, default_path=None)], zipfile.ZipFile], /, *, format_version: Union[Literal['discover'], Literal['latest'], str] = 'discover', perform_io_checks: bool = True, known_files: Optional[Dict[str, bioimageio.spec._internal.io_basics.Sha256]] = None) -> bioimageio.spec.ValidationSummary:
      -
      134def load_description_and_validate_format_only(
      -135    source: PermissiveFileSource,
      -136    /,
      -137    *,
      -138    format_version: Union[Literal["discover"], Literal["latest"], str] = DISCOVER,
      -139    perform_io_checks: bool = settings.perform_io_checks,
      -140    known_files: Optional[Dict[str, Sha256]] = None,
      -141) -> ValidationSummary:
      -142    """load a bioimage.io resource description
      -143
      -144    Args:
      -145        source: Path or URL to an rdf.yaml or a bioimage.io package
      -146                (zip-file with rdf.yaml in it).
      -147        format_version: (optional) Use this argument to load the resource and
      -148                        convert its metadata to a higher format_version.
      -149        perform_io_checks: Wether or not to perform validation that requires file io,
      -150                           e.g. downloading a remote files. The existence of local
      -151                           absolute file paths is still being checked.
      -152        known_files: Allows to bypass download and hashing of referenced files
      -153                     (even if perform_io_checks is True).
      -154
      -155    Returns:
      -156        Validation summary of the bioimage.io resource found at `source`.
      +            
      137def load_description_and_validate_format_only(
      +138    source: Union[PermissiveFileSource, ZipFile],
      +139    /,
      +140    *,
      +141    format_version: Union[Literal["discover"], Literal["latest"], str] = DISCOVER,
      +142    perform_io_checks: bool = settings.perform_io_checks,
      +143    known_files: Optional[Dict[str, Sha256]] = None,
      +144) -> ValidationSummary:
      +145    """load a bioimage.io resource description
      +146
      +147    Args:
      +148        source: Path or URL to an rdf.yaml or a bioimage.io package
      +149                (zip-file with rdf.yaml in it).
      +150        format_version: (optional) Use this argument to load the resource and
      +151                        convert its metadata to a higher format_version.
      +152        perform_io_checks: Wether or not to perform validation that requires file io,
      +153                           e.g. downloading a remote files. The existence of local
      +154                           absolute file paths is still being checked.
      +155        known_files: Allows to bypass download and hashing of referenced files
      +156                     (even if perform_io_checks is True).
       157
      -158    """
      -159    rd = load_description(
      -160        source,
      -161        format_version=format_version,
      -162        perform_io_checks=perform_io_checks,
      -163        known_files=known_files,
      -164    )
      -165    assert rd.validation_summary is not None
      -166    return rd.validation_summary
      +158    Returns:
      +159        Validation summary of the bioimage.io resource found at `source`.
      +160
      +161    """
      +162    rd = load_description(
      +163        source,
      +164        format_version=format_version,
      +165        perform_io_checks=perform_io_checks,
      +166        known_files=known_files,
      +167    )
      +168    assert rd.validation_summary is not None
      +169    return rd.validation_summary
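A usage sketch (the source path is hypothetical; URLs and rdf.yaml files work as well):

```python
from bioimageio.core import load_description_and_validate_format_only

summary = load_description_and_validate_format_only(
    "my_model_package.zip",   # hypothetical bioimage.io zip package
    perform_io_checks=False,  # skip downloading/hashing of referenced files
)
print(summary)
```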
       
      @@ -2093,14 +2298,14 @@
      Returns:
      def - load_description( source: Union[Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])], str, Annotated[pydantic_core._pydantic_core.Url, UrlConstraints(max_length=2083, allowed_schemes=['http', 'https'], host_required=None, default_host=None, default_port=None, default_path=None)]], /, *, format_version: Union[Literal['discover'], Literal['latest'], str] = 'discover', perform_io_checks: bool = True, known_files: Optional[Dict[str, bioimageio.spec._internal.io_basics.Sha256]] = None) -> Union[Annotated[Union[Annotated[Union[bioimageio.spec.application.v0_2.ApplicationDescr, bioimageio.spec.application.v0_3.ApplicationDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.dataset.v0_2.DatasetDescr, bioimageio.spec.dataset.v0_3.DatasetDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.model.v0_4.ModelDescr, bioimageio.spec.model.v0_5.ModelDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.notebook.v0_2.NotebookDescr, bioimageio.spec.notebook.v0_3.NotebookDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.generic.v0_2.GenericDescr, bioimageio.spec.generic.v0_3.GenericDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], bioimageio.spec._internal.common_nodes.InvalidDescr]: + load_description( source: Union[Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])], str, Annotated[pydantic_core._pydantic_core.Url, UrlConstraints(max_length=2083, allowed_schemes=['http', 'https'], host_required=None, default_host=None, default_port=None, default_path=None)], zipfile.ZipFile], /, *, format_version: Union[Literal['discover'], Literal['latest'], str] = 'discover', perform_io_checks: bool = True, known_files: Optional[Dict[str, bioimageio.spec._internal.io_basics.Sha256]] = None) -> Union[Annotated[Union[Annotated[Union[bioimageio.spec.application.v0_2.ApplicationDescr, bioimageio.spec.ApplicationDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.dataset.v0_2.DatasetDescr, bioimageio.spec.DatasetDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.model.v0_4.ModelDescr, bioimageio.spec.ModelDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.NotebookDescr, bioimageio.spec.NotebookDescr], 
Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.generic.v0_2.GenericDescr, bioimageio.spec.GenericDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], bioimageio.spec.InvalidDescr]:
      29def load_description(
      -30    source: PermissiveFileSource,
      +30    source: Union[PermissiveFileSource, ZipFile],
       31    /,
       32    *,
       33    format_version: Union[Literal["discover"], Literal["latest"], str] = DISCOVER,
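The new `zipfile.ZipFile` overload shown above can be exercised roughly as follows (a minimal sketch; `my_model.zip` is a placeholder for any local bioimage.io package, i.e. a zip containing an `rdf.yaml`):

```python
from zipfile import ZipFile

from bioimageio.core import load_description

# pass an already opened ZipFile object directly as the description source
with ZipFile("my_model.zip") as zf:
    descr = load_description(zf, format_version="latest")

print(descr.type, descr.format_version)
```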
      @@ -2176,14 +2381,14 @@ 
      Returns:
      def - load_model_description( source: Union[Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])], str, Annotated[pydantic_core._pydantic_core.Url, UrlConstraints(max_length=2083, allowed_schemes=['http', 'https'], host_required=None, default_host=None, default_port=None, default_path=None)]], /, *, format_version: Union[Literal['discover'], Literal['latest'], str] = 'discover', perform_io_checks: bool = True, known_files: Optional[Dict[str, bioimageio.spec._internal.io_basics.Sha256]] = None) -> Annotated[Union[bioimageio.spec.model.v0_4.ModelDescr, bioimageio.spec.model.v0_5.ModelDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)]: + load_model_description( source: Union[Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])], str, Annotated[pydantic_core._pydantic_core.Url, UrlConstraints(max_length=2083, allowed_schemes=['http', 'https'], host_required=None, default_host=None, default_port=None, default_path=None)], zipfile.ZipFile], /, *, format_version: Union[Literal['discover'], Literal['latest'], str] = 'discover', perform_io_checks: bool = True, known_files: Optional[Dict[str, bioimageio.spec._internal.io_basics.Sha256]] = None) -> Annotated[Union[bioimageio.spec.model.v0_4.ModelDescr, bioimageio.spec.ModelDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)]:
      75def load_model_description(
      -76    source: PermissiveFileSource,
      +76    source: Union[PermissiveFileSource, ZipFile],
       77    /,
       78    *,
       79    format_version: Union[Literal["discover"], Literal["latest"], str] = DISCOVER,
      @@ -2192,19 +2397,28 @@ 
      Returns:
      82) -> AnyModelDescr: 83 """same as `load_description`, but addtionally ensures that the loaded 84 description is valid and of type 'model'. -85 """ -86 rd = load_description( -87 source, -88 format_version=format_version, -89 perform_io_checks=perform_io_checks, -90 known_files=known_files, -91 ) -92 return ensure_description_is_model(rd) +85 +86 Raises: +87 ValueError: for invalid or non-model resources +88 """ +89 rd = load_description( +90 source, +91 format_version=format_version, +92 perform_io_checks=perform_io_checks, +93 known_files=known_files, +94 ) +95 return ensure_description_is_model(rd)

same as load_description, but additionally ensures that the loaded description is valid and of type 'model'.

+ Raises:
+   • ValueError: for invalid or non-model resources
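A minimal sketch of the behavior documented above (the path is a placeholder for any model `rdf.yaml` or package):

```python
from bioimageio.core import load_model_description

try:
    model_descr = load_model_description("path/to/rdf.yaml", perform_io_checks=False)
except ValueError:
    # raised for invalid resources and for resources that are not of type 'model'
    raise
```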
      @@ -2214,14 +2428,14 @@
      Returns:
      def - load_model( source: Union[Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])], str, Annotated[pydantic_core._pydantic_core.Url, UrlConstraints(max_length=2083, allowed_schemes=['http', 'https'], host_required=None, default_host=None, default_port=None, default_path=None)]], /, *, format_version: Union[Literal['discover'], Literal['latest'], str] = 'discover', perform_io_checks: bool = True, known_files: Optional[Dict[str, bioimageio.spec._internal.io_basics.Sha256]] = None) -> Annotated[Union[bioimageio.spec.model.v0_4.ModelDescr, bioimageio.spec.model.v0_5.ModelDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)]: + load_model( source: Union[Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])], str, Annotated[pydantic_core._pydantic_core.Url, UrlConstraints(max_length=2083, allowed_schemes=['http', 'https'], host_required=None, default_host=None, default_port=None, default_path=None)], zipfile.ZipFile], /, *, format_version: Union[Literal['discover'], Literal['latest'], str] = 'discover', perform_io_checks: bool = True, known_files: Optional[Dict[str, bioimageio.spec._internal.io_basics.Sha256]] = None) -> Annotated[Union[bioimageio.spec.model.v0_4.ModelDescr, bioimageio.spec.ModelDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)]:
      75def load_model_description(
      -76    source: PermissiveFileSource,
      +76    source: Union[PermissiveFileSource, ZipFile],
       77    /,
       78    *,
       79    format_version: Union[Literal["discover"], Literal["latest"], str] = DISCOVER,
      @@ -2230,19 +2444,21 @@ 
      Returns:
      82) -> AnyModelDescr: 83 """same as `load_description`, but addtionally ensures that the loaded 84 description is valid and of type 'model'. -85 """ -86 rd = load_description( -87 source, -88 format_version=format_version, -89 perform_io_checks=perform_io_checks, -90 known_files=known_files, -91 ) -92 return ensure_description_is_model(rd) +85 +86 Raises: +87 ValueError: for invalid or non-model resources +88 """ +89 rd = load_description( +90 source, +91 format_version=format_version, +92 perform_io_checks=perform_io_checks, +93 known_files=known_files, +94 ) +95 return ensure_description_is_model(rd)
same as load_description, but additionally ensures that the loaded description is valid and of type 'model'.

@@ -2252,14 +2468,14 @@
      Returns:
      def - load_resource( source: Union[Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])], str, Annotated[pydantic_core._pydantic_core.Url, UrlConstraints(max_length=2083, allowed_schemes=['http', 'https'], host_required=None, default_host=None, default_port=None, default_path=None)]], /, *, format_version: Union[Literal['discover'], Literal['latest'], str] = 'discover', perform_io_checks: bool = True, known_files: Optional[Dict[str, bioimageio.spec._internal.io_basics.Sha256]] = None) -> Union[Annotated[Union[Annotated[Union[bioimageio.spec.application.v0_2.ApplicationDescr, bioimageio.spec.application.v0_3.ApplicationDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.dataset.v0_2.DatasetDescr, bioimageio.spec.dataset.v0_3.DatasetDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.model.v0_4.ModelDescr, bioimageio.spec.model.v0_5.ModelDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.notebook.v0_2.NotebookDescr, bioimageio.spec.notebook.v0_3.NotebookDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.generic.v0_2.GenericDescr, bioimageio.spec.generic.v0_3.GenericDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], bioimageio.spec._internal.common_nodes.InvalidDescr]: + load_resource( source: Union[Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])], str, Annotated[pydantic_core._pydantic_core.Url, UrlConstraints(max_length=2083, allowed_schemes=['http', 'https'], host_required=None, default_host=None, default_port=None, default_path=None)], zipfile.ZipFile], /, *, format_version: Union[Literal['discover'], Literal['latest'], str] = 'discover', perform_io_checks: bool = True, known_files: Optional[Dict[str, bioimageio.spec._internal.io_basics.Sha256]] = None) -> Union[Annotated[Union[Annotated[Union[bioimageio.spec.application.v0_2.ApplicationDescr, bioimageio.spec.ApplicationDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.dataset.v0_2.DatasetDescr, bioimageio.spec.DatasetDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.model.v0_4.ModelDescr, bioimageio.spec.ModelDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.NotebookDescr, bioimageio.spec.NotebookDescr], 
Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.generic.v0_2.GenericDescr, bioimageio.spec.GenericDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], bioimageio.spec.InvalidDescr]:
      29def load_description(
      -30    source: PermissiveFileSource,
      +30    source: Union[PermissiveFileSource, ZipFile],
       31    /,
       32    *,
       33    format_version: Union[Literal["discover"], Literal["latest"], str] = DISCOVER,
      @@ -2305,27 +2521,7 @@ 
      Returns:
- load a bioimage.io resource description
-
- Arguments:
-
- • source: Path or URL to an rdf.yaml or a bioimage.io package (zip-file with rdf.yaml in it).
- • format_version: (optional) Use this argument to load the resource and convert its metadata to a higher format_version.
- • perform_io_checks: Whether or not to perform validation that requires file IO, e.g. downloading remote files. The existence of local absolute file paths is still being checked.
- • known_files: Allows to bypass download and hashing of referenced files (even if perform_io_checks is True).
-
- Returns:
-
- An object holding all metadata of the bioimage.io resource
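For reference, the arguments documented above are typically combined like this (a sketch; the source path is a placeholder):

```python
from bioimageio.core import load_description
from bioimageio.spec import InvalidDescr

descr = load_description(
    "path/to/rdf.yaml",
    format_version="latest",   # optionally convert metadata to the latest format version
    perform_io_checks=False,   # skip validation steps that require file downloads/hashing
)
if isinstance(descr, InvalidDescr):
    raise ValueError("resource description failed validation")
```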
@@ -2333,7 +2529,7 @@
      Returns:
      MemberId = -<class 'bioimageio.spec.model.v0_5.TensorId'> +<class 'bioimageio.spec.model.v0_5.TensorId'>
      @@ -2347,7 +2543,7 @@
      Returns:
      def - predict_many( *, model: Union[Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])], str, Annotated[pydantic_core._pydantic_core.Url, UrlConstraints(max_length=2083, allowed_schemes=['http', 'https'], host_required=None, default_host=None, default_port=None, default_path=None)], bioimageio.spec.model.v0_4.ModelDescr, bioimageio.spec.model.v0_5.ModelDescr, PredictionPipeline], inputs: Iterable[Mapping[bioimageio.spec.model.v0_5.TensorId, Union[Tensor, xarray.core.dataarray.DataArray, numpy.ndarray[Any, numpy.dtype[Any]], pathlib.Path]]], sample_id: str = 'sample{i:03}', blocksize_parameter: Union[int, Mapping[Tuple[bioimageio.spec.model.v0_5.TensorId, bioimageio.spec.model.v0_5.AxisId], int], NoneType] = None, skip_preprocessing: bool = False, skip_postprocessing: bool = False, save_output_path: Union[pathlib.Path, str, NoneType] = None) -> Iterator[Sample]: + predict_many( *, model: Union[Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])], str, Annotated[pydantic_core._pydantic_core.Url, UrlConstraints(max_length=2083, allowed_schemes=['http', 'https'], host_required=None, default_host=None, default_port=None, default_path=None)], bioimageio.spec.model.v0_4.ModelDescr, bioimageio.spec.ModelDescr, PredictionPipeline], inputs: Iterable[Mapping[bioimageio.spec.model.v0_5.TensorId, Union[Tensor, xarray.core.dataarray.DataArray, numpy.ndarray[Any, numpy.dtype[Any]], pathlib.Path]]], sample_id: str = 'sample{i:03}', blocksize_parameter: Union[int, Mapping[Tuple[bioimageio.spec.model.v0_5.TensorId, AxisId], int], NoneType] = None, skip_preprocessing: bool = False, skip_postprocessing: bool = False, save_output_path: Union[pathlib.Path, str, NoneType] = None) -> Iterator[Sample]: @@ -2457,7 +2653,7 @@
      Arguments:
      def - predict( *, model: Union[Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])], str, Annotated[pydantic_core._pydantic_core.Url, UrlConstraints(max_length=2083, allowed_schemes=['http', 'https'], host_required=None, default_host=None, default_port=None, default_path=None)], bioimageio.spec.model.v0_4.ModelDescr, bioimageio.spec.model.v0_5.ModelDescr, PredictionPipeline], inputs: Union[Sample, Mapping[bioimageio.spec.model.v0_5.TensorId, Union[Tensor, xarray.core.dataarray.DataArray, numpy.ndarray[Any, numpy.dtype[Any]], pathlib.Path]]], sample_id: Hashable = 'sample', blocksize_parameter: Union[int, Mapping[Tuple[bioimageio.spec.model.v0_5.TensorId, bioimageio.spec.model.v0_5.AxisId], int], NoneType] = None, input_block_shape: Optional[Mapping[bioimageio.spec.model.v0_5.TensorId, Mapping[bioimageio.spec.model.v0_5.AxisId, int]]] = None, skip_preprocessing: bool = False, skip_postprocessing: bool = False, save_output_path: Union[pathlib.Path, str, NoneType] = None) -> Sample: + predict( *, model: Union[Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])], str, Annotated[pydantic_core._pydantic_core.Url, UrlConstraints(max_length=2083, allowed_schemes=['http', 'https'], host_required=None, default_host=None, default_port=None, default_path=None)], bioimageio.spec.model.v0_4.ModelDescr, bioimageio.spec.ModelDescr, PredictionPipeline], inputs: Union[Sample, Mapping[bioimageio.spec.model.v0_5.TensorId, Union[Tensor, xarray.core.dataarray.DataArray, numpy.ndarray[Any, numpy.dtype[Any]], pathlib.Path]]], sample_id: Hashable = 'sample', blocksize_parameter: Union[int, Mapping[Tuple[bioimageio.spec.model.v0_5.TensorId, AxisId], int], NoneType] = None, input_block_shape: Optional[Mapping[bioimageio.spec.model.v0_5.TensorId, Mapping[AxisId, int]]] = None, skip_preprocessing: bool = False, skip_postprocessing: bool = False, save_output_path: Union[pathlib.Path, str, NoneType] = None) -> Sample: @@ -2716,7 +2912,7 @@
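A rough usage sketch for `predict` based on the signature above; the model source, the tensor id `input0`, and the input shape are placeholders that depend on the concrete model description:

```python
import numpy as np

from bioimageio.core import load_model_description, predict
from bioimageio.spec.model.v0_5 import TensorId

model = load_model_description("path/to/rdf.yaml")
inputs = {TensorId("input0"): np.zeros((1, 1, 256, 256), dtype="float32")}  # must match the model's input
sample = predict(model=model, inputs=inputs, sample_id="demo")
print({member: tensor.sizes for member, tensor in sample.members.items()})
```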
      Arguments:
      165 if out is not None 166 }, 167 stat=sample.stat, -168 id=self.get_output_sample_id(sample.id), +168 id=sample.id, 169 ) 170 if not skip_postprocessing: 171 self.apply_postprocessing(output) @@ -2724,12 +2920,12 @@
      Arguments:
      173 return output 174 175 def get_output_sample_id(self, input_sample_id: SampleId): -176 if input_sample_id is None: -177 return None -178 else: -179 return f"{input_sample_id}_" + ( -180 self.model_description.id or self.model_description.name -181 ) +176 warnings.warn( +177 "`PredictionPipeline.get_output_sample_id()` is deprecated and will be" +178 + " removed soon. Output sample id is equal to input sample id, hence this" +179 + " function is not needed." +180 ) +181 return input_sample_id 182 183 def predict_sample_with_fixed_blocking( 184 self, @@ -2877,7 +3073,7 @@
      Arguments:
      - PredictionPipeline( *, name: str, model_description: Annotated[Union[bioimageio.spec.model.v0_4.ModelDescr, bioimageio.spec.model.v0_5.ModelDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], preprocessing: List[Union[bioimageio.core.proc_ops.AddKnownDatasetStats, bioimageio.core.proc_ops.Binarize, bioimageio.core.proc_ops.Clip, bioimageio.core.proc_ops.EnsureDtype, bioimageio.core.proc_ops.FixedZeroMeanUnitVariance, bioimageio.core.proc_ops.ScaleLinear, bioimageio.core.proc_ops.ScaleMeanVariance, bioimageio.core.proc_ops.ScaleRange, bioimageio.core.proc_ops.Sigmoid, bioimageio.core.proc_ops.UpdateStats, bioimageio.core.proc_ops.ZeroMeanUnitVariance]], postprocessing: List[Union[bioimageio.core.proc_ops.AddKnownDatasetStats, bioimageio.core.proc_ops.Binarize, bioimageio.core.proc_ops.Clip, bioimageio.core.proc_ops.EnsureDtype, bioimageio.core.proc_ops.FixedZeroMeanUnitVariance, bioimageio.core.proc_ops.ScaleLinear, bioimageio.core.proc_ops.ScaleMeanVariance, bioimageio.core.proc_ops.ScaleRange, bioimageio.core.proc_ops.Sigmoid, bioimageio.core.proc_ops.UpdateStats, bioimageio.core.proc_ops.ZeroMeanUnitVariance]], model_adapter: bioimageio.core.model_adapters._model_adapter.ModelAdapter, default_ns: Union[int, Mapping[Tuple[bioimageio.spec.model.v0_5.TensorId, bioimageio.spec.model.v0_5.AxisId], int]] = 10, default_batch_size: int = 1) + PredictionPipeline( *, name: str, model_description: Annotated[Union[bioimageio.spec.model.v0_4.ModelDescr, bioimageio.spec.ModelDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], preprocessing: List[Union[bioimageio.core.proc_ops.AddKnownDatasetStats, bioimageio.core.proc_ops.Binarize, bioimageio.core.proc_ops.Clip, bioimageio.core.proc_ops.EnsureDtype, bioimageio.core.proc_ops.FixedZeroMeanUnitVariance, bioimageio.core.proc_ops.ScaleLinear, bioimageio.core.proc_ops.ScaleMeanVariance, bioimageio.core.proc_ops.ScaleRange, bioimageio.core.proc_ops.Sigmoid, bioimageio.core.proc_ops.UpdateStats, bioimageio.core.proc_ops.ZeroMeanUnitVariance]], postprocessing: List[Union[bioimageio.core.proc_ops.AddKnownDatasetStats, bioimageio.core.proc_ops.Binarize, bioimageio.core.proc_ops.Clip, bioimageio.core.proc_ops.EnsureDtype, bioimageio.core.proc_ops.FixedZeroMeanUnitVariance, bioimageio.core.proc_ops.ScaleLinear, bioimageio.core.proc_ops.ScaleMeanVariance, bioimageio.core.proc_ops.ScaleRange, bioimageio.core.proc_ops.Sigmoid, bioimageio.core.proc_ops.UpdateStats, bioimageio.core.proc_ops.ZeroMeanUnitVariance]], model_adapter: bioimageio.core.model_adapters.ModelAdapter, default_ns: Union[int, Mapping[Tuple[bioimageio.spec.model.v0_5.TensorId, AxisId], int]] = 10, default_batch_size: int = 1) @@ -2965,7 +3161,7 @@
      Arguments:
      def - predict_sample_block( self, sample_block: bioimageio.core.sample.SampleBlockWithOrigin, skip_preprocessing: bool = False, skip_postprocessing: bool = False) -> bioimageio.core.sample.SampleBlock: + predict_sample_block( self, sample_block: bioimageio.core.sample.SampleBlockWithOrigin, skip_preprocessing: bool = False, skip_postprocessing: bool = False) -> bioimageio.core.sample.SampleBlock: @@ -3047,7 +3243,7 @@
      Arguments:
      165 if out is not None 166 }, 167 stat=sample.stat, -168 id=self.get_output_sample_id(sample.id), +168 id=sample.id, 169 ) 170 if not skip_postprocessing: 171 self.apply_postprocessing(output) @@ -3075,12 +3271,12 @@
      Arguments:
      175    def get_output_sample_id(self, input_sample_id: SampleId):
      -176        if input_sample_id is None:
      -177            return None
      -178        else:
      -179            return f"{input_sample_id}_" + (
      -180                self.model_description.id or self.model_description.name
      -181            )
      +176        warnings.warn(
      +177            "`PredictionPipeline.get_output_sample_id()` is deprecated and will be"
      +178            + " removed soon. Output sample id is equal to input sample id, hence this"
      +179            + " function is not needed."
      +180        )
      +181        return input_sample_id
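In other words, the output sample id now simply mirrors the input sample id; with hypothetical `pipeline` and `sample` objects:

```python
output = pipeline.predict_sample_with_blocking(sample)
assert output.id == sample.id  # no "<input id>_<model id>" suffix is appended anymore
```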
       
      @@ -3092,7 +3288,7 @@
      Arguments:
      def - predict_sample_with_fixed_blocking( self, sample: Sample, input_block_shape: Mapping[bioimageio.spec.model.v0_5.TensorId, Mapping[bioimageio.spec.model.v0_5.AxisId, int]], *, skip_preprocessing: bool = False, skip_postprocessing: bool = False) -> Sample: + predict_sample_with_fixed_blocking( self, sample: Sample, input_block_shape: Mapping[bioimageio.spec.model.v0_5.TensorId, Mapping[AxisId, int]], *, skip_preprocessing: bool = False, skip_postprocessing: bool = False) -> Sample: @@ -3145,7 +3341,7 @@
      Arguments:
      def - predict_sample_with_blocking( self, sample: Sample, skip_preprocessing: bool = False, skip_postprocessing: bool = False, ns: Union[int, Mapping[Tuple[bioimageio.spec.model.v0_5.TensorId, bioimageio.spec.model.v0_5.AxisId], int], NoneType] = None, batch_size: Optional[int] = None) -> Sample: + predict_sample_with_blocking( self, sample: Sample, skip_preprocessing: bool = False, skip_postprocessing: bool = False, ns: Union[int, Mapping[Tuple[bioimageio.spec.model.v0_5.TensorId, AxisId], int], NoneType] = None, batch_size: Optional[int] = None) -> Sample: @@ -3204,7 +3400,7 @@
      Arguments:
      def - apply_preprocessing( self, sample: Union[Sample, bioimageio.core.sample.SampleBlockWithOrigin]) -> None: + apply_preprocessing( self, sample: Union[Sample, bioimageio.core.sample.SampleBlockWithOrigin]) -> None: @@ -3227,7 +3423,7 @@
      Arguments:
      def - apply_postprocessing( self, sample: Union[Sample, bioimageio.core.sample.SampleBlock, bioimageio.core.sample.SampleBlockWithOrigin]) -> None: + apply_postprocessing( self, sample: Union[Sample, bioimageio.core.sample.SampleBlock, bioimageio.core.sample.SampleBlockWithOrigin]) -> None: @@ -3315,103 +3511,103 @@
      Arguments:
      -
       45@dataclass
      - 46class Sample:
      - 47    """A dataset sample"""
      - 48
      - 49    members: Dict[MemberId, Tensor]
      - 50    """the sample's tensors"""
      - 51
      - 52    stat: Stat
      - 53    """sample and dataset statistics"""
      - 54
      - 55    id: SampleId
      - 56    """identifier within the sample's dataset"""
      - 57
      - 58    @property
      - 59    def shape(self) -> PerMember[PerAxis[int]]:
      - 60        return {tid: t.sizes for tid, t in self.members.items()}
      - 61
      - 62    def split_into_blocks(
      - 63        self,
      - 64        block_shapes: PerMember[PerAxis[int]],
      - 65        halo: PerMember[PerAxis[HaloLike]],
      - 66        pad_mode: PadMode,
      - 67        broadcast: bool = False,
      - 68    ) -> Tuple[TotalNumberOfBlocks, Iterable[SampleBlockWithOrigin]]:
      - 69        assert not (
      - 70            missing := [m for m in block_shapes if m not in self.members]
      - 71        ), f"`block_shapes` specified for unknown members: {missing}"
      - 72        assert not (
      - 73            missing := [m for m in halo if m not in block_shapes]
      - 74        ), f"`halo` specified for members without `block_shape`: {missing}"
      - 75
      - 76        n_blocks, blocks = split_multiple_shapes_into_blocks(
      - 77            shapes=self.shape,
      - 78            block_shapes=block_shapes,
      - 79            halo=halo,
      - 80            broadcast=broadcast,
      - 81        )
      - 82        return n_blocks, sample_block_generator(blocks, origin=self, pad_mode=pad_mode)
      - 83
      - 84    def as_single_block(self, halo: Optional[PerMember[PerAxis[Halo]]] = None):
      - 85        if halo is None:
      - 86            halo = {}
      - 87        return SampleBlockWithOrigin(
      - 88            sample_shape=self.shape,
      - 89            sample_id=self.id,
      - 90            blocks={
      - 91                m: Block(
      - 92                    sample_shape=self.shape[m],
      - 93                    data=data,
      - 94                    inner_slice={
      - 95                        a: SliceInfo(0, s) for a, s in data.tagged_shape.items()
      - 96                    },
      - 97                    halo=halo.get(m, {}),
      - 98                    block_index=0,
      - 99                    blocks_in_sample=1,
      -100                )
      -101                for m, data in self.members.items()
      -102            },
      -103            stat=self.stat,
      -104            origin=self,
      -105            block_index=0,
      -106            blocks_in_sample=1,
      -107        )
      -108
      -109    @classmethod
      -110    def from_blocks(
      -111        cls,
      -112        sample_blocks: Iterable[SampleBlock],
      -113        *,
      -114        fill_value: float = float("nan"),
      -115    ) -> Self:
      -116        members: PerMember[Tensor] = {}
      -117        stat: Stat = {}
      -118        sample_id = None
      -119        for sample_block in sample_blocks:
      -120            assert sample_id is None or sample_id == sample_block.sample_id
      -121            sample_id = sample_block.sample_id
      -122            stat = sample_block.stat
      -123            for m, block in sample_block.blocks.items():
      -124                if m not in members:
      -125                    if -1 in block.sample_shape.values():
      -126                        raise NotImplementedError(
      -127                            "merging blocks with data dependent axis not yet implemented"
      -128                        )
      -129
      -130                    members[m] = Tensor(
      -131                        np.full(
      -132                            tuple(block.sample_shape[a] for a in block.data.dims),
      -133                            fill_value,
      -134                            dtype=block.data.dtype,
      -135                        ),
      -136                        dims=block.data.dims,
      -137                    )
      -138
      -139                members[m][block.inner_slice] = block.inner_data
      -140
      -141        return cls(members=members, stat=stat, id=sample_id)
      +            
       44@dataclass
      + 45class Sample:
      + 46    """A dataset sample"""
      + 47
      + 48    members: Dict[MemberId, Tensor]
      + 49    """the sample's tensors"""
      + 50
      + 51    stat: Stat
      + 52    """sample and dataset statistics"""
      + 53
      + 54    id: SampleId
      + 55    """identifier within the sample's dataset"""
      + 56
      + 57    @property
      + 58    def shape(self) -> PerMember[PerAxis[int]]:
      + 59        return {tid: t.sizes for tid, t in self.members.items()}
      + 60
      + 61    def split_into_blocks(
      + 62        self,
      + 63        block_shapes: PerMember[PerAxis[int]],
      + 64        halo: PerMember[PerAxis[HaloLike]],
      + 65        pad_mode: PadMode,
      + 66        broadcast: bool = False,
      + 67    ) -> Tuple[TotalNumberOfBlocks, Iterable[SampleBlockWithOrigin]]:
      + 68        assert not (
      + 69            missing := [m for m in block_shapes if m not in self.members]
      + 70        ), f"`block_shapes` specified for unknown members: {missing}"
      + 71        assert not (
      + 72            missing := [m for m in halo if m not in block_shapes]
      + 73        ), f"`halo` specified for members without `block_shape`: {missing}"
      + 74
      + 75        n_blocks, blocks = split_multiple_shapes_into_blocks(
      + 76            shapes=self.shape,
      + 77            block_shapes=block_shapes,
      + 78            halo=halo,
      + 79            broadcast=broadcast,
      + 80        )
      + 81        return n_blocks, sample_block_generator(blocks, origin=self, pad_mode=pad_mode)
      + 82
      + 83    def as_single_block(self, halo: Optional[PerMember[PerAxis[Halo]]] = None):
      + 84        if halo is None:
      + 85            halo = {}
      + 86        return SampleBlockWithOrigin(
      + 87            sample_shape=self.shape,
      + 88            sample_id=self.id,
      + 89            blocks={
      + 90                m: Block(
      + 91                    sample_shape=self.shape[m],
      + 92                    data=data,
      + 93                    inner_slice={
      + 94                        a: SliceInfo(0, s) for a, s in data.tagged_shape.items()
      + 95                    },
      + 96                    halo=halo.get(m, {}),
      + 97                    block_index=0,
      + 98                    blocks_in_sample=1,
      + 99                )
      +100                for m, data in self.members.items()
      +101            },
      +102            stat=self.stat,
      +103            origin=self,
      +104            block_index=0,
      +105            blocks_in_sample=1,
      +106        )
      +107
      +108    @classmethod
      +109    def from_blocks(
      +110        cls,
      +111        sample_blocks: Iterable[SampleBlock],
      +112        *,
      +113        fill_value: float = float("nan"),
      +114    ) -> Self:
      +115        members: PerMember[Tensor] = {}
      +116        stat: Stat = {}
      +117        sample_id = None
      +118        for sample_block in sample_blocks:
      +119            assert sample_id is None or sample_id == sample_block.sample_id
      +120            sample_id = sample_block.sample_id
      +121            stat = sample_block.stat
      +122            for m, block in sample_block.blocks.items():
      +123                if m not in members:
      +124                    if -1 in block.sample_shape.values():
      +125                        raise NotImplementedError(
      +126                            "merging blocks with data dependent axis not yet implemented"
      +127                        )
      +128
      +129                    members[m] = Tensor(
      +130                        np.full(
      +131                            tuple(block.sample_shape[a] for a in block.data.dims),
      +132                            fill_value,
      +133                            dtype=block.data.dtype,
      +134                        ),
      +135                        dims=block.data.dims,
      +136                    )
      +137
      +138                members[m][block.inner_slice] = block.inner_data
      +139
      +140        return cls(members=members, stat=stat, id=sample_id)
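A small sketch of constructing a `Sample` directly; the member id, axis names, and data are illustrative:

```python
import numpy as np

from bioimageio.core import Sample, Tensor
from bioimageio.spec.model.v0_5 import TensorId

data = Tensor(np.zeros((1, 256, 256), dtype="float32"), dims=("c", "y", "x"))
sample = Sample(members={TensorId("raw"): data}, stat={}, id="sample0")
print(sample.shape)  # per-member mapping of axis sizes
```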
       
      @@ -3422,7 +3618,7 @@
      Arguments:
      - Sample( members: Dict[bioimageio.spec.model.v0_5.TensorId, Tensor], stat: Dict[Annotated[Union[Annotated[Union[bioimageio.core.stat_measures.SampleMean, bioimageio.core.stat_measures.SampleStd, bioimageio.core.stat_measures.SampleVar, bioimageio.core.stat_measures.SampleQuantile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.core.stat_measures.DatasetMean, bioimageio.core.stat_measures.DatasetStd, bioimageio.core.stat_measures.DatasetVar, bioimageio.core.stat_measures.DatasetPercentile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='scope', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Union[float, Annotated[Tensor, BeforeValidator(func=<function tensor_custom_before_validator>, json_schema_input_type=PydanticUndefined), PlainSerializer(func=<function tensor_custom_serializer>, return_type=PydanticUndefined, when_used='always')]]], id: Hashable) + Sample( members: Dict[bioimageio.spec.model.v0_5.TensorId, Tensor], stat: Dict[Annotated[Union[Annotated[Union[bioimageio.core.stat_measures.SampleMean, bioimageio.core.stat_measures.SampleStd, bioimageio.core.stat_measures.SampleVar, bioimageio.core.stat_measures.SampleQuantile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.core.stat_measures.DatasetMean, bioimageio.core.stat_measures.DatasetStd, bioimageio.core.stat_measures.DatasetVar, bioimageio.core.stat_measures.DatasetPercentile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='scope', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Union[float, Annotated[Tensor, BeforeValidator(func=<function tensor_custom_before_validator>, json_schema_input_type=PydanticUndefined), PlainSerializer(func=<function tensor_custom_serializer>, return_type=PydanticUndefined, when_used='always')]]], id: Hashable)
      @@ -3433,7 +3629,7 @@
      Arguments:
      - members: Dict[bioimageio.spec.model.v0_5.TensorId, Tensor] + members: Dict[bioimageio.spec.model.v0_5.TensorId, Tensor]
      @@ -3446,7 +3642,7 @@
      Arguments:
      - stat: Dict[Annotated[Union[Annotated[Union[bioimageio.core.stat_measures.SampleMean, bioimageio.core.stat_measures.SampleStd, bioimageio.core.stat_measures.SampleVar, bioimageio.core.stat_measures.SampleQuantile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.core.stat_measures.DatasetMean, bioimageio.core.stat_measures.DatasetStd, bioimageio.core.stat_measures.DatasetVar, bioimageio.core.stat_measures.DatasetPercentile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='scope', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Union[float, Annotated[Tensor, BeforeValidator(func=<function tensor_custom_before_validator at 0x7f52744531a0>, json_schema_input_type=PydanticUndefined), PlainSerializer(func=<function tensor_custom_serializer at 0x7f52658a8ea0>, return_type=PydanticUndefined, when_used='always')]]] + stat: Dict[Annotated[Union[Annotated[Union[bioimageio.core.stat_measures.SampleMean, bioimageio.core.stat_measures.SampleStd, bioimageio.core.stat_measures.SampleVar, bioimageio.core.stat_measures.SampleQuantile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.core.stat_measures.DatasetMean, bioimageio.core.stat_measures.DatasetStd, bioimageio.core.stat_measures.DatasetVar, bioimageio.core.stat_measures.DatasetPercentile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='scope', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Union[float, Annotated[Tensor, BeforeValidator(func=<function tensor_custom_before_validator at 0x7f5372ef6840>, json_schema_input_type=PydanticUndefined), PlainSerializer(func=<function tensor_custom_serializer at 0x7f5372ef6a20>, return_type=PydanticUndefined, when_used='always')]]]
      @@ -3473,15 +3669,15 @@
      Arguments:
      - shape: Mapping[bioimageio.spec.model.v0_5.TensorId, Mapping[bioimageio.spec.model.v0_5.AxisId, int]] + shape: Mapping[bioimageio.spec.model.v0_5.TensorId, Mapping[AxisId, int]]
      -
      58    @property
      -59    def shape(self) -> PerMember[PerAxis[int]]:
      -60        return {tid: t.sizes for tid, t in self.members.items()}
      +            
      57    @property
      +58    def shape(self) -> PerMember[PerAxis[int]]:
      +59        return {tid: t.sizes for tid, t in self.members.items()}
       
      @@ -3493,33 +3689,33 @@
      Arguments:
      def - split_into_blocks( self, block_shapes: Mapping[bioimageio.spec.model.v0_5.TensorId, Mapping[bioimageio.spec.model.v0_5.AxisId, int]], halo: Mapping[bioimageio.spec.model.v0_5.TensorId, Mapping[bioimageio.spec.model.v0_5.AxisId, Union[int, Tuple[int, int], bioimageio.core.common.Halo]]], pad_mode: Literal['edge', 'reflect', 'symmetric'], broadcast: bool = False) -> Tuple[int, Iterable[bioimageio.core.sample.SampleBlockWithOrigin]]: + split_into_blocks( self, block_shapes: Mapping[bioimageio.spec.model.v0_5.TensorId, Mapping[AxisId, int]], halo: Mapping[bioimageio.spec.model.v0_5.TensorId, Mapping[AxisId, Union[int, Tuple[int, int], bioimageio.core.common.Halo]]], pad_mode: Literal['edge', 'reflect', 'symmetric'], broadcast: bool = False) -> Tuple[int, Iterable[bioimageio.core.sample.SampleBlockWithOrigin]]:
      -
      62    def split_into_blocks(
      -63        self,
      -64        block_shapes: PerMember[PerAxis[int]],
      -65        halo: PerMember[PerAxis[HaloLike]],
      -66        pad_mode: PadMode,
      -67        broadcast: bool = False,
      -68    ) -> Tuple[TotalNumberOfBlocks, Iterable[SampleBlockWithOrigin]]:
      -69        assert not (
      -70            missing := [m for m in block_shapes if m not in self.members]
      -71        ), f"`block_shapes` specified for unknown members: {missing}"
      -72        assert not (
      -73            missing := [m for m in halo if m not in block_shapes]
      -74        ), f"`halo` specified for members without `block_shape`: {missing}"
      -75
      -76        n_blocks, blocks = split_multiple_shapes_into_blocks(
      -77            shapes=self.shape,
      -78            block_shapes=block_shapes,
      -79            halo=halo,
      -80            broadcast=broadcast,
      -81        )
      -82        return n_blocks, sample_block_generator(blocks, origin=self, pad_mode=pad_mode)
      +            
      61    def split_into_blocks(
      +62        self,
      +63        block_shapes: PerMember[PerAxis[int]],
      +64        halo: PerMember[PerAxis[HaloLike]],
      +65        pad_mode: PadMode,
      +66        broadcast: bool = False,
      +67    ) -> Tuple[TotalNumberOfBlocks, Iterable[SampleBlockWithOrigin]]:
      +68        assert not (
      +69            missing := [m for m in block_shapes if m not in self.members]
      +70        ), f"`block_shapes` specified for unknown members: {missing}"
      +71        assert not (
      +72            missing := [m for m in halo if m not in block_shapes]
      +73        ), f"`halo` specified for members without `block_shape`: {missing}"
      +74
      +75        n_blocks, blocks = split_multiple_shapes_into_blocks(
      +76            shapes=self.shape,
      +77            block_shapes=block_shapes,
      +78            halo=halo,
      +79            broadcast=broadcast,
      +80        )
      +81        return n_blocks, sample_block_generator(blocks, origin=self, pad_mode=pad_mode)
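Building on the `Sample` sketch above, tiling could look roughly like this; the block shapes and halos are illustrative and must respect the model's constraints in real use:

```python
from bioimageio.spec.model.v0_5 import AxisId, TensorId

n_blocks, blocks = sample.split_into_blocks(
    block_shapes={TensorId("raw"): {AxisId("c"): 1, AxisId("y"): 128, AxisId("x"): 128}},
    halo={TensorId("raw"): {AxisId("y"): 16, AxisId("x"): 16}},
    pad_mode="reflect",
)
print(n_blocks)  # total number of blocks the sample was split into
```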
       
      @@ -3531,36 +3727,36 @@
      Arguments:
      def - as_single_block( self, halo: Optional[Mapping[bioimageio.spec.model.v0_5.TensorId, Mapping[bioimageio.spec.model.v0_5.AxisId, bioimageio.core.common.Halo]]] = None): + as_single_block( self, halo: Optional[Mapping[bioimageio.spec.model.v0_5.TensorId, Mapping[AxisId, bioimageio.core.common.Halo]]] = None):
      -
       84    def as_single_block(self, halo: Optional[PerMember[PerAxis[Halo]]] = None):
      - 85        if halo is None:
      - 86            halo = {}
      - 87        return SampleBlockWithOrigin(
      - 88            sample_shape=self.shape,
      - 89            sample_id=self.id,
      - 90            blocks={
      - 91                m: Block(
      - 92                    sample_shape=self.shape[m],
      - 93                    data=data,
      - 94                    inner_slice={
      - 95                        a: SliceInfo(0, s) for a, s in data.tagged_shape.items()
      - 96                    },
      - 97                    halo=halo.get(m, {}),
      - 98                    block_index=0,
      - 99                    blocks_in_sample=1,
      -100                )
      -101                for m, data in self.members.items()
      -102            },
      -103            stat=self.stat,
      -104            origin=self,
      -105            block_index=0,
      -106            blocks_in_sample=1,
      -107        )
      +            
       83    def as_single_block(self, halo: Optional[PerMember[PerAxis[Halo]]] = None):
      + 84        if halo is None:
      + 85            halo = {}
      + 86        return SampleBlockWithOrigin(
      + 87            sample_shape=self.shape,
      + 88            sample_id=self.id,
      + 89            blocks={
      + 90                m: Block(
      + 91                    sample_shape=self.shape[m],
      + 92                    data=data,
      + 93                    inner_slice={
      + 94                        a: SliceInfo(0, s) for a, s in data.tagged_shape.items()
      + 95                    },
      + 96                    halo=halo.get(m, {}),
      + 97                    block_index=0,
      + 98                    blocks_in_sample=1,
      + 99                )
      +100                for m, data in self.members.items()
      +101            },
      +102            stat=self.stat,
      +103            origin=self,
      +104            block_index=0,
      +105            blocks_in_sample=1,
      +106        )
       
      @@ -3573,45 +3769,45 @@
      Arguments:
      @classmethod
      def - from_blocks( cls, sample_blocks: Iterable[bioimageio.core.sample.SampleBlock], *, fill_value: float = nan) -> Self: + from_blocks( cls, sample_blocks: Iterable[bioimageio.core.sample.SampleBlock], *, fill_value: float = nan) -> Self:
      -
      109    @classmethod
      -110    def from_blocks(
      -111        cls,
      -112        sample_blocks: Iterable[SampleBlock],
      -113        *,
      -114        fill_value: float = float("nan"),
      -115    ) -> Self:
      -116        members: PerMember[Tensor] = {}
      -117        stat: Stat = {}
      -118        sample_id = None
      -119        for sample_block in sample_blocks:
      -120            assert sample_id is None or sample_id == sample_block.sample_id
      -121            sample_id = sample_block.sample_id
      -122            stat = sample_block.stat
      -123            for m, block in sample_block.blocks.items():
      -124                if m not in members:
      -125                    if -1 in block.sample_shape.values():
      -126                        raise NotImplementedError(
      -127                            "merging blocks with data dependent axis not yet implemented"
      -128                        )
      -129
      -130                    members[m] = Tensor(
      -131                        np.full(
      -132                            tuple(block.sample_shape[a] for a in block.data.dims),
      -133                            fill_value,
      -134                            dtype=block.data.dtype,
      -135                        ),
      -136                        dims=block.data.dims,
      -137                    )
      -138
      -139                members[m][block.inner_slice] = block.inner_data
      -140
      -141        return cls(members=members, stat=stat, id=sample_id)
      +            
      108    @classmethod
      +109    def from_blocks(
      +110        cls,
      +111        sample_blocks: Iterable[SampleBlock],
      +112        *,
      +113        fill_value: float = float("nan"),
      +114    ) -> Self:
      +115        members: PerMember[Tensor] = {}
      +116        stat: Stat = {}
      +117        sample_id = None
      +118        for sample_block in sample_blocks:
      +119            assert sample_id is None or sample_id == sample_block.sample_id
      +120            sample_id = sample_block.sample_id
      +121            stat = sample_block.stat
      +122            for m, block in sample_block.blocks.items():
      +123                if m not in members:
      +124                    if -1 in block.sample_shape.values():
      +125                        raise NotImplementedError(
      +126                            "merging blocks with data dependent axis not yet implemented"
      +127                        )
      +128
      +129                    members[m] = Tensor(
      +130                        np.full(
      +131                            tuple(block.sample_shape[a] for a in block.data.dims),
      +132                            fill_value,
      +133                            dtype=block.data.dtype,
      +134                        ),
      +135                        dims=block.data.dims,
      +136                    )
      +137
      +138                members[m][block.inner_slice] = block.inner_data
      +139
      +140        return cls(members=members, stat=stat, id=sample_id)
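Blocks can be merged back into a full `Sample`; for instance, the blocks produced by the `split_into_blocks` sketch above reassemble into an equivalent sample:

```python
restored = Sample.from_blocks(blocks)
assert restored.id == sample.id
assert set(restored.members) == set(sample.members)
```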
       
      @@ -3624,58 +3820,69 @@
      Arguments:
      def - save_bioimageio_package_as_folder( source: Union[Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])], str, Annotated[pydantic_core._pydantic_core.Url, UrlConstraints(max_length=2083, allowed_schemes=['http', 'https'], host_required=None, default_host=None, default_port=None, default_path=None)], Dict[str, YamlValue], Annotated[Union[Annotated[Union[bioimageio.spec.application.v0_2.ApplicationDescr, bioimageio.spec.application.v0_3.ApplicationDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.dataset.v0_2.DatasetDescr, bioimageio.spec.dataset.v0_3.DatasetDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.model.v0_4.ModelDescr, bioimageio.spec.model.v0_5.ModelDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.notebook.v0_2.NotebookDescr, bioimageio.spec.notebook.v0_3.NotebookDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.generic.v0_2.GenericDescr, bioimageio.spec.generic.v0_3.GenericDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], /, *, output_path: Union[Annotated[pathlib.Path, PathType(path_type='new')], Annotated[pathlib.Path, PathType(path_type='dir')], NoneType] = None, weights_priority_order: Optional[Sequence[Literal['keras_hdf5', 'onnx', 'pytorch_state_dict', 'tensorflow_js', 'tensorflow_saved_model_bundle', 'torchscript']]] = None) -> Annotated[pathlib.Path, PathType(path_type='dir')]: + save_bioimageio_package_as_folder( source: Union[Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])], str, Annotated[pydantic_core._pydantic_core.Url, UrlConstraints(max_length=2083, allowed_schemes=['http', 'https'], host_required=None, default_host=None, default_port=None, default_path=None)], Dict[str, YamlValue], Annotated[Union[Annotated[Union[bioimageio.spec.application.v0_2.ApplicationDescr, bioimageio.spec.ApplicationDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.dataset.v0_2.DatasetDescr, bioimageio.spec.DatasetDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.model.v0_4.ModelDescr, bioimageio.spec.ModelDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.NotebookDescr, bioimageio.spec.NotebookDescr], Discriminator(discriminator='format_version', 
custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.generic.v0_2.GenericDescr, bioimageio.spec.GenericDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], /, *, output_path: Union[Annotated[pathlib.Path, PathType(path_type='new')], Annotated[pathlib.Path, PathType(path_type='dir')], NoneType] = None, weights_priority_order: Optional[Sequence[Literal['keras_hdf5', 'onnx', 'pytorch_state_dict', 'tensorflow_js', 'tensorflow_saved_model_bundle', 'torchscript']]] = None) -> Annotated[pathlib.Path, PathType(path_type='dir')]:
      -
      123def save_bioimageio_package_as_folder(
      -124    source: Union[BioimageioYamlSource, ResourceDescr],
      -125    /,
      -126    *,
      -127    output_path: Union[NewPath, DirectoryPath, None] = None,
      -128    weights_priority_order: Optional[  # model only
      -129        Sequence[
      -130            Literal[
      -131                "keras_hdf5",
      -132                "onnx",
      -133                "pytorch_state_dict",
      -134                "tensorflow_js",
      -135                "tensorflow_saved_model_bundle",
      -136                "torchscript",
      -137            ]
      -138        ]
      -139    ] = None,
      -140) -> DirectoryPath:
      -141    """Write the content of a bioimage.io resource package to a folder.
      -142
      -143    Args:
      -144        source: bioimageio resource description
      -145        output_path: file path to write package to
      -146        weights_priority_order: If given only the first weights format present in the model is included.
      -147                                If none of the prioritized weights formats is found all are included.
      -148
      -149    Returns:
      -150        directory path to bioimageio package folder
      -151    """
      -152    package_content = _prepare_resource_package(
      -153        source,
      -154        weights_priority_order=weights_priority_order,
      -155    )
      -156    if output_path is None:
      -157        output_path = Path(mkdtemp())
      -158    else:
      -159        output_path = Path(output_path)
      -160
      -161    output_path.mkdir(exist_ok=True, parents=True)
      -162    for name, source in package_content.items():
      -163        if isinstance(source, collections.abc.Mapping):
      -164            write_yaml(cast(YamlValue, source), output_path / name)
      -165        else:
      -166            shutil.copy(source, output_path / name)
      -167
      -168    return output_path
      +            
      121def save_bioimageio_package_as_folder(
      +122    source: Union[BioimageioYamlSource, ResourceDescr],
      +123    /,
      +124    *,
      +125    output_path: Union[NewPath, DirectoryPath, None] = None,
      +126    weights_priority_order: Optional[  # model only
      +127        Sequence[
      +128            Literal[
      +129                "keras_hdf5",
      +130                "onnx",
      +131                "pytorch_state_dict",
      +132                "tensorflow_js",
      +133                "tensorflow_saved_model_bundle",
      +134                "torchscript",
      +135            ]
      +136        ]
      +137    ] = None,
      +138) -> DirectoryPath:
      +139    """Write the content of a bioimage.io resource package to a folder.
      +140
      +141    Args:
      +142        source: bioimageio resource description
      +143        output_path: file path to write package to
      +144        weights_priority_order: If given only the first weights format present in the model is included.
      +145                                If none of the prioritized weights formats is found all are included.
      +146
      +147    Returns:
      +148        directory path to bioimageio package folder
      +149    """
      +150    package_content = _prepare_resource_package(
      +151        source,
      +152        weights_priority_order=weights_priority_order,
      +153    )
      +154    if output_path is None:
      +155        output_path = Path(mkdtemp())
      +156    else:
      +157        output_path = Path(output_path)
      +158
      +159    output_path.mkdir(exist_ok=True, parents=True)
      +160    for name, src in package_content.items():
      +161        if isinstance(src, collections.abc.Mapping):
      +162            write_yaml(cast(YamlValue, src), output_path / name)
      +163        elif isinstance(src, ZipPath):
      +164            extracted = Path(src.root.extract(src.name, output_path))
      +165            if extracted.name != src.name:
      +166                try:
      +167                    shutil.move(str(extracted), output_path / src.name)
      +168                except Exception as e:
      +169                    raise RuntimeError(
      +170                        f"Failed to rename extracted file '{extracted.name}'"
      +171                        + f" to '{src.name}'."
      +172                        + f" (extracted from '{src.name}' in '{src.root.filename}')"
      +173                    ) from e
      +174        else:
      +175            shutil.copy(src, output_path / name)
      +176
      +177    return output_path
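Typical usage of the packaging helper above (source and output paths are placeholders):

```python
from pathlib import Path

from bioimageio.core import save_bioimageio_package_as_folder

folder = save_bioimageio_package_as_folder(
    "path/to/rdf.yaml",
    output_path=Path("my_package_folder"),
    weights_priority_order=["torchscript", "onnx"],  # optional; model packages only
)
print(folder)  # directory containing rdf.yaml and all referenced files
```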
       
      @@ -3704,71 +3911,71 @@
      Returns:
      def - save_bioimageio_package( source: Union[Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])], str, Annotated[pydantic_core._pydantic_core.Url, UrlConstraints(max_length=2083, allowed_schemes=['http', 'https'], host_required=None, default_host=None, default_port=None, default_path=None)], Dict[str, YamlValue], Annotated[Union[Annotated[Union[bioimageio.spec.application.v0_2.ApplicationDescr, bioimageio.spec.application.v0_3.ApplicationDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.dataset.v0_2.DatasetDescr, bioimageio.spec.dataset.v0_3.DatasetDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.model.v0_4.ModelDescr, bioimageio.spec.model.v0_5.ModelDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.notebook.v0_2.NotebookDescr, bioimageio.spec.notebook.v0_3.NotebookDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.generic.v0_2.GenericDescr, bioimageio.spec.generic.v0_3.GenericDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], /, *, compression: int = 8, compression_level: int = 1, output_path: Union[Annotated[pathlib.Path, PathType(path_type='new')], Annotated[pathlib.Path, PathType(path_type='file')], NoneType] = None, weights_priority_order: Optional[Sequence[Literal['keras_hdf5', 'onnx', 'pytorch_state_dict', 'tensorflow_js', 'tensorflow_saved_model_bundle', 'torchscript']]] = None) -> Annotated[pathlib.Path, PathType(path_type='file')]: + save_bioimageio_package( source: Union[Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])], str, Annotated[pydantic_core._pydantic_core.Url, UrlConstraints(max_length=2083, allowed_schemes=['http', 'https'], host_required=None, default_host=None, default_port=None, default_path=None)], Dict[str, YamlValue], Annotated[Union[Annotated[Union[bioimageio.spec.application.v0_2.ApplicationDescr, bioimageio.spec.ApplicationDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.dataset.v0_2.DatasetDescr, bioimageio.spec.DatasetDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.model.v0_4.ModelDescr, bioimageio.spec.ModelDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.NotebookDescr, bioimageio.spec.NotebookDescr], 
Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.generic.v0_2.GenericDescr, bioimageio.spec.GenericDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], /, *, compression: int = 8, compression_level: int = 1, output_path: Union[Annotated[pathlib.Path, PathType(path_type='new')], Annotated[pathlib.Path, PathType(path_type='file')], NoneType] = None, weights_priority_order: Optional[Sequence[Literal['keras_hdf5', 'onnx', 'pytorch_state_dict', 'tensorflow_js', 'tensorflow_saved_model_bundle', 'torchscript']]] = None) -> Annotated[pathlib.Path, PathType(path_type='file')]:
      -
      171def save_bioimageio_package(
      -172    source: Union[BioimageioYamlSource, ResourceDescr],
      -173    /,
      -174    *,
      -175    compression: int = ZIP_DEFLATED,
      -176    compression_level: int = 1,
      -177    output_path: Union[NewPath, FilePath, None] = None,
      -178    weights_priority_order: Optional[  # model only
      -179        Sequence[
      -180            Literal[
      -181                "keras_hdf5",
      -182                "onnx",
      -183                "pytorch_state_dict",
      -184                "tensorflow_js",
      -185                "tensorflow_saved_model_bundle",
      -186                "torchscript",
      -187            ]
      -188        ]
      -189    ] = None,
      -190) -> FilePath:
      -191    """Package a bioimageio resource as a zip file.
      -192
      -193    Args:
      -194        rd: bioimageio resource description
      -195        compression: The numeric constant of compression method.
      -196        compression_level: Compression level to use when writing files to the archive.
      -197                           See https://docs.python.org/3/library/zipfile.html#zipfile.ZipFile
      -198        output_path: file path to write package to
      -199        weights_priority_order: If given only the first weights format present in the model is included.
      -200                                If none of the prioritized weights formats is found all are included.
      +            
      180def save_bioimageio_package(
      +181    source: Union[BioimageioYamlSource, ResourceDescr],
      +182    /,
      +183    *,
      +184    compression: int = ZIP_DEFLATED,
      +185    compression_level: int = 1,
      +186    output_path: Union[NewPath, FilePath, None] = None,
      +187    weights_priority_order: Optional[  # model only
      +188        Sequence[
      +189            Literal[
      +190                "keras_hdf5",
      +191                "onnx",
      +192                "pytorch_state_dict",
      +193                "tensorflow_js",
      +194                "tensorflow_saved_model_bundle",
      +195                "torchscript",
      +196            ]
      +197        ]
      +198    ] = None,
      +199) -> FilePath:
      +200    """Package a bioimageio resource as a zip file.
       201
      -202    Returns:
      -203        path to zipped bioimageio package
      -204    """
      -205    package_content = _prepare_resource_package(
      -206        source,
      -207        weights_priority_order=weights_priority_order,
      -208    )
      -209    if output_path is None:
      -210        output_path = Path(
      -211            NamedTemporaryFile(suffix=".bioimageio.zip", delete=False).name
      -212        )
      -213    else:
      -214        output_path = Path(output_path)
      -215
      -216    write_zip(
      -217        output_path,
      -218        package_content,
      -219        compression=compression,
      -220        compression_level=compression_level,
      -221    )
      -222    with validation_context_var.get().replace(warning_level=ERROR):
      -223        if isinstance((exported := load_description(output_path)), InvalidDescr):
      -224            raise ValueError(
      -225                f"Exported package '{output_path}' is invalid:"
      -226                + f" {exported.validation_summary}"
      -227            )
      -228
      -229    return output_path
      +202    Args:
      +203        rd: bioimageio resource description
      +204        compression: The numeric constant of compression method.
      +205        compression_level: Compression level to use when writing files to the archive.
      +206                           See https://docs.python.org/3/library/zipfile.html#zipfile.ZipFile
      +207        output_path: file path to write package to
      +208        weights_priority_order: If given only the first weights format present in the model is included.
      +209                                If none of the prioritized weights formats is found all are included.
      +210
      +211    Returns:
      +212        path to zipped bioimageio package
      +213    """
      +214    package_content = _prepare_resource_package(
      +215        source,
      +216        weights_priority_order=weights_priority_order,
      +217    )
      +218    if output_path is None:
      +219        output_path = Path(
      +220            NamedTemporaryFile(suffix=".bioimageio.zip", delete=False).name
      +221        )
      +222    else:
      +223        output_path = Path(output_path)
      +224
      +225    write_zip(
      +226        output_path,
      +227        package_content,
      +228        compression=compression,
      +229        compression_level=compression_level,
      +230    )
      +231    with validation_context_var.get().replace(warning_level=ERROR):
      +232        if isinstance((exported := load_description(output_path)), InvalidDescr):
      +233            raise ValueError(
      +234                f"Exported package '{output_path}' is invalid:"
      +235                + f" {exported.validation_summary}"
      +236            )
      +237
      +238    return output_path
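A minimal usage sketch based on the signature shown above (the source URL and output file name are placeholders, and the import assumes the re-export documented on this page): package a resource description into a single zip, keeping only the first available weights format from a priority list.

from pathlib import Path
from bioimageio.core import save_bioimageio_package

# package an RDF (local path or URL) as a .bioimageio.zip file
zip_path = save_bioimageio_package(
    "https://example.com/my-model/rdf.yaml",  # placeholder source
    output_path=Path("my-model.bioimageio.zip"),
    weights_priority_order=["torchscript", "onnx"],  # only the first format found is included
)
print(zip_path)  # path to the written package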
       
      @@ -3800,29 +4007,29 @@
      Returns:
      def - save_bioimageio_yaml_only( rd: Union[Annotated[Union[Annotated[Union[bioimageio.spec.application.v0_2.ApplicationDescr, bioimageio.spec.application.v0_3.ApplicationDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.dataset.v0_2.DatasetDescr, bioimageio.spec.dataset.v0_3.DatasetDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.model.v0_4.ModelDescr, bioimageio.spec.model.v0_5.ModelDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.notebook.v0_2.NotebookDescr, bioimageio.spec.notebook.v0_3.NotebookDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.generic.v0_2.GenericDescr, bioimageio.spec.generic.v0_3.GenericDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Dict[str, YamlValue], bioimageio.spec._internal.common_nodes.InvalidDescr], /, file: Union[Annotated[pathlib.Path, PathType(path_type='new')], Annotated[pathlib.Path, PathType(path_type='file')], TextIO]): + save_bioimageio_yaml_only( rd: Union[Annotated[Union[Annotated[Union[bioimageio.spec.application.v0_2.ApplicationDescr, bioimageio.spec.ApplicationDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.dataset.v0_2.DatasetDescr, bioimageio.spec.DatasetDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.model.v0_4.ModelDescr, bioimageio.spec.ModelDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.NotebookDescr, bioimageio.spec.NotebookDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.generic.v0_2.GenericDescr, bioimageio.spec.GenericDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Dict[str, YamlValue], bioimageio.spec.InvalidDescr], /, file: Union[Annotated[pathlib.Path, PathType(path_type='new')], Annotated[pathlib.Path, PathType(path_type='file')], TextIO]):
      -
      115def save_bioimageio_yaml_only(
      -116    rd: Union[ResourceDescr, BioimageioYamlContent, InvalidDescr],
      -117    /,
      -118    file: Union[NewPath, FilePath, TextIO],
      -119):
      -120    """write the metadata of a resource description (`rd`) to `file`
      -121    without writing any of the referenced files in it.
      -122
      -123    Note: To save a resource description with its associated files as a package,
      -124    use `save_bioimageio_package` or `save_bioimageio_package_as_folder`.
      -125    """
      -126    if isinstance(rd, ResourceDescrBase):
      -127        content = dump_description(rd)
      -128    else:
      -129        content = rd
      -130
      -131    write_yaml(cast(YamlValue, content), file)
      +            
      118def save_bioimageio_yaml_only(
      +119    rd: Union[ResourceDescr, BioimageioYamlContent, InvalidDescr],
      +120    /,
      +121    file: Union[NewPath, FilePath, TextIO],
      +122):
      +123    """write the metadata of a resource description (`rd`) to `file`
      +124    without writing any of the referenced files in it.
      +125
      +126    Note: To save a resource description with its associated files as a package,
      +127    use `save_bioimageio_package` or `save_bioimageio_package_as_folder`.
      +128    """
      +129    if isinstance(rd, ResourceDescrBase):
      +130        content = dump_description(rd)
      +131    else:
      +132        content = rd
      +133
      +134    write_yaml(cast(YamlValue, content), file)
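An illustrative sketch (file names are placeholders; imports as documented on this page): write only the rdf.yaml metadata of a loaded description, without copying any referenced files.

from pathlib import Path
from bioimageio.core import load_description, save_bioimageio_yaml_only

rd = load_description("my-model.bioimageio.zip")  # placeholder package path
save_bioimageio_yaml_only(rd, file=Path("rdf.yaml"))  # metadata only, no weights/test tensors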
       
      @@ -3852,7 +4059,7 @@
      Returns:
      Stat = - typing.Dict[typing.Annotated[typing.Union[typing.Annotated[typing.Union[bioimageio.core.stat_measures.SampleMean, bioimageio.core.stat_measures.SampleStd, bioimageio.core.stat_measures.SampleVar, bioimageio.core.stat_measures.SampleQuantile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)], typing.Annotated[typing.Union[bioimageio.core.stat_measures.DatasetMean, bioimageio.core.stat_measures.DatasetStd, bioimageio.core.stat_measures.DatasetVar, bioimageio.core.stat_measures.DatasetPercentile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='scope', custom_error_type=None, custom_error_message=None, custom_error_context=None)], typing.Union[float, typing.Annotated[Tensor, BeforeValidator(func=<function tensor_custom_before_validator>, json_schema_input_type=PydanticUndefined), PlainSerializer(func=<function tensor_custom_serializer>, return_type=PydanticUndefined, when_used='always')]]] + typing.Dict[typing.Annotated[typing.Union[typing.Annotated[typing.Union[bioimageio.core.stat_measures.SampleMean, bioimageio.core.stat_measures.SampleStd, bioimageio.core.stat_measures.SampleVar, bioimageio.core.stat_measures.SampleQuantile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)], typing.Annotated[typing.Union[bioimageio.core.stat_measures.DatasetMean, bioimageio.core.stat_measures.DatasetStd, bioimageio.core.stat_measures.DatasetVar, bioimageio.core.stat_measures.DatasetPercentile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='scope', custom_error_type=None, custom_error_message=None, custom_error_context=None)], typing.Union[float, typing.Annotated[Tensor, BeforeValidator(func=<function tensor_custom_before_validator>, json_schema_input_type=PydanticUndefined), PlainSerializer(func=<function tensor_custom_serializer>, return_type=PydanticUndefined, when_used='always')]]]
      @@ -4304,7 +4511,7 @@
      Returns:
      -

      A wrapper around an xr.DataArray for better integration with bioimageio.spec +

      A wrapper around an xr.DataArray for better integration with bioimageio.spec and improved type annotations.
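For illustration (a sketch, assuming Tensor is importable from bioimageio.core as documented here), a tensor can be built from a numpy array by naming its axes with the single-letter axis ids such as 'b', 'c', 'y', 'x' via from_numpy (documented further below):

import numpy as np
from bioimageio.core import Tensor

data = np.zeros((1, 3, 256, 256), dtype=np.float32)  # hypothetical image batch
t = Tensor.from_numpy(data, dims=("b", "c", "y", "x"))  # letters map to batch/channel/space axes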

      @@ -4313,7 +4520,7 @@
      Returns:
      - Tensor( array: numpy.ndarray[typing.Any, numpy.dtype[typing.Any]], dims: Sequence[Union[bioimageio.spec.model.v0_5.AxisId, Literal['b', 'i', 't', 'c', 'z', 'y', 'x'], Annotated[Union[bioimageio.spec.model.v0_5.BatchAxis, bioimageio.spec.model.v0_5.ChannelAxis, bioimageio.spec.model.v0_5.IndexInputAxis, bioimageio.spec.model.v0_5.TimeInputAxis, bioimageio.spec.model.v0_5.SpaceInputAxis], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.model.v0_5.BatchAxis, bioimageio.spec.model.v0_5.ChannelAxis, bioimageio.spec.model.v0_5.IndexOutputAxis, Annotated[Union[Annotated[bioimageio.spec.model.v0_5.TimeOutputAxis, Tag(tag='wo_halo')], Annotated[bioimageio.spec.model.v0_5.TimeOutputAxisWithHalo, Tag(tag='with_halo')]], Discriminator(discriminator=<function _get_halo_axis_discriminator_value>, custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[Annotated[bioimageio.spec.model.v0_5.SpaceOutputAxis, Tag(tag='wo_halo')], Annotated[bioimageio.spec.model.v0_5.SpaceOutputAxisWithHalo, Tag(tag='with_halo')]], Discriminator(discriminator=<function _get_halo_axis_discriminator_value>, custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Axis]]) + Tensor( array: numpy.ndarray[typing.Any, numpy.dtype[typing.Any]], dims: Sequence[Union[AxisId, Literal['b', 'i', 't', 'c', 'z', 'y', 'x'], Annotated[Union[bioimageio.spec.model.v0_5.BatchAxis, bioimageio.spec.model.v0_5.ChannelAxis, bioimageio.spec.model.v0_5.IndexInputAxis, bioimageio.spec.model.v0_5.TimeInputAxis, bioimageio.spec.model.v0_5.SpaceInputAxis], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.model.v0_5.BatchAxis, bioimageio.spec.model.v0_5.ChannelAxis, bioimageio.spec.model.v0_5.IndexOutputAxis, Annotated[Union[Annotated[bioimageio.spec.model.v0_5.TimeOutputAxis, Tag(tag='wo_halo')], Annotated[bioimageio.spec.model.v0_5.TimeOutputAxisWithHalo, Tag(tag='with_halo')]], Discriminator(discriminator=<function _get_halo_axis_discriminator_value>, custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[Annotated[bioimageio.spec.model.v0_5.SpaceOutputAxis, Tag(tag='wo_halo')], Annotated[bioimageio.spec.model.v0_5.SpaceOutputAxisWithHalo, Tag(tag='with_halo')]], Discriminator(discriminator=<function _get_halo_axis_discriminator_value>, custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Axis]]) @@ -4374,7 +4581,7 @@
      Returns:
      @classmethod
      def - from_numpy( cls, array: numpy.ndarray[typing.Any, numpy.dtype[typing.Any]], *, dims: Union[bioimageio.spec.model.v0_5.AxisId, Literal['b', 'i', 't', 'c', 'z', 'y', 'x'], Annotated[Union[bioimageio.spec.model.v0_5.BatchAxis, bioimageio.spec.model.v0_5.ChannelAxis, bioimageio.spec.model.v0_5.IndexInputAxis, bioimageio.spec.model.v0_5.TimeInputAxis, bioimageio.spec.model.v0_5.SpaceInputAxis], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.model.v0_5.BatchAxis, bioimageio.spec.model.v0_5.ChannelAxis, bioimageio.spec.model.v0_5.IndexOutputAxis, Annotated[Union[Annotated[bioimageio.spec.model.v0_5.TimeOutputAxis, Tag(tag='wo_halo')], Annotated[bioimageio.spec.model.v0_5.TimeOutputAxisWithHalo, Tag(tag='with_halo')]], Discriminator(discriminator=<function _get_halo_axis_discriminator_value>, custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[Annotated[bioimageio.spec.model.v0_5.SpaceOutputAxis, Tag(tag='wo_halo')], Annotated[bioimageio.spec.model.v0_5.SpaceOutputAxisWithHalo, Tag(tag='with_halo')]], Discriminator(discriminator=<function _get_halo_axis_discriminator_value>, custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Axis, Sequence[Union[bioimageio.spec.model.v0_5.AxisId, Literal['b', 'i', 't', 'c', 'z', 'y', 'x'], Annotated[Union[bioimageio.spec.model.v0_5.BatchAxis, bioimageio.spec.model.v0_5.ChannelAxis, bioimageio.spec.model.v0_5.IndexInputAxis, bioimageio.spec.model.v0_5.TimeInputAxis, bioimageio.spec.model.v0_5.SpaceInputAxis], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.model.v0_5.BatchAxis, bioimageio.spec.model.v0_5.ChannelAxis, bioimageio.spec.model.v0_5.IndexOutputAxis, Annotated[Union[Annotated[bioimageio.spec.model.v0_5.TimeOutputAxis, Tag(tag='wo_halo')], Annotated[bioimageio.spec.model.v0_5.TimeOutputAxisWithHalo, Tag(tag='with_halo')]], Discriminator(discriminator=<function _get_halo_axis_discriminator_value>, custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[Annotated[bioimageio.spec.model.v0_5.SpaceOutputAxis, Tag(tag='wo_halo')], Annotated[bioimageio.spec.model.v0_5.SpaceOutputAxisWithHalo, Tag(tag='with_halo')]], Discriminator(discriminator=<function _get_halo_axis_discriminator_value>, custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Axis]], NoneType]) -> Tensor: + from_numpy( cls, array: numpy.ndarray[typing.Any, numpy.dtype[typing.Any]], *, dims: Union[AxisId, Literal['b', 'i', 't', 'c', 'z', 'y', 'x'], Annotated[Union[bioimageio.spec.model.v0_5.BatchAxis, bioimageio.spec.model.v0_5.ChannelAxis, bioimageio.spec.model.v0_5.IndexInputAxis, bioimageio.spec.model.v0_5.TimeInputAxis, bioimageio.spec.model.v0_5.SpaceInputAxis], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.model.v0_5.BatchAxis, bioimageio.spec.model.v0_5.ChannelAxis, bioimageio.spec.model.v0_5.IndexOutputAxis, Annotated[Union[Annotated[bioimageio.spec.model.v0_5.TimeOutputAxis, Tag(tag='wo_halo')], 
Annotated[bioimageio.spec.model.v0_5.TimeOutputAxisWithHalo, Tag(tag='with_halo')]], Discriminator(discriminator=<function _get_halo_axis_discriminator_value>, custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[Annotated[bioimageio.spec.model.v0_5.SpaceOutputAxis, Tag(tag='wo_halo')], Annotated[bioimageio.spec.model.v0_5.SpaceOutputAxisWithHalo, Tag(tag='with_halo')]], Discriminator(discriminator=<function _get_halo_axis_discriminator_value>, custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Axis, Sequence[Union[AxisId, Literal['b', 'i', 't', 'c', 'z', 'y', 'x'], Annotated[Union[bioimageio.spec.model.v0_5.BatchAxis, bioimageio.spec.model.v0_5.ChannelAxis, bioimageio.spec.model.v0_5.IndexInputAxis, bioimageio.spec.model.v0_5.TimeInputAxis, bioimageio.spec.model.v0_5.SpaceInputAxis], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.model.v0_5.BatchAxis, bioimageio.spec.model.v0_5.ChannelAxis, bioimageio.spec.model.v0_5.IndexOutputAxis, Annotated[Union[Annotated[bioimageio.spec.model.v0_5.TimeOutputAxis, Tag(tag='wo_halo')], Annotated[bioimageio.spec.model.v0_5.TimeOutputAxisWithHalo, Tag(tag='with_halo')]], Discriminator(discriminator=<function _get_halo_axis_discriminator_value>, custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[Annotated[bioimageio.spec.model.v0_5.SpaceOutputAxis, Tag(tag='wo_halo')], Annotated[bioimageio.spec.model.v0_5.SpaceOutputAxisWithHalo, Tag(tag='with_halo')]], Discriminator(discriminator=<function _get_halo_axis_discriminator_value>, custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Axis]], NoneType]) -> Tensor: @@ -4547,7 +4754,7 @@
      Raises:
      def - sum( self, dim: Union[bioimageio.spec.model.v0_5.AxisId, Sequence[bioimageio.spec.model.v0_5.AxisId], NoneType] = None) -> Self: + sum( self, dim: Union[AxisId, Sequence[AxisId], NoneType] = None) -> Self: @@ -4681,7 +4888,7 @@
      Raises:
      def - crop_to( self, sizes: Mapping[bioimageio.spec.model.v0_5.AxisId, int], crop_where: Union[Literal['left', 'right', 'left_and_right'], Mapping[bioimageio.spec.model.v0_5.AxisId, Literal['left', 'right', 'left_and_right']]] = 'left_and_right') -> Self: + crop_to( self, sizes: Mapping[AxisId, int], crop_where: Union[Literal['left', 'right', 'left_and_right'], Mapping[AxisId, Literal['left', 'right', 'left_and_right']]] = 'left_and_right') -> Self: @@ -4744,7 +4951,7 @@
      Raises:
      def - expand_dims( self, dims: Union[Sequence[bioimageio.spec.model.v0_5.AxisId], Mapping[bioimageio.spec.model.v0_5.AxisId, int]]) -> Self: + expand_dims( self, dims: Union[Sequence[AxisId], Mapping[AxisId, int]]) -> Self: @@ -4763,7 +4970,7 @@
      Raises:
      def - mean( self, dim: Union[bioimageio.spec.model.v0_5.AxisId, Sequence[bioimageio.spec.model.v0_5.AxisId], NoneType] = None) -> Self: + mean( self, dim: Union[AxisId, Sequence[AxisId], NoneType] = None) -> Self: @@ -4782,7 +4989,7 @@
      Raises:
      def - std( self, dim: Union[bioimageio.spec.model.v0_5.AxisId, Sequence[bioimageio.spec.model.v0_5.AxisId], NoneType] = None) -> Self: + std( self, dim: Union[AxisId, Sequence[AxisId], NoneType] = None) -> Self: @@ -4801,7 +5008,7 @@
      Raises:
      def - var( self, dim: Union[bioimageio.spec.model.v0_5.AxisId, Sequence[bioimageio.spec.model.v0_5.AxisId], NoneType] = None) -> Self: + var( self, dim: Union[AxisId, Sequence[AxisId], NoneType] = None) -> Self: @@ -4820,7 +5027,7 @@
      Raises:
      def - pad( self, pad_width: Mapping[bioimageio.spec.model.v0_5.AxisId, Union[int, Tuple[int, int], bioimageio.core.common.PadWidth]], mode: Literal['edge', 'reflect', 'symmetric'] = 'symmetric') -> Self: + pad( self, pad_width: Mapping[AxisId, Union[int, Tuple[int, int], bioimageio.core.common.PadWidth]], mode: Literal['edge', 'reflect', 'symmetric'] = 'symmetric') -> Self: @@ -4846,7 +5053,7 @@
      Raises:
      def - pad_to( self, sizes: Mapping[bioimageio.spec.model.v0_5.AxisId, int], pad_where: Union[Literal['left', 'right', 'left_and_right'], Mapping[bioimageio.spec.model.v0_5.AxisId, Literal['left', 'right', 'left_and_right']]] = 'left_and_right', mode: Literal['edge', 'reflect', 'symmetric'] = 'symmetric') -> Self: + pad_to( self, sizes: Mapping[AxisId, int], pad_where: Union[Literal['left', 'right', 'left_and_right'], Mapping[AxisId, Literal['left', 'right', 'left_and_right']]] = 'left_and_right', mode: Literal['edge', 'reflect', 'symmetric'] = 'symmetric') -> Self: @@ -4906,7 +5113,7 @@
      Raises:
      def - quantile( self, q: Union[float, Sequence[float]], dim: Union[bioimageio.spec.model.v0_5.AxisId, Sequence[bioimageio.spec.model.v0_5.AxisId], NoneType] = None) -> Self: + quantile( self, q: Union[float, Sequence[float]], dim: Union[AxisId, Sequence[AxisId], NoneType] = None) -> Self: @@ -4944,7 +5151,7 @@
      Raises:
      def - resize_to( self, sizes: Mapping[bioimageio.spec.model.v0_5.AxisId, int], *, pad_where: Union[Literal['left', 'right', 'left_and_right'], Mapping[bioimageio.spec.model.v0_5.AxisId, Literal['left', 'right', 'left_and_right']]] = 'left_and_right', crop_where: Union[Literal['left', 'right', 'left_and_right'], Mapping[bioimageio.spec.model.v0_5.AxisId, Literal['left', 'right', 'left_and_right']]] = 'left_and_right', pad_mode: Literal['edge', 'reflect', 'symmetric'] = 'symmetric'): + resize_to( self, sizes: Mapping[AxisId, int], *, pad_where: Union[Literal['left', 'right', 'left_and_right'], Mapping[AxisId, Literal['left', 'right', 'left_and_right']]] = 'left_and_right', crop_where: Union[Literal['left', 'right', 'left_and_right'], Mapping[AxisId, Literal['left', 'right', 'left_and_right']]] = 'left_and_right', pad_mode: Literal['edge', 'reflect', 'symmetric'] = 'symmetric'): @@ -5002,7 +5209,7 @@
      Raises:
      def - transpose(self, axes: Sequence[bioimageio.spec.model.v0_5.AxisId]) -> Self: + transpose(self, axes: Sequence[AxisId]) -> Self: @@ -5045,36 +5252,37 @@
      Arguments:
      def - test_description( source: Union[Annotated[Union[Annotated[Union[bioimageio.spec.application.v0_2.ApplicationDescr, bioimageio.spec.application.v0_3.ApplicationDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.dataset.v0_2.DatasetDescr, bioimageio.spec.dataset.v0_3.DatasetDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.model.v0_4.ModelDescr, bioimageio.spec.model.v0_5.ModelDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.notebook.v0_2.NotebookDescr, bioimageio.spec.notebook.v0_3.NotebookDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.generic.v0_2.GenericDescr, bioimageio.spec.generic.v0_3.GenericDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])], str, Annotated[pydantic_core._pydantic_core.Url, UrlConstraints(max_length=2083, allowed_schemes=['http', 'https'], host_required=None, default_host=None, default_port=None, default_path=None)], Dict[str, YamlValue]], *, format_version: Union[Literal['discover', 'latest'], str] = 'discover', weight_format: Optional[Literal['keras_hdf5', 'onnx', 'pytorch_state_dict', 'tensorflow_js', 'tensorflow_saved_model_bundle', 'torchscript']] = None, devices: Optional[Sequence[str]] = None, absolute_tolerance: float = 0.00015, relative_tolerance: float = 0.0001, decimal: Optional[int] = None, expected_type: Optional[str] = None) -> bioimageio.spec.summary.ValidationSummary: + test_description( source: Union[Annotated[Union[Annotated[Union[bioimageio.spec.application.v0_2.ApplicationDescr, bioimageio.spec.ApplicationDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.dataset.v0_2.DatasetDescr, bioimageio.spec.DatasetDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.model.v0_4.ModelDescr, bioimageio.spec.ModelDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.NotebookDescr, bioimageio.spec.NotebookDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.generic.v0_2.GenericDescr, bioimageio.spec.GenericDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], 
Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])], str, Annotated[pydantic_core._pydantic_core.Url, UrlConstraints(max_length=2083, allowed_schemes=['http', 'https'], host_required=None, default_host=None, default_port=None, default_path=None)], Dict[str, YamlValue]], *, format_version: Union[Literal['discover', 'latest'], str] = 'discover', weight_format: Optional[Literal['keras_hdf5', 'onnx', 'pytorch_state_dict', 'tensorflow_js', 'tensorflow_saved_model_bundle', 'torchscript']] = None, devices: Optional[Sequence[str]] = None, absolute_tolerance: float = 0.00015, relative_tolerance: float = 0.0001, decimal: Optional[int] = None, determinism: Literal['seed_only', 'full'] = 'seed_only', expected_type: Optional[str] = None) -> bioimageio.spec.ValidationSummary:
      -
      56def test_description(
      -57    source: Union[ResourceDescr, PermissiveFileSource, BioimageioYamlContent],
      -58    *,
      -59    format_version: Union[Literal["discover", "latest"], str] = "discover",
      -60    weight_format: Optional[WeightsFormat] = None,
      -61    devices: Optional[Sequence[str]] = None,
      -62    absolute_tolerance: float = 1.5e-4,
      -63    relative_tolerance: float = 1e-4,
      -64    decimal: Optional[int] = None,
      -65    expected_type: Optional[str] = None,
      -66) -> ValidationSummary:
      -67    """Test a bioimage.io resource dynamically, e.g. prediction of test tensors for models"""
      -68    # NOTE: `decimal` is a legacy argument and is handled in `_test_model_inference`
      -69    rd = load_description_and_test(
      -70        source,
      -71        format_version=format_version,
      -72        weight_format=weight_format,
      -73        devices=devices,
      -74        absolute_tolerance=absolute_tolerance,
      -75        relative_tolerance=relative_tolerance,
      -76        decimal=decimal,
      -77        expected_type=expected_type,
      -78    )
      -79    return rd.validation_summary
      +            
      120def test_description(
      +121    source: Union[ResourceDescr, PermissiveFileSource, BioimageioYamlContent],
      +122    *,
      +123    format_version: Union[Literal["discover", "latest"], str] = "discover",
      +124    weight_format: Optional[WeightsFormat] = None,
      +125    devices: Optional[Sequence[str]] = None,
      +126    absolute_tolerance: float = 1.5e-4,
      +127    relative_tolerance: float = 1e-4,
      +128    decimal: Optional[int] = None,
      +129    determinism: Literal["seed_only", "full"] = "seed_only",
      +130    expected_type: Optional[str] = None,
      +131) -> ValidationSummary:
      +132    """Test a bioimage.io resource dynamically, e.g. prediction of test tensors for models"""
      +133    rd = load_description_and_test(
      +134        source,
      +135        format_version=format_version,
      +136        weight_format=weight_format,
      +137        devices=devices,
      +138        absolute_tolerance=absolute_tolerance,
      +139        relative_tolerance=relative_tolerance,
      +140        decimal=decimal,
      +141        determinism=determinism,
      +142        expected_type=expected_type,
      +143    )
      +144    return rd.validation_summary
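A usage sketch (the RDF URL is a placeholder): run the dynamic tests of a resource with seeded random generators; passing determinism="full" would additionally instruct the DL framework to use deterministic algorithms.

from bioimageio.core import test_description

summary = test_description(
    "https://example.com/my-model/rdf.yaml",  # placeholder RDF source
    weight_format="pytorch_state_dict",
    determinism="seed_only",
)
print(summary)  # ValidationSummary of the dynamic tests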
       
      @@ -5088,31 +5296,33 @@
      Arguments:
      def - test_model( source: Union[bioimageio.spec.model.v0_5.ModelDescr, Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])], str, Annotated[pydantic_core._pydantic_core.Url, UrlConstraints(max_length=2083, allowed_schemes=['http', 'https'], host_required=None, default_host=None, default_port=None, default_path=None)]], weight_format: Optional[Literal['keras_hdf5', 'onnx', 'pytorch_state_dict', 'tensorflow_js', 'tensorflow_saved_model_bundle', 'torchscript']] = None, devices: Optional[List[str]] = None, absolute_tolerance: float = 0.00015, relative_tolerance: float = 0.0001, decimal: Optional[int] = None) -> bioimageio.spec.summary.ValidationSummary: + test_model( source: Union[bioimageio.spec.ModelDescr, Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])], str, Annotated[pydantic_core._pydantic_core.Url, UrlConstraints(max_length=2083, allowed_schemes=['http', 'https'], host_required=None, default_host=None, default_port=None, default_path=None)]], weight_format: Optional[Literal['keras_hdf5', 'onnx', 'pytorch_state_dict', 'tensorflow_js', 'tensorflow_saved_model_bundle', 'torchscript']] = None, devices: Optional[List[str]] = None, absolute_tolerance: float = 0.00015, relative_tolerance: float = 0.0001, decimal: Optional[int] = None, *, determinism: Literal['seed_only', 'full'] = 'seed_only') -> bioimageio.spec.ValidationSummary:
      -
      35def test_model(
      -36    source: Union[v0_5.ModelDescr, PermissiveFileSource],
      -37    weight_format: Optional[WeightsFormat] = None,
      -38    devices: Optional[List[str]] = None,
      -39    absolute_tolerance: float = 1.5e-4,
      -40    relative_tolerance: float = 1e-4,
      -41    decimal: Optional[int] = None,
      -42) -> ValidationSummary:
      -43    """Test model inference"""
      -44    # NOTE: `decimal` is a legacy argument and is handled in `_test_model_inference`
      -45    return test_description(
      -46        source,
      -47        weight_format=weight_format,
      -48        devices=devices,
      -49        absolute_tolerance=absolute_tolerance,
      -50        relative_tolerance=relative_tolerance,
      -51        decimal=decimal,
      -52        expected_type="model",
      -53    )
      +            
       97def test_model(
      + 98    source: Union[v0_5.ModelDescr, PermissiveFileSource],
      + 99    weight_format: Optional[WeightsFormat] = None,
      +100    devices: Optional[List[str]] = None,
      +101    absolute_tolerance: float = 1.5e-4,
      +102    relative_tolerance: float = 1e-4,
      +103    decimal: Optional[int] = None,
      +104    *,
      +105    determinism: Literal["seed_only", "full"] = "seed_only",
      +106) -> ValidationSummary:
      +107    """Test model inference"""
      +108    return test_description(
      +109        source,
      +110        weight_format=weight_format,
      +111        devices=devices,
      +112        absolute_tolerance=absolute_tolerance,
      +113        relative_tolerance=relative_tolerance,
      +114        decimal=decimal,
      +115        determinism=determinism,
      +116        expected_type="model",
      +117    )
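For example (a sketch with a placeholder source), testing only the ONNX weights of a model with fully deterministic inference; note that determinism is keyword-only, as in the signature above.

from bioimageio.core import test_model

summary = test_model(
    "https://example.com/my-model/rdf.yaml",  # placeholder model source
    weight_format="onnx",
    determinism="full",
)
print(summary)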
       
      @@ -5126,40 +5336,41 @@
      Arguments:
      def - test_resource( source: Union[Annotated[Union[Annotated[Union[bioimageio.spec.application.v0_2.ApplicationDescr, bioimageio.spec.application.v0_3.ApplicationDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.dataset.v0_2.DatasetDescr, bioimageio.spec.dataset.v0_3.DatasetDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.model.v0_4.ModelDescr, bioimageio.spec.model.v0_5.ModelDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.notebook.v0_2.NotebookDescr, bioimageio.spec.notebook.v0_3.NotebookDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.generic.v0_2.GenericDescr, bioimageio.spec.generic.v0_3.GenericDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])], str, Annotated[pydantic_core._pydantic_core.Url, UrlConstraints(max_length=2083, allowed_schemes=['http', 'https'], host_required=None, default_host=None, default_port=None, default_path=None)], Dict[str, YamlValue]], *, format_version: Union[Literal['discover', 'latest'], str] = 'discover', weight_format: Optional[Literal['keras_hdf5', 'onnx', 'pytorch_state_dict', 'tensorflow_js', 'tensorflow_saved_model_bundle', 'torchscript']] = None, devices: Optional[Sequence[str]] = None, absolute_tolerance: float = 0.00015, relative_tolerance: float = 0.0001, decimal: Optional[int] = None, expected_type: Optional[str] = None) -> bioimageio.spec.summary.ValidationSummary: + test_resource( source: Union[Annotated[Union[Annotated[Union[bioimageio.spec.application.v0_2.ApplicationDescr, bioimageio.spec.ApplicationDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.dataset.v0_2.DatasetDescr, bioimageio.spec.DatasetDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.model.v0_4.ModelDescr, bioimageio.spec.ModelDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.NotebookDescr, bioimageio.spec.NotebookDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.generic.v0_2.GenericDescr, bioimageio.spec.GenericDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], 
Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])], str, Annotated[pydantic_core._pydantic_core.Url, UrlConstraints(max_length=2083, allowed_schemes=['http', 'https'], host_required=None, default_host=None, default_port=None, default_path=None)], Dict[str, YamlValue]], *, format_version: Union[Literal['discover', 'latest'], str] = 'discover', weight_format: Optional[Literal['keras_hdf5', 'onnx', 'pytorch_state_dict', 'tensorflow_js', 'tensorflow_saved_model_bundle', 'torchscript']] = None, devices: Optional[Sequence[str]] = None, absolute_tolerance: float = 0.00015, relative_tolerance: float = 0.0001, decimal: Optional[int] = None, determinism: Literal['seed_only', 'full'] = 'seed_only', expected_type: Optional[str] = None) -> bioimageio.spec.ValidationSummary:
      -
      56def test_description(
      -57    source: Union[ResourceDescr, PermissiveFileSource, BioimageioYamlContent],
      -58    *,
      -59    format_version: Union[Literal["discover", "latest"], str] = "discover",
      -60    weight_format: Optional[WeightsFormat] = None,
      -61    devices: Optional[Sequence[str]] = None,
      -62    absolute_tolerance: float = 1.5e-4,
      -63    relative_tolerance: float = 1e-4,
      -64    decimal: Optional[int] = None,
      -65    expected_type: Optional[str] = None,
      -66) -> ValidationSummary:
      -67    """Test a bioimage.io resource dynamically, e.g. prediction of test tensors for models"""
      -68    # NOTE: `decimal` is a legacy argument and is handled in `_test_model_inference`
      -69    rd = load_description_and_test(
      -70        source,
      -71        format_version=format_version,
      -72        weight_format=weight_format,
      -73        devices=devices,
      -74        absolute_tolerance=absolute_tolerance,
      -75        relative_tolerance=relative_tolerance,
      -76        decimal=decimal,
      -77        expected_type=expected_type,
      -78    )
      -79    return rd.validation_summary
      +            
      120def test_description(
      +121    source: Union[ResourceDescr, PermissiveFileSource, BioimageioYamlContent],
      +122    *,
      +123    format_version: Union[Literal["discover", "latest"], str] = "discover",
      +124    weight_format: Optional[WeightsFormat] = None,
      +125    devices: Optional[Sequence[str]] = None,
      +126    absolute_tolerance: float = 1.5e-4,
      +127    relative_tolerance: float = 1e-4,
      +128    decimal: Optional[int] = None,
      +129    determinism: Literal["seed_only", "full"] = "seed_only",
      +130    expected_type: Optional[str] = None,
      +131) -> ValidationSummary:
      +132    """Test a bioimage.io resource dynamically, e.g. prediction of test tensors for models"""
      +133    rd = load_description_and_test(
      +134        source,
      +135        format_version=format_version,
      +136        weight_format=weight_format,
      +137        devices=devices,
      +138        absolute_tolerance=absolute_tolerance,
      +139        relative_tolerance=relative_tolerance,
      +140        decimal=decimal,
      +141        determinism=determinism,
      +142        expected_type=expected_type,
      +143    )
      +144    return rd.validation_summary
       
      -

      Test a bioimage.io resource dynamically, e.g. prediction of test tensors for models

      + @@ -5169,25 +5380,25 @@
      Arguments:
      def - validate_format( data: Dict[str, YamlValue], /, *, format_version: Union[Literal['latest', 'discover'], str] = 'discover', context: Optional[bioimageio.spec._internal.validation_context.ValidationContext] = None) -> bioimageio.spec.summary.ValidationSummary: + validate_format( data: Dict[str, YamlValue], /, *, format_version: Union[Literal['latest', 'discover'], str] = 'discover', context: Optional[bioimageio.spec.ValidationContext] = None) -> bioimageio.spec.ValidationSummary:
      -
      159def validate_format(
      -160    data: BioimageioYamlContent,
      -161    /,
      -162    *,
      -163    format_version: Union[Literal["discover", "latest"], str] = DISCOVER,
      -164    context: Optional[ValidationContext] = None,
      -165) -> ValidationSummary:
      -166    """validate a bioimageio.yaml file (RDF)"""
      -167    with context or validation_context_var.get():
      -168        rd = build_description(data, format_version=format_version)
      -169
      -170    assert rd.validation_summary is not None
      -171    return rd.validation_summary
      +            
      161def validate_format(
      +162    data: BioimageioYamlContent,
      +163    /,
      +164    *,
      +165    format_version: Union[Literal["discover", "latest"], str] = DISCOVER,
      +166    context: Optional[ValidationContext] = None,
      +167) -> ValidationSummary:
      +168    """validate a bioimageio.yaml file (RDF)"""
      +169    with context or validation_context_var.get():
      +170        rd = build_description(data, format_version=format_version)
      +171
      +172    assert rd.validation_summary is not None
      +173    return rd.validation_summary
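A small sketch of static format validation (assuming PyYAML is available to read the raw RDF content and that validate_format is importable as documented on this page); no inference is performed here, only the bioimageio.yaml content is checked.

import yaml  # assumption: PyYAML is installed for reading the raw RDF dict
from bioimageio.core import validate_format

with open("rdf.yaml") as f:  # placeholder path to a bioimageio.yaml/rdf.yaml
    data = yaml.safe_load(f)

summary = validate_format(data)
print(summary)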
       
diff --git a/bioimageio/core/axis.html new file mode 100644 index 00000000..5c731441 --- /dev/null +++ b/bioimageio/core/axis.html @@ -0,0 +1,733 @@ + bioimageio.core.axis API documentation
      +
      +

      +bioimageio.core.axis

      + + + + + + +
       1from __future__ import annotations
      + 2
      + 3from dataclasses import dataclass
      + 4from typing import Literal, Mapping, Optional, TypeVar, Union
      + 5
      + 6from typing_extensions import assert_never
      + 7
      + 8from bioimageio.spec.model import v0_5
      + 9
      +10
      +11def _get_axis_type(a: Literal["b", "t", "i", "c", "x", "y", "z"]):
      +12    if a == "b":
      +13        return "batch"
      +14    elif a == "t":
      +15        return "time"
      +16    elif a == "i":
      +17        return "index"
      +18    elif a == "c":
      +19        return "channel"
      +20    elif a in ("x", "y", "z"):
      +21        return "space"
      +22    else:
      +23        return "index"  # return most unspecific axis
      +24
      +25
      +26S = TypeVar("S", bound=str)
      +27
      +28
      +29AxisId = v0_5.AxisId
      +30
      +31T = TypeVar("T")
      +32PerAxis = Mapping[AxisId, T]
      +33
      +34BatchSize = int
      +35
      +36AxisLetter = Literal["b", "i", "t", "c", "z", "y", "x"]
      +37AxisLike = Union[AxisId, AxisLetter, v0_5.AnyAxis, "Axis"]
      +38
      +39
      +40@dataclass
      +41class Axis:
      +42    id: AxisId
      +43    type: Literal["batch", "channel", "index", "space", "time"]
      +44
      +45    @classmethod
      +46    def create(cls, axis: AxisLike) -> Axis:
      +47        if isinstance(axis, cls):
      +48            return axis
      +49        elif isinstance(axis, Axis):
      +50            return Axis(id=axis.id, type=axis.type)
      +51        elif isinstance(axis, str):
      +52            return Axis(id=AxisId(axis), type=_get_axis_type(axis))
      +53        elif isinstance(axis, v0_5.AxisBase):
      +54            return Axis(id=AxisId(axis.id), type=axis.type)
      +55        else:
      +56            assert_never(axis)
      +57
      +58
      +59@dataclass
      +60class AxisInfo(Axis):
      +61    maybe_singleton: bool  # TODO: replace 'maybe_singleton' with size min/max for better axis guessing
      +62
      +63    @classmethod
      +64    def create(cls, axis: AxisLike, maybe_singleton: Optional[bool] = None) -> AxisInfo:
      +65        if isinstance(axis, AxisInfo):
      +66            return axis
      +67
      +68        axis_base = super().create(axis)
      +69        if maybe_singleton is None:
      +70            if isinstance(axis, (Axis, str)):
      +71                maybe_singleton = True
      +72            else:
      +73                if axis.size is None:
      +74                    maybe_singleton = True
      +75                elif isinstance(axis.size, int):
      +76                    maybe_singleton = axis.size == 1
      +77                elif isinstance(axis.size, v0_5.SizeReference):
      +78                    maybe_singleton = (
      +79                        True  # TODO: check if singleton is ok for a `SizeReference`
      +80                    )
      +81                elif isinstance(
      +82                    axis.size, (v0_5.ParameterizedSize, v0_5.DataDependentSize)
      +83                ):
      +84                    try:
      +85                        maybe_size_one = axis.size.validate_size(
      +86                            1
      +87                        )  # TODO: refactor validate_size() to have boolean func here
      +88                    except ValueError:
      +89                        maybe_singleton = False
      +90                    else:
      +91                        maybe_singleton = maybe_size_one == 1
      +92                else:
      +93                    assert_never(axis.size)
      +94
      +95        return AxisInfo(
      +96            id=axis_base.id, type=axis_base.type, maybe_singleton=maybe_singleton
      +97        )
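As a quick illustration of the helpers above (not taken from the module itself), single axis letters are mapped to an axis type and wrapped into the Axis/AxisInfo dataclasses:

from bioimageio.core.axis import Axis, AxisId, AxisInfo

ax = Axis.create("y")                 # 'y' is interpreted as a space axis (see _get_axis_type)
assert ax.id == AxisId("y") and ax.type == "space"

info = AxisInfo.create("b")           # plain letters are assumed to allow a singleton size
assert info.type == "batch" and info.maybe_singleton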
      +
      + + +
      +
      + +
      + + class + AxisId(bioimageio.spec._internal.types.LowerCaseIdentifier): + + + +
      + +
      199class AxisId(LowerCaseIdentifier):
      +200    root_model: ClassVar[Type[RootModel[Any]]] = RootModel[
      +201        Annotated[LowerCaseIdentifierAnno, MaxLen(16)]
      +202    ]
      +
      + + +

      str(object='') -> str +str(bytes_or_buffer[, encoding[, errors]]) -> str

      + +

      Create a new string object from the given object. If encoding or +errors is specified, then the object must expose a data buffer +that will be decoded using the given encoding and error handler. +Otherwise, returns the result of object.__str__() (if defined) +or repr(object). +encoding defaults to sys.getdefaultencoding(). +errors defaults to 'strict'.

      +
      + + +
      +
      + root_model: ClassVar[Type[pydantic.root_model.RootModel[Any]]] = + + <class 'pydantic.root_model.RootModel[Annotated[str, MinLen, AfterValidator, AfterValidator, Annotated[TypeVar, Predicate], MaxLen]]'> + + +
      + + +

      the pydantic root model to validate the string

      +
      + + +
      +
      +
      +
      + PerAxis = +typing.Mapping[AxisId, ~T] + + +
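Illustrative only: AxisId values are lower-case identifiers of at most 16 characters (see the class above), and PerAxis is simply a mapping keyed by them, e.g. a hypothetical per-axis halo:

from bioimageio.core.axis import AxisId, PerAxis

halo: PerAxis[int] = {AxisId("y"): 8, AxisId("x"): 8}  # hypothetical halo in pixels per spatial axis
print(halo[AxisId("x")])  # 8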
      + + + + +
      +
      +
      + BatchSize = +<class 'int'> + + +
      + + + + +
      +
      +
      + AxisLetter = +typing.Literal['b', 'i', 't', 'c', 'z', 'y', 'x'] + + +
      + + + + +
      +
      +
      + AxisLike = + + typing.Union[AxisId, typing.Literal['b', 'i', 't', 'c', 'z', 'y', 'x'], typing.Annotated[typing.Union[bioimageio.spec.model.v0_5.BatchAxis, bioimageio.spec.model.v0_5.ChannelAxis, bioimageio.spec.model.v0_5.IndexInputAxis, bioimageio.spec.model.v0_5.TimeInputAxis, bioimageio.spec.model.v0_5.SpaceInputAxis], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], typing.Annotated[typing.Union[bioimageio.spec.model.v0_5.BatchAxis, bioimageio.spec.model.v0_5.ChannelAxis, bioimageio.spec.model.v0_5.IndexOutputAxis, typing.Annotated[typing.Union[typing.Annotated[bioimageio.spec.model.v0_5.TimeOutputAxis, Tag(tag='wo_halo')], typing.Annotated[bioimageio.spec.model.v0_5.TimeOutputAxisWithHalo, Tag(tag='with_halo')]], Discriminator(discriminator=<function _get_halo_axis_discriminator_value>, custom_error_type=None, custom_error_message=None, custom_error_context=None)], typing.Annotated[typing.Union[typing.Annotated[bioimageio.spec.model.v0_5.SpaceOutputAxis, Tag(tag='wo_halo')], typing.Annotated[bioimageio.spec.model.v0_5.SpaceOutputAxisWithHalo, Tag(tag='with_halo')]], Discriminator(discriminator=<function _get_halo_axis_discriminator_value>, custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], ForwardRef('Axis')] + + +
      + + + + +
      +
      + +
      +
      @dataclass
      + + class + Axis: + + + +
      + +
      41@dataclass
      +42class Axis:
      +43    id: AxisId
      +44    type: Literal["batch", "channel", "index", "space", "time"]
      +45
      +46    @classmethod
      +47    def create(cls, axis: AxisLike) -> Axis:
      +48        if isinstance(axis, cls):
      +49            return axis
      +50        elif isinstance(axis, Axis):
      +51            return Axis(id=axis.id, type=axis.type)
      +52        elif isinstance(axis, str):
      +53            return Axis(id=AxisId(axis), type=_get_axis_type(axis))
      +54        elif isinstance(axis, v0_5.AxisBase):
      +55            return Axis(id=AxisId(axis.id), type=axis.type)
      +56        else:
      +57            assert_never(axis)
      +
      + + + + +
      +
      + + Axis( id: AxisId, type: Literal['batch', 'channel', 'index', 'space', 'time']) + + +
      + + + + +
      +
      +
      + id: AxisId + + +
      + + + + +
      +
      +
      + type: Literal['batch', 'channel', 'index', 'space', 'time'] + + +
      + + + + +
      +
      + +
      +
      @classmethod
      + + def + create( cls, axis: Union[AxisId, Literal['b', 'i', 't', 'c', 'z', 'y', 'x'], Annotated[Union[bioimageio.spec.model.v0_5.BatchAxis, bioimageio.spec.model.v0_5.ChannelAxis, bioimageio.spec.model.v0_5.IndexInputAxis, bioimageio.spec.model.v0_5.TimeInputAxis, bioimageio.spec.model.v0_5.SpaceInputAxis], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.model.v0_5.BatchAxis, bioimageio.spec.model.v0_5.ChannelAxis, bioimageio.spec.model.v0_5.IndexOutputAxis, Annotated[Union[Annotated[bioimageio.spec.model.v0_5.TimeOutputAxis, Tag(tag='wo_halo')], Annotated[bioimageio.spec.model.v0_5.TimeOutputAxisWithHalo, Tag(tag='with_halo')]], Discriminator(discriminator=<function _get_halo_axis_discriminator_value>, custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[Annotated[bioimageio.spec.model.v0_5.SpaceOutputAxis, Tag(tag='wo_halo')], Annotated[bioimageio.spec.model.v0_5.SpaceOutputAxisWithHalo, Tag(tag='with_halo')]], Discriminator(discriminator=<function _get_halo_axis_discriminator_value>, custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Axis]) -> Axis: + + + +
      + +
      46    @classmethod
      +47    def create(cls, axis: AxisLike) -> Axis:
      +48        if isinstance(axis, cls):
      +49            return axis
      +50        elif isinstance(axis, Axis):
      +51            return Axis(id=axis.id, type=axis.type)
      +52        elif isinstance(axis, str):
      +53            return Axis(id=AxisId(axis), type=_get_axis_type(axis))
      +54        elif isinstance(axis, v0_5.AxisBase):
      +55            return Axis(id=AxisId(axis.id), type=axis.type)
      +56        else:
      +57            assert_never(axis)
      +
      + + + + +
      +
      +
      + +
      +
      @dataclass
      + + class + AxisInfo(Axis): + + + +
      + +
      60@dataclass
      +61class AxisInfo(Axis):
      +62    maybe_singleton: bool  # TODO: replace 'maybe_singleton' with size min/max for better axis guessing
      +63
      +64    @classmethod
      +65    def create(cls, axis: AxisLike, maybe_singleton: Optional[bool] = None) -> AxisInfo:
      +66        if isinstance(axis, AxisInfo):
      +67            return axis
      +68
      +69        axis_base = super().create(axis)
      +70        if maybe_singleton is None:
      +71            if isinstance(axis, (Axis, str)):
      +72                maybe_singleton = True
      +73            else:
      +74                if axis.size is None:
      +75                    maybe_singleton = True
      +76                elif isinstance(axis.size, int):
      +77                    maybe_singleton = axis.size == 1
      +78                elif isinstance(axis.size, v0_5.SizeReference):
      +79                    maybe_singleton = (
      +80                        True  # TODO: check if singleton is ok for a `SizeReference`
      +81                    )
      +82                elif isinstance(
      +83                    axis.size, (v0_5.ParameterizedSize, v0_5.DataDependentSize)
      +84                ):
      +85                    try:
      +86                        maybe_size_one = axis.size.validate_size(
      +87                            1
      +88                        )  # TODO: refactor validate_size() to have boolean func here
      +89                    except ValueError:
      +90                        maybe_singleton = False
      +91                    else:
      +92                        maybe_singleton = maybe_size_one == 1
      +93                else:
      +94                    assert_never(axis.size)
      +95
      +96        return AxisInfo(
      +97            id=axis_base.id, type=axis_base.type, maybe_singleton=maybe_singleton
      +98        )
      +
      + + + + +
      +
      + + AxisInfo( id: AxisId, type: Literal['batch', 'channel', 'index', 'space', 'time'], maybe_singleton: bool) + + +
      + + + + +
      +
      +
      + maybe_singleton: bool + + +
      + + + + +
      +
      + +
      +
      @classmethod
      + + def + create( cls, axis: Union[AxisId, Literal['b', 'i', 't', 'c', 'z', 'y', 'x'], Annotated[Union[bioimageio.spec.model.v0_5.BatchAxis, bioimageio.spec.model.v0_5.ChannelAxis, bioimageio.spec.model.v0_5.IndexInputAxis, bioimageio.spec.model.v0_5.TimeInputAxis, bioimageio.spec.model.v0_5.SpaceInputAxis], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.model.v0_5.BatchAxis, bioimageio.spec.model.v0_5.ChannelAxis, bioimageio.spec.model.v0_5.IndexOutputAxis, Annotated[Union[Annotated[bioimageio.spec.model.v0_5.TimeOutputAxis, Tag(tag='wo_halo')], Annotated[bioimageio.spec.model.v0_5.TimeOutputAxisWithHalo, Tag(tag='with_halo')]], Discriminator(discriminator=<function _get_halo_axis_discriminator_value>, custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[Annotated[bioimageio.spec.model.v0_5.SpaceOutputAxis, Tag(tag='wo_halo')], Annotated[bioimageio.spec.model.v0_5.SpaceOutputAxisWithHalo, Tag(tag='with_halo')]], Discriminator(discriminator=<function _get_halo_axis_discriminator_value>, custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Axis], maybe_singleton: Optional[bool] = None) -> AxisInfo: + + + +
      + +
      64    @classmethod
      +65    def create(cls, axis: AxisLike, maybe_singleton: Optional[bool] = None) -> AxisInfo:
      +66        if isinstance(axis, AxisInfo):
      +67            return axis
      +68
      +69        axis_base = super().create(axis)
      +70        if maybe_singleton is None:
      +71            if isinstance(axis, (Axis, str)):
      +72                maybe_singleton = True
      +73            else:
      +74                if axis.size is None:
      +75                    maybe_singleton = True
      +76                elif isinstance(axis.size, int):
      +77                    maybe_singleton = axis.size == 1
      +78                elif isinstance(axis.size, v0_5.SizeReference):
      +79                    maybe_singleton = (
      +80                        True  # TODO: check if singleton is ok for a `SizeReference`
      +81                    )
      +82                elif isinstance(
      +83                    axis.size, (v0_5.ParameterizedSize, v0_5.DataDependentSize)
      +84                ):
      +85                    try:
      +86                        maybe_size_one = axis.size.validate_size(
      +87                            1
      +88                        )  # TODO: refactor validate_size() to have boolean func here
      +89                    except ValueError:
      +90                        maybe_singleton = False
      +91                    else:
      +92                        maybe_singleton = maybe_size_one == 1
      +93                else:
      +94                    assert_never(axis.size)
      +95
      +96        return AxisInfo(
      +97            id=axis_base.id, type=axis_base.type, maybe_singleton=maybe_singleton
      +98        )
      +
      + + + + +
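The `maybe_singleton` guess follows the branches listed above: without size information a singleton extent is assumed possible, while a `ParameterizedSize` whose minimum exceeds 1 rules it out. A hedged sketch (axis and size objects taken from `bioimageio.spec.model.v0_5`):

```python
from bioimageio.core.axis import AxisId, AxisInfo
from bioimageio.spec.model import v0_5

# a bare axis id carries no size information, so a singleton extent is assumed possible
assert AxisInfo.create("c").maybe_singleton

# a parameterized size with min=16 cannot validate a size of 1 -> not maybe_singleton
x = v0_5.SpaceInputAxis(id=AxisId("x"), size=v0_5.ParameterizedSize(min=16, step=8))
assert not AxisInfo.create(x).maybe_singleton

# an explicit argument overrides the guess
assert AxisInfo.create(x, maybe_singleton=True).maybe_singleton
```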
      +
      +
      Inherited Members
      +
      +
      Axis
      +
      id
      +
      type
      + +
      +
      +
      +
      +
      + + \ No newline at end of file diff --git a/bioimageio/core/block_meta.html b/bioimageio/core/block_meta.html new file mode 100644 index 00000000..306f25fe --- /dev/null +++ b/bioimageio/core/block_meta.html @@ -0,0 +1,1557 @@ + + + + + + + bioimageio.core.block_meta API documentation + + + + + + + + + + +
      +
      +

      +bioimageio.core.block_meta

      + + + + + + +
        1import itertools
      +  2from dataclasses import dataclass
      +  3from functools import cached_property
      +  4from math import floor, prod
      +  5from typing import (
      +  6    Any,
      +  7    Callable,
      +  8    Collection,
      +  9    Dict,
      + 10    Generator,
      + 11    Iterable,
      + 12    List,
      + 13    Optional,
      + 14    Tuple,
      + 15    Union,
      + 16)
      + 17
      + 18from loguru import logger
      + 19from typing_extensions import Self
      + 20
      + 21from .axis import AxisId, PerAxis
      + 22from .common import (
      + 23    BlockIndex,
      + 24    Frozen,
      + 25    Halo,
      + 26    HaloLike,
      + 27    MemberId,
      + 28    PadWidth,
      + 29    PerMember,
      + 30    SliceInfo,
      + 31    TotalNumberOfBlocks,
      + 32)
      + 33
      + 34
      + 35@dataclass
      + 36class LinearAxisTransform:
      + 37    axis: AxisId
      + 38    scale: float
      + 39    offset: int
      + 40
      + 41    def compute(self, s: int, round: Callable[[float], int] = floor) -> int:
      + 42        return round(s * self.scale) + self.offset
      + 43
      + 44
      + 45@dataclass(frozen=True)
      + 46class BlockMeta:
      + 47    """Block meta data of a sample member (a tensor in a sample)
      + 48
      + 49    Figure for illustration:
      + 50    The first 2d block (dashed) of a sample member (**bold**).
      + 51    The inner slice (thin) is expanded by a halo in both dimensions on both sides.
      + 52    The outer slice reaches from the sample member origin (0, 0) to the right halo point.
      + 53
      + 54    ```terminal
      + 55    ┌ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─  ─ ─ ─ ─ ─ ─ ─ ┐
      + 56    ╷ halo(left)                         ╷
      + 57    ╷                                    ╷
      + 58    ╷  (0, 0)┏━━━━━━━━━━━━━━━━━┯━━━━━━━━━┯━━━➔
      + 59    ╷        ┃                 │         ╷  sample member
      + 60    ╷        ┃      inner      │         ╷
      + 61    ╷        ┃   (and outer)   │  outer  ╷
      + 62    ╷        ┃      slice      │  slice  ╷
      + 63    ╷        ┃                 │         ╷
      + 64    ╷        ┣─────────────────┘         ╷
      + 65    ╷        ┃   outer slice             ╷
      + 66    ╷        ┃               halo(right) ╷
      + 67    └ ─ ─ ─ ─┃─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─┘
      + 68
      + 69    ```
      + 70
      + 71    note:
      + 72    - Inner and outer slices are specified in sample member coordinates.
      + 73    - The outer_slice of a block at the sample edge may overlap by more than the
      + 74        halo with the neighboring block (the inner slices will not overlap though).
      + 75
      + 76    """
      + 77
      + 78    sample_shape: PerAxis[int]
      + 79    """the axis sizes of the whole (unblocked) sample"""
      + 80
      + 81    inner_slice: PerAxis[SliceInfo]
      + 82    """inner region (without halo) wrt the sample"""
      + 83
      + 84    halo: PerAxis[Halo]
      + 85    """halo enlarging the inner region to the block's sizes"""
      + 86
      + 87    block_index: BlockIndex
      + 88    """the i-th block of the sample"""
      + 89
      + 90    blocks_in_sample: TotalNumberOfBlocks
      + 91    """total number of blocks in the sample"""
      + 92
      + 93    @cached_property
      + 94    def shape(self) -> PerAxis[int]:
      + 95        """axis lengths of the block"""
      + 96        return Frozen(
      + 97            {
      + 98                a: s.stop - s.start + (sum(self.halo[a]) if a in self.halo else 0)
      + 99                for a, s in self.inner_slice.items()
      +100            }
      +101        )
      +102
      +103    @cached_property
      +104    def padding(self) -> PerAxis[PadWidth]:
      +105        """padding to realize the halo at the sample edge
      +106        where we cannot simply enlarge the inner slice"""
      +107        return Frozen(
      +108            {
      +109                a: PadWidth(
      +110                    (
      +111                        self.halo[a].left
      +112                        - (self.inner_slice[a].start - self.outer_slice[a].start)
      +113                        if a in self.halo
      +114                        else 0
      +115                    ),
      +116                    (
      +117                        self.halo[a].right
      +118                        - (self.outer_slice[a].stop - self.inner_slice[a].stop)
      +119                        if a in self.halo
      +120                        else 0
      +121                    ),
      +122                )
      +123                for a in self.inner_slice
      +124            }
      +125        )
      +126
      +127    @cached_property
      +128    def outer_slice(self) -> PerAxis[SliceInfo]:
      +129        """slice of the outer block (without padding) wrt the sample"""
      +130        return Frozen(
      +131            {
      +132                a: SliceInfo(
      +133                    max(
      +134                        0,
      +135                        min(
      +136                            self.inner_slice[a].start
      +137                            - (self.halo[a].left if a in self.halo else 0),
      +138                            self.sample_shape[a]
      +139                            - self.inner_shape[a]
      +140                            - (self.halo[a].left if a in self.halo else 0),
      +141                        ),
      +142                    ),
      +143                    min(
      +144                        self.sample_shape[a],
      +145                        self.inner_slice[a].stop
      +146                        + (self.halo[a].right if a in self.halo else 0),
      +147                    ),
      +148                )
      +149                for a in self.inner_slice
      +150            }
      +151        )
      +152
      +153    @cached_property
      +154    def inner_shape(self) -> PerAxis[int]:
      +155        """axis lengths of the inner region (without halo)"""
      +156        return Frozen({a: s.stop - s.start for a, s in self.inner_slice.items()})
      +157
      +158    @cached_property
      +159    def local_slice(self) -> PerAxis[SliceInfo]:
      +160        """inner slice wrt the block, **not** the sample"""
      +161        return Frozen(
      +162            {
      +163                a: SliceInfo(
      +164                    self.halo[a].left,
      +165                    self.halo[a].left + self.inner_shape[a],
      +166                )
      +167                for a in self.inner_slice
      +168            }
      +169        )
      +170
      +171    @property
      +172    def dims(self) -> Collection[AxisId]:
      +173        return set(self.inner_shape)
      +174
      +175    @property
      +176    def tagged_shape(self) -> PerAxis[int]:
      +177        """alias for shape"""
      +178        return self.shape
      +179
      +180    @property
      +181    def inner_slice_wo_overlap(self):
      +182        """subslice of the inner slice, such that all `inner_slice_wo_overlap` can be
+183        stitched together trivially to form the original sample.
      +184
      +185        This can also be used to calculate statistics
      +186        without overrepresenting block edge regions."""
      +187        # TODO: update inner_slice_wo_overlap when adding block overlap
      +188        return self.inner_slice
      +189
      +190    def __post_init__(self):
      +191        # freeze mutable inputs
      +192        if not isinstance(self.sample_shape, Frozen):
      +193            object.__setattr__(self, "sample_shape", Frozen(self.sample_shape))
      +194
      +195        if not isinstance(self.inner_slice, Frozen):
      +196            object.__setattr__(self, "inner_slice", Frozen(self.inner_slice))
      +197
      +198        if not isinstance(self.halo, Frozen):
      +199            object.__setattr__(self, "halo", Frozen(self.halo))
      +200
      +201        assert all(
      +202            a in self.sample_shape for a in self.inner_slice
      +203        ), "block has axes not present in sample"
      +204
      +205        assert all(
      +206            a in self.inner_slice for a in self.halo
      +207        ), "halo has axes not present in block"
      +208
      +209        if any(s > self.sample_shape[a] for a, s in self.shape.items()):
      +210            logger.warning(
      +211                "block {} larger than sample {}", self.shape, self.sample_shape
      +212            )
      +213
      +214    def get_transformed(
      +215        self, new_axes: PerAxis[Union[LinearAxisTransform, int]]
      +216    ) -> Self:
      +217        return self.__class__(
      +218            sample_shape={
      +219                a: (
      +220                    trf
      +221                    if isinstance(trf, int)
      +222                    else trf.compute(self.sample_shape[trf.axis])
      +223                )
      +224                for a, trf in new_axes.items()
      +225            },
      +226            inner_slice={
      +227                a: (
      +228                    SliceInfo(0, trf)
      +229                    if isinstance(trf, int)
      +230                    else SliceInfo(
      +231                        trf.compute(self.inner_slice[trf.axis].start),
      +232                        trf.compute(self.inner_slice[trf.axis].stop),
      +233                    )
      +234                )
      +235                for a, trf in new_axes.items()
      +236            },
      +237            halo={
      +238                a: (
      +239                    Halo(0, 0)
      +240                    if isinstance(trf, int)
      +241                    else Halo(self.halo[trf.axis].left, self.halo[trf.axis].right)
      +242                )
      +243                for a, trf in new_axes.items()
      +244            },
      +245            block_index=self.block_index,
      +246            blocks_in_sample=self.blocks_in_sample,
      +247        )
      +248
      +249
      +250def split_shape_into_blocks(
      +251    shape: PerAxis[int],
      +252    block_shape: PerAxis[int],
      +253    halo: PerAxis[HaloLike],
      +254    stride: Optional[PerAxis[int]] = None,
      +255) -> Tuple[TotalNumberOfBlocks, Generator[BlockMeta, Any, None]]:
      +256    assert all(a in shape for a in block_shape), (
      +257        tuple(shape),
      +258        set(block_shape),
      +259    )
      +260    if any(shape[a] < block_shape[a] for a in block_shape):
      +261        raise ValueError(f"shape {shape} is smaller than block shape {block_shape}")
      +262
      +263    assert all(a in shape for a in halo), (tuple(shape), set(halo))
      +264
      +265    # fill in default halo (0) and block axis length (from tensor shape)
      +266    halo = {a: Halo.create(halo.get(a, 0)) for a in shape}
      +267    block_shape = {a: block_shape.get(a, s) for a, s in shape.items()}
      +268    if stride is None:
      +269        stride = {}
      +270
      +271    inner_1d_slices: Dict[AxisId, List[SliceInfo]] = {}
      +272    for a, s in shape.items():
      +273        inner_size = block_shape[a] - sum(halo[a])
      +274        stride_1d = stride.get(a, inner_size)
      +275        inner_1d_slices[a] = [
      +276            SliceInfo(min(p, s - inner_size), min(p + inner_size, s))
      +277            for p in range(0, s, stride_1d)
      +278        ]
      +279
      +280    n_blocks = prod(map(len, inner_1d_slices.values()))
      +281
      +282    return n_blocks, _block_meta_generator(
      +283        shape,
      +284        blocks_in_sample=n_blocks,
      +285        inner_1d_slices=inner_1d_slices,
      +286        halo=halo,
      +287    )
      +288
      +289
      +290def _block_meta_generator(
      +291    sample_shape: PerAxis[int],
      +292    *,
      +293    blocks_in_sample: int,
      +294    inner_1d_slices: Dict[AxisId, List[SliceInfo]],
      +295    halo: PerAxis[HaloLike],
      +296):
      +297    assert all(a in sample_shape for a in halo)
      +298
      +299    halo = {a: Halo.create(halo.get(a, 0)) for a in inner_1d_slices}
      +300    for i, nd_tile in enumerate(itertools.product(*inner_1d_slices.values())):
      +301        inner_slice: PerAxis[SliceInfo] = dict(zip(inner_1d_slices, nd_tile))
      +302
      +303        yield BlockMeta(
      +304            sample_shape=sample_shape,
      +305            inner_slice=inner_slice,
      +306            halo=halo,
      +307            block_index=i,
      +308            blocks_in_sample=blocks_in_sample,
      +309        )
      +310
      +311
      +312def split_multiple_shapes_into_blocks(
      +313    shapes: PerMember[PerAxis[int]],
      +314    block_shapes: PerMember[PerAxis[int]],
      +315    *,
      +316    halo: PerMember[PerAxis[HaloLike]],
      +317    strides: Optional[PerMember[PerAxis[int]]] = None,
      +318    broadcast: bool = False,
      +319) -> Tuple[TotalNumberOfBlocks, Iterable[PerMember[BlockMeta]]]:
      +320    if unknown_blocks := [t for t in block_shapes if t not in shapes]:
      +321        raise ValueError(
      +322            f"block shape specified for unknown tensors: {unknown_blocks}."
      +323        )
      +324
      +325    if not block_shapes:
      +326        block_shapes = shapes
      +327
      +328    if not broadcast and (
      +329        missing_blocks := [t for t in shapes if t not in block_shapes]
      +330    ):
      +331        raise ValueError(
      +332            f"no block shape specified for {missing_blocks}."
      +333            + " Set `broadcast` to True if these tensors should be repeated"
      +334            + " as a whole for each block."
      +335        )
      +336
      +337    if extra_halo := [t for t in halo if t not in block_shapes]:
      +338        raise ValueError(
      +339            f"`halo` specified for tensors without block shape: {extra_halo}."
      +340        )
      +341
      +342    if strides is None:
      +343        strides = {}
      +344
      +345    assert not (
      +346        unknown_block := [t for t in strides if t not in block_shapes]
      +347    ), f"`stride` specified for tensors without block shape: {unknown_block}"
      +348
      +349    blocks: Dict[MemberId, Iterable[BlockMeta]] = {}
      +350    n_blocks: Dict[MemberId, TotalNumberOfBlocks] = {}
      +351    for t in block_shapes:
      +352        n_blocks[t], blocks[t] = split_shape_into_blocks(
      +353            shape=shapes[t],
      +354            block_shape=block_shapes[t],
      +355            halo=halo.get(t, {}),
      +356            stride=strides.get(t),
      +357        )
      +358        assert n_blocks[t] > 0, n_blocks
      +359
      +360    assert len(blocks) > 0, blocks
      +361    assert len(n_blocks) > 0, n_blocks
      +362    unique_n_blocks = set(n_blocks.values())
      +363    n = max(unique_n_blocks)
      +364    if len(unique_n_blocks) == 2 and 1 in unique_n_blocks:
      +365        if not broadcast:
      +366            raise ValueError(
      +367                "Mismatch for total number of blocks due to unsplit (single block)"
      +368                + f" tensors: {n_blocks}. Set `broadcast` to True if you want to"
      +369                + " repeat unsplit (single block) tensors."
      +370            )
      +371
      +372        blocks = {
      +373            t: _repeat_single_block(block_gen, n) if n_blocks[t] == 1 else block_gen
      +374            for t, block_gen in blocks.items()
      +375        }
      +376    elif len(unique_n_blocks) != 1:
      +377        raise ValueError(f"Mismatch for total number of blocks: {n_blocks}")
      +378
      +379    return n, _aligned_blocks_generator(n, blocks)
      +380
      +381
      +382def _aligned_blocks_generator(
      +383    n: TotalNumberOfBlocks, blocks: Dict[MemberId, Iterable[BlockMeta]]
      +384):
      +385    iterators = {t: iter(gen) for t, gen in blocks.items()}
      +386    for _ in range(n):
      +387        yield {t: next(it) for t, it in iterators.items()}
      +388
      +389
      +390def _repeat_single_block(block_generator: Iterable[BlockMeta], n: TotalNumberOfBlocks):
      +391    round_two = False
      +392    for block in block_generator:
      +393        assert not round_two
      +394        for _ in range(n):
      +395            yield block
      +396
      +397        round_two = True
      +
      + + +
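As a usage sketch of this module: `split_shape_into_blocks` yields `BlockMeta` objects, where `block_shape` includes the halo, so the inner (stitchable) region per axis is `block_shape - sum(halo)`. The comments state the values the listed implementation computes; an integer halo is assumed to be applied symmetrically by `Halo.create`:

```python
from bioimageio.core.axis import AxisId
from bioimageio.core.block_meta import split_shape_into_blocks

x, y = AxisId("x"), AxisId("y")
n_blocks, blocks = split_shape_into_blocks(
    shape={x: 256, y: 256},        # sample member shape
    block_shape={x: 128, y: 128},  # block shape *including* the halo
    halo={x: 16, y: 16},           # assumed symmetric: Halo(16, 16)
)
print(n_blocks)  # 9 blocks with an inner size of 96x96 cover the 256x256 sample

first = next(blocks)
print(first.inner_slice[x])  # SliceInfo(start=0, stop=96)
print(first.outer_slice[x])  # SliceInfo(start=0, stop=112), clipped at the sample border
print(first.padding[x])      # PadWidth(left=16, right=0), realizes the missing left halo
```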
      +
      + +
      +
      @dataclass
      + + class + LinearAxisTransform: + + + +
      + +
      36@dataclass
      +37class LinearAxisTransform:
      +38    axis: AxisId
      +39    scale: float
      +40    offset: int
      +41
      +42    def compute(self, s: int, round: Callable[[float], int] = floor) -> int:
      +43        return round(s * self.scale) + self.offset
      +
      + + + + +
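`compute` maps a coordinate (or length) on one axis linearly onto another axis; the rounding function defaults to `floor` but can be swapped, e.g. for `ceil`:

```python
from math import ceil

from bioimageio.core.axis import AxisId
from bioimageio.core.block_meta import LinearAxisTransform

# e.g. mapping input-axis coordinates to an output axis at half resolution
trf = LinearAxisTransform(axis=AxisId("x"), scale=0.5, offset=0)
print(trf.compute(101))        # floor(50.5) -> 50
print(trf.compute(101, ceil))  # ceil(50.5)  -> 51
```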
      +
      + + LinearAxisTransform(axis: bioimageio.spec.model.v0_5.AxisId, scale: float, offset: int) + + +
      + + + + +
      +
      + + + + + +
      +
      +
      + scale: float + + +
      + + + + +
      +
      +
      + offset: int + + +
      + + + + +
      +
      + +
      + + def + compute( self, s: int, round: Callable[[float], int] = <built-in function floor>) -> int: + + + +
      + +
      42    def compute(self, s: int, round: Callable[[float], int] = floor) -> int:
      +43        return round(s * self.scale) + self.offset
      +
      + + + + +
      +
      +
      + +
      +
      @dataclass(frozen=True)
      + + class + BlockMeta: + + + +
      + +
       46@dataclass(frozen=True)
      + 47class BlockMeta:
      + 48    """Block meta data of a sample member (a tensor in a sample)
      + 49
      + 50    Figure for illustration:
      + 51    The first 2d block (dashed) of a sample member (**bold**).
      + 52    The inner slice (thin) is expanded by a halo in both dimensions on both sides.
      + 53    The outer slice reaches from the sample member origin (0, 0) to the right halo point.
      + 54
      + 55    ```terminal
      + 56    ┌ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─  ─ ─ ─ ─ ─ ─ ─ ┐
      + 57    ╷ halo(left)                         ╷
      + 58    ╷                                    ╷
      + 59    ╷  (0, 0)┏━━━━━━━━━━━━━━━━━┯━━━━━━━━━┯━━━➔
      + 60    ╷        ┃                 │         ╷  sample member
      + 61    ╷        ┃      inner      │         ╷
      + 62    ╷        ┃   (and outer)   │  outer  ╷
      + 63    ╷        ┃      slice      │  slice  ╷
      + 64    ╷        ┃                 │         ╷
      + 65    ╷        ┣─────────────────┘         ╷
      + 66    ╷        ┃   outer slice             ╷
      + 67    ╷        ┃               halo(right) ╷
      + 68    └ ─ ─ ─ ─┃─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─┘
      + 69
      + 70    ```
      + 71
      + 72    note:
      + 73    - Inner and outer slices are specified in sample member coordinates.
      + 74    - The outer_slice of a block at the sample edge may overlap by more than the
      + 75        halo with the neighboring block (the inner slices will not overlap though).
      + 76
      + 77    """
      + 78
      + 79    sample_shape: PerAxis[int]
      + 80    """the axis sizes of the whole (unblocked) sample"""
      + 81
      + 82    inner_slice: PerAxis[SliceInfo]
      + 83    """inner region (without halo) wrt the sample"""
      + 84
      + 85    halo: PerAxis[Halo]
      + 86    """halo enlarging the inner region to the block's sizes"""
      + 87
      + 88    block_index: BlockIndex
      + 89    """the i-th block of the sample"""
      + 90
      + 91    blocks_in_sample: TotalNumberOfBlocks
      + 92    """total number of blocks in the sample"""
      + 93
      + 94    @cached_property
      + 95    def shape(self) -> PerAxis[int]:
      + 96        """axis lengths of the block"""
      + 97        return Frozen(
      + 98            {
      + 99                a: s.stop - s.start + (sum(self.halo[a]) if a in self.halo else 0)
      +100                for a, s in self.inner_slice.items()
      +101            }
      +102        )
      +103
      +104    @cached_property
      +105    def padding(self) -> PerAxis[PadWidth]:
      +106        """padding to realize the halo at the sample edge
      +107        where we cannot simply enlarge the inner slice"""
      +108        return Frozen(
      +109            {
      +110                a: PadWidth(
      +111                    (
      +112                        self.halo[a].left
      +113                        - (self.inner_slice[a].start - self.outer_slice[a].start)
      +114                        if a in self.halo
      +115                        else 0
      +116                    ),
      +117                    (
      +118                        self.halo[a].right
      +119                        - (self.outer_slice[a].stop - self.inner_slice[a].stop)
      +120                        if a in self.halo
      +121                        else 0
      +122                    ),
      +123                )
      +124                for a in self.inner_slice
      +125            }
      +126        )
      +127
      +128    @cached_property
      +129    def outer_slice(self) -> PerAxis[SliceInfo]:
      +130        """slice of the outer block (without padding) wrt the sample"""
      +131        return Frozen(
      +132            {
      +133                a: SliceInfo(
      +134                    max(
      +135                        0,
      +136                        min(
      +137                            self.inner_slice[a].start
      +138                            - (self.halo[a].left if a in self.halo else 0),
      +139                            self.sample_shape[a]
      +140                            - self.inner_shape[a]
      +141                            - (self.halo[a].left if a in self.halo else 0),
      +142                        ),
      +143                    ),
      +144                    min(
      +145                        self.sample_shape[a],
      +146                        self.inner_slice[a].stop
      +147                        + (self.halo[a].right if a in self.halo else 0),
      +148                    ),
      +149                )
      +150                for a in self.inner_slice
      +151            }
      +152        )
      +153
      +154    @cached_property
      +155    def inner_shape(self) -> PerAxis[int]:
      +156        """axis lengths of the inner region (without halo)"""
      +157        return Frozen({a: s.stop - s.start for a, s in self.inner_slice.items()})
      +158
      +159    @cached_property
      +160    def local_slice(self) -> PerAxis[SliceInfo]:
      +161        """inner slice wrt the block, **not** the sample"""
      +162        return Frozen(
      +163            {
      +164                a: SliceInfo(
      +165                    self.halo[a].left,
      +166                    self.halo[a].left + self.inner_shape[a],
      +167                )
      +168                for a in self.inner_slice
      +169            }
      +170        )
      +171
      +172    @property
      +173    def dims(self) -> Collection[AxisId]:
      +174        return set(self.inner_shape)
      +175
      +176    @property
      +177    def tagged_shape(self) -> PerAxis[int]:
      +178        """alias for shape"""
      +179        return self.shape
      +180
      +181    @property
      +182    def inner_slice_wo_overlap(self):
      +183        """subslice of the inner slice, such that all `inner_slice_wo_overlap` can be
+184        stitched together trivially to form the original sample.
      +185
      +186        This can also be used to calculate statistics
      +187        without overrepresenting block edge regions."""
      +188        # TODO: update inner_slice_wo_overlap when adding block overlap
      +189        return self.inner_slice
      +190
      +191    def __post_init__(self):
      +192        # freeze mutable inputs
      +193        if not isinstance(self.sample_shape, Frozen):
      +194            object.__setattr__(self, "sample_shape", Frozen(self.sample_shape))
      +195
      +196        if not isinstance(self.inner_slice, Frozen):
      +197            object.__setattr__(self, "inner_slice", Frozen(self.inner_slice))
      +198
      +199        if not isinstance(self.halo, Frozen):
      +200            object.__setattr__(self, "halo", Frozen(self.halo))
      +201
      +202        assert all(
      +203            a in self.sample_shape for a in self.inner_slice
      +204        ), "block has axes not present in sample"
      +205
      +206        assert all(
      +207            a in self.inner_slice for a in self.halo
      +208        ), "halo has axes not present in block"
      +209
      +210        if any(s > self.sample_shape[a] for a, s in self.shape.items()):
      +211            logger.warning(
      +212                "block {} larger than sample {}", self.shape, self.sample_shape
      +213            )
      +214
      +215    def get_transformed(
      +216        self, new_axes: PerAxis[Union[LinearAxisTransform, int]]
      +217    ) -> Self:
      +218        return self.__class__(
      +219            sample_shape={
      +220                a: (
      +221                    trf
      +222                    if isinstance(trf, int)
      +223                    else trf.compute(self.sample_shape[trf.axis])
      +224                )
      +225                for a, trf in new_axes.items()
      +226            },
      +227            inner_slice={
      +228                a: (
      +229                    SliceInfo(0, trf)
      +230                    if isinstance(trf, int)
      +231                    else SliceInfo(
      +232                        trf.compute(self.inner_slice[trf.axis].start),
      +233                        trf.compute(self.inner_slice[trf.axis].stop),
      +234                    )
      +235                )
      +236                for a, trf in new_axes.items()
      +237            },
      +238            halo={
      +239                a: (
      +240                    Halo(0, 0)
      +241                    if isinstance(trf, int)
      +242                    else Halo(self.halo[trf.axis].left, self.halo[trf.axis].right)
      +243                )
      +244                for a, trf in new_axes.items()
      +245            },
      +246            block_index=self.block_index,
      +247            blocks_in_sample=self.blocks_in_sample,
      +248        )
      +
      + + +

      Block meta data of a sample member (a tensor in a sample)

      + +

      Figure for illustration: +The first 2d block (dashed) of a sample member (bold). +The inner slice (thin) is expanded by a halo in both dimensions on both sides. +The outer slice reaches from the sample member origin (0, 0) to the right halo point.

      + +
      ┌ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─  ─ ─ ─ ─ ─ ─ ─ ┐
      +╷ halo(left)                         ╷
      +╷                                    ╷
      +╷  (0, 0)┏━━━━━━━━━━━━━━━━━┯━━━━━━━━━┯━━━➔
      +╷        ┃                 │         ╷  sample member
      +╷        ┃      inner      │         ╷
      +╷        ┃   (and outer)   │  outer  ╷
      +╷        ┃      slice      │  slice  ╷
      +╷        ┃                 │         ╷
      +╷        ┣─────────────────┘         ╷
      +╷        ┃   outer slice             ╷
      +╷        ┃               halo(right) ╷
      +└ ─ ─ ─ ─┃─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─┘
      +         ⬇
      +
      + +

      note:

      + +
        +
      • Inner and outer slices are specified in sample member coordinates.
      • +
      • The outer_slice of a block at the sample edge may overlap by more than the +halo with the neighboring block (the inner slices will not overlap though).
      • +
      +
      + + +
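A minimal sketch of a single interior block, constructed directly from the fields above, to illustrate the derived properties. The values in the comments follow the listed implementation; `Halo` and `SliceInfo` are assumed importable from `bioimageio.core.common`, as in the module source:

```python
from bioimageio.core.axis import AxisId
from bioimageio.core.block_meta import BlockMeta
from bioimageio.core.common import Halo, SliceInfo

x = AxisId("x")
block = BlockMeta(
    sample_shape={x: 100},               # the whole sample is 100 wide
    inner_slice={x: SliceInfo(20, 60)},  # inner region in sample coordinates
    halo={x: Halo(10, 10)},
    block_index=0,
    blocks_in_sample=1,
)
print(block.shape[x])        # 60: inner extent 40 plus a halo of 10 on each side
print(block.outer_slice[x])  # SliceInfo(start=10, stop=70), still inside the sample
print(block.local_slice[x])  # SliceInfo(start=10, stop=50), inner region in block coordinates
print(block.padding[x])      # PadWidth(left=0, right=0), no padding needed away from the edge
```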
      +
      + + BlockMeta( sample_shape: Mapping[bioimageio.spec.model.v0_5.AxisId, int], inner_slice: Mapping[bioimageio.spec.model.v0_5.AxisId, bioimageio.core.common.SliceInfo], halo: Mapping[bioimageio.spec.model.v0_5.AxisId, bioimageio.core.common.Halo], block_index: int, blocks_in_sample: int) + + +
      + + + + +
      +
      +
      + sample_shape: Mapping[bioimageio.spec.model.v0_5.AxisId, int] + + +
      + + +

      the axis sizes of the whole (unblocked) sample

      +
      + + +
      +
      + + + +

      inner region (without halo) wrt the sample

      +
      + + +
      +
      + + + +

      halo enlarging the inner region to the block's sizes

      +
      + + +
      +
      +
      + block_index: int + + +
      + + +

      the i-th block of the sample

      +
      + + +
      +
      +
      + blocks_in_sample: int + + +
      + + +

      total number of blocks in the sample

      +
      + + +
      +
      + +
      + shape: Mapping[bioimageio.spec.model.v0_5.AxisId, int] + + + +
      + +
       94    @cached_property
      + 95    def shape(self) -> PerAxis[int]:
      + 96        """axis lengths of the block"""
      + 97        return Frozen(
      + 98            {
      + 99                a: s.stop - s.start + (sum(self.halo[a]) if a in self.halo else 0)
      +100                for a, s in self.inner_slice.items()
      +101            }
      +102        )
      +
      + + +

      axis lengths of the block

      +
      + + +
      +
      + +
      + padding: Mapping[bioimageio.spec.model.v0_5.AxisId, bioimageio.core.common.PadWidth] + + + +
      + +
      104    @cached_property
      +105    def padding(self) -> PerAxis[PadWidth]:
      +106        """padding to realize the halo at the sample edge
      +107        where we cannot simply enlarge the inner slice"""
      +108        return Frozen(
      +109            {
      +110                a: PadWidth(
      +111                    (
      +112                        self.halo[a].left
      +113                        - (self.inner_slice[a].start - self.outer_slice[a].start)
      +114                        if a in self.halo
      +115                        else 0
      +116                    ),
      +117                    (
      +118                        self.halo[a].right
      +119                        - (self.outer_slice[a].stop - self.inner_slice[a].stop)
      +120                        if a in self.halo
      +121                        else 0
      +122                    ),
      +123                )
      +124                for a in self.inner_slice
      +125            }
      +126        )
      +
      + + +

      padding to realize the halo at the sample edge +where we cannot simply enlarge the inner slice

      +
      + + +
      +
      + +
      + outer_slice: Mapping[bioimageio.spec.model.v0_5.AxisId, bioimageio.core.common.SliceInfo] + + + +
      + +
      128    @cached_property
      +129    def outer_slice(self) -> PerAxis[SliceInfo]:
      +130        """slice of the outer block (without padding) wrt the sample"""
      +131        return Frozen(
      +132            {
      +133                a: SliceInfo(
      +134                    max(
      +135                        0,
      +136                        min(
      +137                            self.inner_slice[a].start
      +138                            - (self.halo[a].left if a in self.halo else 0),
      +139                            self.sample_shape[a]
      +140                            - self.inner_shape[a]
      +141                            - (self.halo[a].left if a in self.halo else 0),
      +142                        ),
      +143                    ),
      +144                    min(
      +145                        self.sample_shape[a],
      +146                        self.inner_slice[a].stop
      +147                        + (self.halo[a].right if a in self.halo else 0),
      +148                    ),
      +149                )
      +150                for a in self.inner_slice
      +151            }
      +152        )
      +
      + + +

      slice of the outer block (without padding) wrt the sample

      +
      + + +
      +
      + +
      + inner_shape: Mapping[bioimageio.spec.model.v0_5.AxisId, int] + + + +
      + +
      154    @cached_property
      +155    def inner_shape(self) -> PerAxis[int]:
      +156        """axis lengths of the inner region (without halo)"""
      +157        return Frozen({a: s.stop - s.start for a, s in self.inner_slice.items()})
      +
      + + +

      axis lengths of the inner region (without halo)

      +
      + + +
      +
      + +
      + local_slice: Mapping[bioimageio.spec.model.v0_5.AxisId, bioimageio.core.common.SliceInfo] + + + +
      + +
      159    @cached_property
      +160    def local_slice(self) -> PerAxis[SliceInfo]:
      +161        """inner slice wrt the block, **not** the sample"""
      +162        return Frozen(
      +163            {
      +164                a: SliceInfo(
      +165                    self.halo[a].left,
      +166                    self.halo[a].left + self.inner_shape[a],
      +167                )
      +168                for a in self.inner_slice
      +169            }
      +170        )
      +
      + + +

      inner slice wrt the block, not the sample

      +
      + + +
      +
      + +
      + dims: Collection[bioimageio.spec.model.v0_5.AxisId] + + + +
      + +
      172    @property
      +173    def dims(self) -> Collection[AxisId]:
      +174        return set(self.inner_shape)
      +
      + + + + +
      +
      + +
      + tagged_shape: Mapping[bioimageio.spec.model.v0_5.AxisId, int] + + + +
      + +
      176    @property
      +177    def tagged_shape(self) -> PerAxis[int]:
      +178        """alias for shape"""
      +179        return self.shape
      +
      + + +

      alias for shape

      +
      + + +
      +
      + +
      + inner_slice_wo_overlap + + + +
      + +
      181    @property
      +182    def inner_slice_wo_overlap(self):
      +183        """subslice of the inner slice, such that all `inner_slice_wo_overlap` can be
+184        stitched together trivially to form the original sample.
      +185
      +186        This can also be used to calculate statistics
      +187        without overrepresenting block edge regions."""
      +188        # TODO: update inner_slice_wo_overlap when adding block overlap
      +189        return self.inner_slice
      +
      + + +

subslice of the inner slice, such that all inner_slice_wo_overlap can be +stitched together trivially to form the original sample.

      + +

      This can also be used to calculate statistics +without overrepresenting block edge regions.

      +
      + + +
      +
      + +
      + + def + get_transformed( self, new_axes: Mapping[bioimageio.spec.model.v0_5.AxisId, Union[LinearAxisTransform, int]]) -> Self: + + + +
      + +
      215    def get_transformed(
      +216        self, new_axes: PerAxis[Union[LinearAxisTransform, int]]
      +217    ) -> Self:
      +218        return self.__class__(
      +219            sample_shape={
      +220                a: (
      +221                    trf
      +222                    if isinstance(trf, int)
      +223                    else trf.compute(self.sample_shape[trf.axis])
      +224                )
      +225                for a, trf in new_axes.items()
      +226            },
      +227            inner_slice={
      +228                a: (
      +229                    SliceInfo(0, trf)
      +230                    if isinstance(trf, int)
      +231                    else SliceInfo(
      +232                        trf.compute(self.inner_slice[trf.axis].start),
      +233                        trf.compute(self.inner_slice[trf.axis].stop),
      +234                    )
      +235                )
      +236                for a, trf in new_axes.items()
      +237            },
      +238            halo={
      +239                a: (
      +240                    Halo(0, 0)
      +241                    if isinstance(trf, int)
      +242                    else Halo(self.halo[trf.axis].left, self.halo[trf.axis].right)
      +243                )
      +244                for a, trf in new_axes.items()
      +245            },
      +246            block_index=self.block_index,
      +247            blocks_in_sample=self.blocks_in_sample,
      +248        )
      +
      + + + + +
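`get_transformed` derives the corresponding block on another tensor: an `int` entry introduces a fixed-size axis (covered as a whole, without halo), while a `LinearAxisTransform` rescales an existing axis. A hedged sketch with a half-resolution output axis and an added channel axis (comments state the values the listed code computes):

```python
from bioimageio.core.axis import AxisId
from bioimageio.core.block_meta import BlockMeta, LinearAxisTransform
from bioimageio.core.common import Halo, SliceInfo

x, c = AxisId("x"), AxisId("c")
block = BlockMeta(
    sample_shape={x: 200},
    inner_slice={x: SliceInfo(40, 80)},
    halo={x: Halo(8, 8)},
    block_index=0,
    blocks_in_sample=5,
)
out = block.get_transformed({c: 3, x: LinearAxisTransform(axis=x, scale=0.5, offset=0)})
print(out.sample_shape[c], out.sample_shape[x])  # 3 100
print(out.inner_slice[x])                        # SliceInfo(start=20, stop=40)
print(out.halo[c])                               # Halo(left=0, right=0) for the fixed-size axis
```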
      +
      +
      + +
      + + def + split_shape_into_blocks( shape: Mapping[bioimageio.spec.model.v0_5.AxisId, int], block_shape: Mapping[bioimageio.spec.model.v0_5.AxisId, int], halo: Mapping[bioimageio.spec.model.v0_5.AxisId, Union[int, Tuple[int, int], bioimageio.core.common.Halo]], stride: Optional[Mapping[bioimageio.spec.model.v0_5.AxisId, int]] = None) -> Tuple[int, Generator[BlockMeta, Any, NoneType]]: + + + +
      + +
      251def split_shape_into_blocks(
      +252    shape: PerAxis[int],
      +253    block_shape: PerAxis[int],
      +254    halo: PerAxis[HaloLike],
      +255    stride: Optional[PerAxis[int]] = None,
      +256) -> Tuple[TotalNumberOfBlocks, Generator[BlockMeta, Any, None]]:
      +257    assert all(a in shape for a in block_shape), (
      +258        tuple(shape),
      +259        set(block_shape),
      +260    )
      +261    if any(shape[a] < block_shape[a] for a in block_shape):
      +262        raise ValueError(f"shape {shape} is smaller than block shape {block_shape}")
      +263
      +264    assert all(a in shape for a in halo), (tuple(shape), set(halo))
      +265
      +266    # fill in default halo (0) and block axis length (from tensor shape)
      +267    halo = {a: Halo.create(halo.get(a, 0)) for a in shape}
      +268    block_shape = {a: block_shape.get(a, s) for a, s in shape.items()}
      +269    if stride is None:
      +270        stride = {}
      +271
      +272    inner_1d_slices: Dict[AxisId, List[SliceInfo]] = {}
      +273    for a, s in shape.items():
      +274        inner_size = block_shape[a] - sum(halo[a])
      +275        stride_1d = stride.get(a, inner_size)
      +276        inner_1d_slices[a] = [
      +277            SliceInfo(min(p, s - inner_size), min(p + inner_size, s))
      +278            for p in range(0, s, stride_1d)
      +279        ]
      +280
      +281    n_blocks = prod(map(len, inner_1d_slices.values()))
      +282
      +283    return n_blocks, _block_meta_generator(
      +284        shape,
      +285        blocks_in_sample=n_blocks,
      +286        inner_1d_slices=inner_1d_slices,
      +287        halo=halo,
      +288    )
      +
      + + + + +
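For a shape that is not an exact multiple of the block's inner size, the listed comprehension shifts the last block so that it still fits inside the sample (its inner slice then overlaps its neighbour). A small sketch without halo:

```python
from bioimageio.core.axis import AxisId
from bioimageio.core.block_meta import split_shape_into_blocks

x = AxisId("x")
n, blocks = split_shape_into_blocks(shape={x: 10}, block_shape={x: 4}, halo={})
print(n)  # 3
print([b.inner_slice[x] for b in blocks])
# start/stop pairs: (0, 4), (4, 8), (6, 10); the last block is shifted left to fit
```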
      +
      + +
      + + def + split_multiple_shapes_into_blocks( shapes: Mapping[bioimageio.spec.model.v0_5.TensorId, Mapping[bioimageio.spec.model.v0_5.AxisId, int]], block_shapes: Mapping[bioimageio.spec.model.v0_5.TensorId, Mapping[bioimageio.spec.model.v0_5.AxisId, int]], *, halo: Mapping[bioimageio.spec.model.v0_5.TensorId, Mapping[bioimageio.spec.model.v0_5.AxisId, Union[int, Tuple[int, int], bioimageio.core.common.Halo]]], strides: Optional[Mapping[bioimageio.spec.model.v0_5.TensorId, Mapping[bioimageio.spec.model.v0_5.AxisId, int]]] = None, broadcast: bool = False) -> Tuple[int, Iterable[Mapping[bioimageio.spec.model.v0_5.TensorId, BlockMeta]]]: + + + +
      + +
      313def split_multiple_shapes_into_blocks(
      +314    shapes: PerMember[PerAxis[int]],
      +315    block_shapes: PerMember[PerAxis[int]],
      +316    *,
      +317    halo: PerMember[PerAxis[HaloLike]],
      +318    strides: Optional[PerMember[PerAxis[int]]] = None,
      +319    broadcast: bool = False,
      +320) -> Tuple[TotalNumberOfBlocks, Iterable[PerMember[BlockMeta]]]:
      +321    if unknown_blocks := [t for t in block_shapes if t not in shapes]:
      +322        raise ValueError(
      +323            f"block shape specified for unknown tensors: {unknown_blocks}."
      +324        )
      +325
      +326    if not block_shapes:
      +327        block_shapes = shapes
      +328
      +329    if not broadcast and (
      +330        missing_blocks := [t for t in shapes if t not in block_shapes]
      +331    ):
      +332        raise ValueError(
      +333            f"no block shape specified for {missing_blocks}."
      +334            + " Set `broadcast` to True if these tensors should be repeated"
      +335            + " as a whole for each block."
      +336        )
      +337
      +338    if extra_halo := [t for t in halo if t not in block_shapes]:
      +339        raise ValueError(
      +340            f"`halo` specified for tensors without block shape: {extra_halo}."
      +341        )
      +342
      +343    if strides is None:
      +344        strides = {}
      +345
      +346    assert not (
      +347        unknown_block := [t for t in strides if t not in block_shapes]
      +348    ), f"`stride` specified for tensors without block shape: {unknown_block}"
      +349
      +350    blocks: Dict[MemberId, Iterable[BlockMeta]] = {}
      +351    n_blocks: Dict[MemberId, TotalNumberOfBlocks] = {}
      +352    for t in block_shapes:
      +353        n_blocks[t], blocks[t] = split_shape_into_blocks(
      +354            shape=shapes[t],
      +355            block_shape=block_shapes[t],
      +356            halo=halo.get(t, {}),
      +357            stride=strides.get(t),
      +358        )
      +359        assert n_blocks[t] > 0, n_blocks
      +360
      +361    assert len(blocks) > 0, blocks
      +362    assert len(n_blocks) > 0, n_blocks
      +363    unique_n_blocks = set(n_blocks.values())
      +364    n = max(unique_n_blocks)
      +365    if len(unique_n_blocks) == 2 and 1 in unique_n_blocks:
      +366        if not broadcast:
      +367            raise ValueError(
      +368                "Mismatch for total number of blocks due to unsplit (single block)"
      +369                + f" tensors: {n_blocks}. Set `broadcast` to True if you want to"
      +370                + " repeat unsplit (single block) tensors."
      +371            )
      +372
      +373        blocks = {
      +374            t: _repeat_single_block(block_gen, n) if n_blocks[t] == 1 else block_gen
      +375            for t, block_gen in blocks.items()
      +376        }
      +377    elif len(unique_n_blocks) != 1:
      +378        raise ValueError(f"Mismatch for total number of blocks: {n_blocks}")
      +379
      +380    return n, _aligned_blocks_generator(n, blocks)
      +
      + + + + +
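With `broadcast=True` a tensor that forms a single block (its block shape equals its shape) is repeated for every block of the other tensors, as in the sketch below. It assumes `MemberId` can be constructed from a plain string, analogous to `AxisId`:

```python
from bioimageio.core.axis import AxisId
from bioimageio.core.block_meta import split_multiple_shapes_into_blocks
from bioimageio.core.common import MemberId

x = AxisId("x")
raw, mask = MemberId("raw"), MemberId("mask")

n, block_iter = split_multiple_shapes_into_blocks(
    shapes={raw: {x: 256}, mask: {x: 16}},
    block_shapes={raw: {x: 64}, mask: {x: 16}},  # 'mask' forms a single block
    halo={},
    broadcast=True,  # repeat the single 'mask' block for every 'raw' block
)
print(n)  # 4
for member_blocks in block_iter:
    print(member_blocks[raw].inner_slice[x], member_blocks[mask].inner_slice[x])
# raw advances in steps of 64 while mask stays at (0, 16)
```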
      +
      + + \ No newline at end of file diff --git a/bioimageio/core/cli.html b/bioimageio/core/cli.html new file mode 100644 index 00000000..86d0d56c --- /dev/null +++ b/bioimageio/core/cli.html @@ -0,0 +1,2580 @@ + + + + + + + bioimageio.core.cli API documentation + + + + + + + + + + +
      +
      +

      +bioimageio.core.cli

      + +

      bioimageio CLI

      + +

      Note: Some docstrings use a hair space ' ' + to place the added '(default: ...)' on a new line.

      +
      + + + + + +
        1"""bioimageio CLI
      +  2
      +  3Note: Some docstrings use a hair space ' '
      +  4      to place the added '(default: ...)' on a new line.
      +  5"""
      +  6
      +  7import json
      +  8import shutil
      +  9import subprocess
      + 10import sys
      + 11from argparse import RawTextHelpFormatter
      + 12from difflib import SequenceMatcher
      + 13from functools import cached_property
      + 14from pathlib import Path
      + 15from pprint import pformat, pprint
      + 16from typing import (
      + 17    Any,
      + 18    Dict,
      + 19    Iterable,
      + 20    List,
      + 21    Mapping,
      + 22    Optional,
      + 23    Sequence,
      + 24    Set,
      + 25    Tuple,
      + 26    Type,
      + 27    Union,
      + 28)
      + 29
      + 30from loguru import logger
      + 31from pydantic import BaseModel, Field, model_validator
      + 32from pydantic_settings import (
      + 33    BaseSettings,
      + 34    CliPositionalArg,
      + 35    CliSettingsSource,
      + 36    CliSubCommand,
      + 37    JsonConfigSettingsSource,
      + 38    PydanticBaseSettingsSource,
      + 39    SettingsConfigDict,
      + 40    YamlConfigSettingsSource,
      + 41)
      + 42from ruyaml import YAML
      + 43from tqdm import tqdm
      + 44from typing_extensions import assert_never
      + 45
      + 46from bioimageio.spec import AnyModelDescr, InvalidDescr, load_description
      + 47from bioimageio.spec._internal.io_basics import ZipPath
      + 48from bioimageio.spec._internal.types import NotEmpty
      + 49from bioimageio.spec.dataset import DatasetDescr
      + 50from bioimageio.spec.model import ModelDescr, v0_4, v0_5
      + 51from bioimageio.spec.notebook import NotebookDescr
      + 52from bioimageio.spec.utils import download, ensure_description_is_model
      + 53
      + 54from .commands import (
      + 55    WeightFormatArgAll,
      + 56    WeightFormatArgAny,
      + 57    package,
      + 58    test,
      + 59    validate_format,
      + 60)
      + 61from .common import MemberId, SampleId
      + 62from .digest_spec import get_member_ids, load_sample_for_model
      + 63from .io import load_dataset_stat, save_dataset_stat, save_sample
      + 64from .prediction import create_prediction_pipeline
      + 65from .proc_setup import (
      + 66    DatasetMeasure,
      + 67    Measure,
      + 68    MeasureValue,
      + 69    StatsCalculator,
      + 70    get_required_dataset_measures,
      + 71)
      + 72from .sample import Sample
      + 73from .stat_measures import Stat
      + 74from .utils import VERSION
      + 75
      + 76yaml = YAML(typ="safe")
      + 77
      + 78
      + 79class CmdBase(BaseModel, use_attribute_docstrings=True, cli_implicit_flags=True):
      + 80    pass
      + 81
      + 82
      + 83class ArgMixin(BaseModel, use_attribute_docstrings=True, cli_implicit_flags=True):
      + 84    pass
      + 85
      + 86
      + 87class WithSource(ArgMixin):
      + 88    source: CliPositionalArg[str]
      + 89    """Url/path to a bioimageio.yaml/rdf.yaml file
      + 90    or a bioimage.io resource identifier, e.g. 'affable-shark'"""
      + 91
      + 92    @cached_property
      + 93    def descr(self):
      + 94        return load_description(self.source)
      + 95
      + 96    @property
      + 97    def descr_id(self) -> str:
      + 98        """a more user-friendly description id
      + 99        (replacing legacy ids with their nicknames)
      +100        """
      +101        if isinstance(self.descr, InvalidDescr):
      +102            return str(getattr(self.descr, "id", getattr(self.descr, "name")))
      +103        else:
      +104            return str(
      +105                (
      +106                    (bio_config := self.descr.config.get("bioimageio", {}))
      +107                    and isinstance(bio_config, dict)
      +108                    and bio_config.get("nickname")
      +109                )
      +110                or self.descr.id
      +111                or self.descr.name
      +112            )
      +113
      +114
      +115class ValidateFormatCmd(CmdBase, WithSource):
      +116    """validate the meta data format of a bioimageio resource."""
      +117
      +118    def run(self):
      +119        sys.exit(validate_format(self.descr))
      +120
      +121
      +122class TestCmd(CmdBase, WithSource):
      +123    """Test a bioimageio resource (beyond meta data formatting)"""
      +124
      +125    weight_format: WeightFormatArgAll = "all"
      +126    """The weight format to limit testing to.
      +127
      +128    (only relevant for model resources)"""
      +129
      +130    devices: Optional[Union[str, Sequence[str]]] = None
      +131    """Device(s) to use for testing"""
      +132
      +133    decimal: int = 4
      +134    """Precision for numerical comparisons"""
      +135
      +136    def run(self):
      +137        sys.exit(
      +138            test(
      +139                self.descr,
      +140                weight_format=self.weight_format,
      +141                devices=self.devices,
      +142                decimal=self.decimal,
      +143            )
      +144        )
      +145
      +146
      +147class PackageCmd(CmdBase, WithSource):
      +148    """save a resource's metadata with its associated files."""
      +149
      +150    path: CliPositionalArg[Path]
      +151    """The path to write the (zipped) package to.
      +152    If it does not have a `.zip` suffix
      +153    this command will save the package as an unzipped folder instead."""
      +154
      +155    weight_format: WeightFormatArgAll = "all"
      +156    """The weight format to include in the package (for model descriptions only)."""
      +157
      +158    def run(self):
      +159        if isinstance(self.descr, InvalidDescr):
      +160            self.descr.validation_summary.display()
      +161            raise ValueError("resource description is invalid")
      +162
      +163        sys.exit(
      +164            package(
      +165                self.descr,
      +166                self.path,
      +167                weight_format=self.weight_format,
      +168            )
      +169        )
      +170
      +171
      +172def _get_stat(
      +173    model_descr: AnyModelDescr,
      +174    dataset: Iterable[Sample],
      +175    dataset_length: int,
      +176    stats_path: Path,
      +177) -> Mapping[DatasetMeasure, MeasureValue]:
      +178    req_dataset_meas, _ = get_required_dataset_measures(model_descr)
      +179    if not req_dataset_meas:
      +180        return {}
      +181
      +182    req_dataset_meas, _ = get_required_dataset_measures(model_descr)
      +183
      +184    if stats_path.exists():
      +185        logger.info(f"loading precomputed dataset measures from {stats_path}")
      +186        stat = load_dataset_stat(stats_path)
      +187        for m in req_dataset_meas:
      +188            if m not in stat:
      +189                raise ValueError(f"Missing {m} in {stats_path}")
      +190
      +191        return stat
      +192
      +193    stats_calc = StatsCalculator(req_dataset_meas)
      +194
      +195    for sample in tqdm(
      +196        dataset, total=dataset_length, desc="precomputing dataset stats", unit="sample"
      +197    ):
      +198        stats_calc.update(sample)
      +199
      +200    stat = stats_calc.finalize()
      +201    save_dataset_stat(stat, stats_path)
      +202
      +203    return stat
      +204
      +205
      +206class PredictCmd(CmdBase, WithSource):
      +207    """Run inference on your data with a bioimage.io model."""
      +208
      +209    inputs: NotEmpty[Sequence[Union[str, NotEmpty[Tuple[str, ...]]]]] = (
      +210        "{input_id}/001.tif",
      +211    )
      +212    """Model input sample paths (for each input tensor)
      +213
      +214    The input paths are expected to have shape...
      +215     - (n_samples,) or (n_samples,1) for models expecting a single input tensor
      +216     - (n_samples,) containing the substring '{input_id}', or
      +217     - (n_samples, n_model_inputs) to provide each input tensor path explicitly.
      +218
      +219    All substrings that are replaced by metadata from the model description:
      +220    - '{model_id}'
      +221    - '{input_id}'
      +222
      +223    Example inputs to process sample 'a' and 'b'
      +224    for a model expecting a 'raw' and a 'mask' input tensor:
      +225    --inputs="[[\"a_raw.tif\",\"a_mask.tif\"],[\"b_raw.tif\",\"b_mask.tif\"]]"
      +226    (Note that JSON double quotes need to be escaped.)
      +227
      +228    Alternatively a `bioimageio-cli.yaml` (or `bioimageio-cli.json`) file
      +229    may provide the arguments, e.g.:
      +230    ```yaml
      +231    inputs:
      +232    - [a_raw.tif, a_mask.tif]
      +233    - [b_raw.tif, b_mask.tif]
      +234    ```
      +235
      +236    `.npy` and any file extension supported by imageio are supported.
      +237    Available formats are listed at
      +238    https://imageio.readthedocs.io/en/stable/formats/index.html#all-formats.
      +239    Some formats have additional dependencies.
      +240
      +241
      +242    """
      +243
      +244    outputs: Union[str, NotEmpty[Tuple[str, ...]]] = (
      +245        "outputs_{model_id}/{output_id}/{sample_id}.tif"
      +246    )
      +247    """Model output path pattern (per output tensor)
      +248
      +249    All substrings that are replaced:
      +250    - '{model_id}' (from model description)
      +251    - '{output_id}' (from model description)
      +252    - '{sample_id}' (extracted from input paths)
      +253
      +254
      +255    """
      +256
      +257    overwrite: bool = False
      +258    """allow overwriting existing output files"""
      +259
      +260    blockwise: bool = False
      +261    """process inputs blockwise"""
      +262
      +263    stats: Path = Path("dataset_statistics.json")
      +264    """path to dataset statistics
      +265    (will be written if it does not exist,
      +266    but the model requires statistical dataset measures)
      +267     """
      +268
      +269    preview: bool = False
      +270    """preview which files would be processed
      +271    and what outputs would be generated."""
      +272
      +273    weight_format: WeightFormatArgAny = "any"
      +274    """The weight format to use."""
      +275
      +276    example: bool = False
      +277    """generate and run an example
      +278
      +279    1. downloads example model inputs
      +280    2. creates a `{model_id}_example` folder
      +281    3. writes input arguments to `{model_id}_example/bioimageio-cli.yaml`
      +282    4. executes a preview dry-run
      +283    5. executes prediction with example input
      +284
      +285
      +286    """
      +287
      +288    def _example(self):
      +289        model_descr = ensure_description_is_model(self.descr)
      +290        input_ids = get_member_ids(model_descr.inputs)
      +291        example_inputs = (
      +292            model_descr.sample_inputs
      +293            if isinstance(model_descr, v0_4.ModelDescr)
      +294            else [ipt.sample_tensor or ipt.test_tensor for ipt in model_descr.inputs]
      +295        )
      +296        if not example_inputs:
      +297            raise ValueError(f"{self.descr_id} does not specify any example inputs.")
      +298
      +299        inputs001: List[str] = []
      +300        example_path = Path(f"{self.descr_id}_example")
      +301        example_path.mkdir(exist_ok=True)
      +302
      +303        for t, src in zip(input_ids, example_inputs):
      +304            local = download(src).path
      +305            dst = Path(f"{example_path}/{t}/001{''.join(local.suffixes)}")
      +306            dst.parent.mkdir(parents=True, exist_ok=True)
      +307            inputs001.append(dst.as_posix())
      +308            if isinstance(local, Path):
      +309                shutil.copy(local, dst)
      +310            elif isinstance(local, ZipPath):
      +311                _ = local.root.extract(local.at, path=dst)
      +312            else:
      +313                assert_never(local)
      +314
      +315        inputs = [tuple(inputs001)]
      +316        output_pattern = f"{example_path}/outputs/{{output_id}}/{{sample_id}}.tif"
      +317
      +318        bioimageio_cli_path = example_path / YAML_FILE
      +319        stats_file = "dataset_statistics.json"
      +320        stats = (example_path / stats_file).as_posix()
      +321        yaml.dump(
      +322            dict(
      +323                inputs=inputs,
      +324                outputs=output_pattern,
      +325                stats=stats_file,
      +326                blockwise=self.blockwise,
      +327            ),
      +328            bioimageio_cli_path,
      +329        )
      +330
      +331        yaml_file_content = None
      +332
      +333        # escaped double quotes
      +334        inputs_json = json.dumps(inputs)
      +335        inputs_escaped = inputs_json.replace('"', r"\"")
      +336        source_escaped = self.source.replace('"', r"\"")
      +337
      +338        def get_example_command(preview: bool, escape: bool = False):
      +339            q: str = '"' if escape else ""
      +340
      +341            return [
      +342                "bioimageio",
      +343                "predict",
      +344                # --no-preview not supported for py=3.8
      +345                *(["--preview"] if preview else []),
      +346                "--overwrite",
      +347                *(["--blockwise"] if self.blockwise else []),
      +348                f"--stats={q}{stats}{q}",
      +349                f"--inputs={q}{inputs_escaped if escape else inputs_json}{q}",
      +350                f"--outputs={q}{output_pattern}{q}",
      +351                f"{q}{source_escaped if escape else self.source}{q}",
      +352            ]
      +353
      +354        if Path(YAML_FILE).exists():
      +355            logger.info(
      +356                "temporarily removing '{}' to execute example prediction", YAML_FILE
      +357            )
      +358            yaml_file_content = Path(YAML_FILE).read_bytes()
      +359            Path(YAML_FILE).unlink()
      +360
      +361        try:
      +362            _ = subprocess.run(get_example_command(True), check=True)
      +363            _ = subprocess.run(get_example_command(False), check=True)
      +364        finally:
      +365            if yaml_file_content is not None:
      +366                _ = Path(YAML_FILE).write_bytes(yaml_file_content)
      +367                logger.debug("restored '{}'", YAML_FILE)
      +368
      +369        print(
      +370            "🎉 Successfully ran example prediction!\n"
      +371            + "To predict the example input using the CLI example config file"
      +372            + f" {example_path/YAML_FILE}, execute `bioimageio predict` from {example_path}:\n"
      +373            + f"$ cd {str(example_path)}\n"
      +374            + f'$ bioimageio predict "{source_escaped}"\n\n'
      +375            + "Alternatively run the following command"
      +376            + " in the current working directory, not the example folder:\n$ "
      +377            + " ".join(get_example_command(False, escape=True))
      +378            + f"\n(note that a local '{JSON_FILE}' or '{YAML_FILE}' may interfere with this)"
      +379        )
      +380
      +381    def run(self):
      +382        if self.example:
      +383            return self._example()
      +384
      +385        model_descr = ensure_description_is_model(self.descr)
      +386
      +387        input_ids = get_member_ids(model_descr.inputs)
      +388        output_ids = get_member_ids(model_descr.outputs)
      +389
      +390        minimum_input_ids = tuple(
      +391            str(ipt.id) if isinstance(ipt, v0_5.InputTensorDescr) else str(ipt.name)
      +392            for ipt in model_descr.inputs
      +393            if not isinstance(ipt, v0_5.InputTensorDescr) or not ipt.optional
      +394        )
      +395        maximum_input_ids = tuple(
      +396            str(ipt.id) if isinstance(ipt, v0_5.InputTensorDescr) else str(ipt.name)
      +397            for ipt in model_descr.inputs
      +398        )
      +399
      +400        def expand_inputs(i: int, ipt: Union[str, Tuple[str, ...]]) -> Tuple[str, ...]:
      +401            if isinstance(ipt, str):
      +402                ipts = tuple(
      +403                    ipt.format(model_id=self.descr_id, input_id=t) for t in input_ids
      +404                )
      +405            else:
      +406                ipts = tuple(
      +407                    p.format(model_id=self.descr_id, input_id=t)
      +408                    for t, p in zip(input_ids, ipt)
      +409                )
      +410
      +411            if len(set(ipts)) < len(ipts):
      +412                if len(minimum_input_ids) == len(maximum_input_ids):
      +413                    n = len(minimum_input_ids)
      +414                else:
      +415                    n = f"{len(minimum_input_ids)}-{len(maximum_input_ids)}"
      +416
      +417                raise ValueError(
      +418                    f"[input sample #{i}] Include '{{input_id}}' in path pattern or explicitly specify {n} distinct input paths (got {ipt})"
      +419                )
      +420
      +421            if len(ipts) < len(minimum_input_ids):
      +422                raise ValueError(
      +423                    f"[input sample #{i}] Expected at least {len(minimum_input_ids)} inputs {minimum_input_ids}, got {ipts}"
      +424                )
      +425
      +426            if len(ipts) > len(maximum_input_ids):
      +427                raise ValueError(
      +428                    f"Expected at most {len(maximum_input_ids)} inputs {maximum_input_ids}, got {ipts}"
      +429                )
      +430
      +431            return ipts
      +432
      +433        inputs = [expand_inputs(i, ipt) for i, ipt in enumerate(self.inputs, start=1)]
      +434
      +435        sample_paths_in = [
      +436            {t: Path(p) for t, p in zip(input_ids, ipts)} for ipts in inputs
      +437        ]
      +438
      +439        sample_ids = _get_sample_ids(sample_paths_in)
      +440
      +441        def expand_outputs():
      +442            if isinstance(self.outputs, str):
      +443                outputs = [
      +444                    tuple(
      +445                        Path(
      +446                            self.outputs.format(
      +447                                model_id=self.descr_id, output_id=t, sample_id=s
      +448                            )
      +449                        )
      +450                        for t in output_ids
      +451                    )
      +452                    for s in sample_ids
      +453                ]
      +454            else:
      +455                outputs = [
      +456                    tuple(
      +457                        Path(p.format(model_id=self.descr_id, output_id=t, sample_id=s))
      +458                        for t, p in zip(output_ids, self.outputs)
      +459                    )
      +460                    for s in sample_ids
      +461                ]
      +462
      +463            for i, out in enumerate(outputs, start=1):
      +464                if len(set(out)) < len(out):
      +465                    raise ValueError(
      +466                        f"[output sample #{i}] Include '{{output_id}}' in path pattern or explicitly specify {len(output_ids)} distinct output paths (got {out})"
      +467                    )
      +468
      +469                if len(out) != len(output_ids):
      +470                    raise ValueError(
      +471                        f"[output sample #{i}] Expected {len(output_ids)} outputs {output_ids}, got {out}"
      +472                    )
      +473
      +474            return outputs
      +475
      +476        outputs = expand_outputs()
      +477
      +478        sample_paths_out = [
      +479            {MemberId(t): Path(p) for t, p in zip(output_ids, out)} for out in outputs
      +480        ]
      +481
      +482        if not self.overwrite:
      +483            for sample_paths in sample_paths_out:
      +484                for p in sample_paths.values():
      +485                    if p.exists():
      +486                        raise FileExistsError(
      +487                            f"{p} already exists. use --overwrite to (re-)write outputs anyway."
      +488                        )
      +489        if self.preview:
      +490            print("🛈 bioimageio prediction preview structure:")
      +491            pprint(
      +492                {
      +493                    "{sample_id}": dict(
      +494                        inputs={"{input_id}": "<input path>"},
      +495                        outputs={"{output_id}": "<output path>"},
      +496                    )
      +497                }
      +498            )
      +499            print("🔎 bioimageio prediction preview output:")
      +500            pprint(
      +501                {
      +502                    s: dict(
      +503                        inputs={t: p.as_posix() for t, p in sp_in.items()},
      +504                        outputs={t: p.as_posix() for t, p in sp_out.items()},
      +505                    )
      +506                    for s, sp_in, sp_out in zip(
      +507                        sample_ids, sample_paths_in, sample_paths_out
      +508                    )
      +509                }
      +510            )
      +511            return
      +512
      +513        def input_dataset(stat: Stat):
      +514            for s, sp_in in zip(sample_ids, sample_paths_in):
      +515                yield load_sample_for_model(
      +516                    model=model_descr,
      +517                    paths=sp_in,
      +518                    stat=stat,
      +519                    sample_id=s,
      +520                )
      +521
      +522        stat: Dict[Measure, MeasureValue] = dict(
      +523            _get_stat(
      +524                model_descr, input_dataset({}), len(sample_ids), self.stats
      +525            ).items()
      +526        )
      +527
      +528        pp = create_prediction_pipeline(
      +529            model_descr,
      +530            weight_format=None if self.weight_format == "any" else self.weight_format,
      +531        )
      +532        predict_method = (
      +533            pp.predict_sample_with_blocking
      +534            if self.blockwise
      +535            else pp.predict_sample_without_blocking
      +536        )
      +537
      +538        for sample_in, sp_out in tqdm(
      +539            zip(input_dataset(dict(stat)), sample_paths_out),
      +540            total=len(inputs),
      +541            desc=f"predict with {self.descr_id}",
      +542            unit="sample",
      +543        ):
      +544            sample_out = predict_method(sample_in)
      +545            save_sample(sp_out, sample_out)
      +546
      +547
      +548JSON_FILE = "bioimageio-cli.json"
      +549YAML_FILE = "bioimageio-cli.yaml"
      +550
      +551
      +552class Bioimageio(
      +553    BaseSettings,
      +554    cli_parse_args=True,
      +555    cli_prog_name="bioimageio",
      +556    cli_use_class_docs_for_groups=True,
      +557    cli_implicit_flags=True,
      +558    use_attribute_docstrings=True,
      +559):
      +560    """bioimageio - CLI for bioimage.io resources 🦒"""
      +561
      +562    model_config = SettingsConfigDict(
      +563        json_file=JSON_FILE,
      +564        yaml_file=YAML_FILE,
      +565    )
      +566
      +567    validate_format: CliSubCommand[ValidateFormatCmd] = Field(alias="validate-format")
      +568    "Check a resource's metadata format"
      +569
      +570    test: CliSubCommand[TestCmd]
      +571    "Test a bioimageio resource (beyond meta data formatting)"
      +572
      +573    package: CliSubCommand[PackageCmd]
      +574    "Package a resource"
      +575
      +576    predict: CliSubCommand[PredictCmd]
      +577    "Predict with a model resource"
      +578
      +579    @classmethod
      +580    def settings_customise_sources(
      +581        cls,
      +582        settings_cls: Type[BaseSettings],
      +583        init_settings: PydanticBaseSettingsSource,
      +584        env_settings: PydanticBaseSettingsSource,
      +585        dotenv_settings: PydanticBaseSettingsSource,
      +586        file_secret_settings: PydanticBaseSettingsSource,
      +587    ) -> Tuple[PydanticBaseSettingsSource, ...]:
      +588        cli: CliSettingsSource[BaseSettings] = CliSettingsSource(
      +589            settings_cls,
      +590            cli_parse_args=True,
      +591            formatter_class=RawTextHelpFormatter,
      +592        )
      +593        sys_args = pformat(sys.argv)
      +594        logger.info("starting CLI with arguments:\n{}", sys_args)
      +595        return (
      +596            cli,
      +597            init_settings,
      +598            YamlConfigSettingsSource(settings_cls),
      +599            JsonConfigSettingsSource(settings_cls),
      +600        )
      +601
      +602    @model_validator(mode="before")
      +603    @classmethod
      +604    def _log(cls, data: Any):
      +605        logger.info(
      +606            "loaded CLI input:\n{}",
      +607            pformat({k: v for k, v in data.items() if v is not None}),
      +608        )
      +609        return data
      +610
      +611    def run(self):
      +612        logger.info(
      +613            "executing CLI command:\n{}",
      +614            pformat({k: v for k, v in self.model_dump().items() if v is not None}),
      +615        )
      +616        cmd = self.validate_format or self.test or self.package or self.predict
      +617        assert cmd is not None
      +618        cmd.run()
      +619
      +620
      +621assert isinstance(Bioimageio.__doc__, str)
      +622Bioimageio.__doc__ += f"""
      +623
      +624library versions:
      +625  bioimageio.core {VERSION}
      +626  bioimageio.spec {VERSION}
      +627
      +628spec format versions:
      +629        model RDF {ModelDescr.implemented_format_version}
      +630      dataset RDF {DatasetDescr.implemented_format_version}
      +631     notebook RDF {NotebookDescr.implemented_format_version}
      +632
      +633"""
      +634
      +635
      +636def _get_sample_ids(
      +637    input_paths: Sequence[Mapping[MemberId, Path]],
      +638) -> Sequence[SampleId]:
      +639    """Get sample ids for given input paths, based on the common path per sample.
      +640
      +641    Falls back to sample01, sample02, etc."""
      +642
      +643    matcher = SequenceMatcher()
      +644
      +645    def get_common_seq(seqs: Sequence[Sequence[str]]) -> Sequence[str]:
      +646        """extract a common sequence from multiple sequences
      +647        (order sensitive; strips whitespace and slashes)
      +648        """
      +649        common = seqs[0]
      +650
      +651        for seq in seqs[1:]:
      +652            if not seq:
      +653                continue
      +654            matcher.set_seqs(common, seq)
      +655            i, _, size = matcher.find_longest_match()
      +656            common = common[i : i + size]
      +657
      +658        if isinstance(common, str):
      +659            common = common.strip().strip("/")
      +660        else:
      +661            common = [cs for c in common if (cs := c.strip().strip("/"))]
      +662
      +663        if not common:
      +664            raise ValueError(f"failed to find common sequence for {seqs}")
      +665
      +666        return common
      +667
      +668    def get_shorter_diff(seqs: Sequence[Sequence[str]]) -> List[Sequence[str]]:
      +669        """get a shorter sequence whose entries are still unique
      +670        (order sensitive, not minimal sequence)
      +671        """
      +672        min_seq_len = min(len(s) for s in seqs)
      +673        # cut from the start
      +674        for start in range(min_seq_len - 1, -1, -1):
      +675            shortened = [s[start:] for s in seqs]
      +676            if len(set(shortened)) == len(seqs):
      +677                min_seq_len -= start
      +678                break
      +679        else:
      +680            seen: Set[Sequence[str]] = set()
      +681            dupes = [s for s in seqs if s in seen or seen.add(s)]
      +682            raise ValueError(f"Found duplicate entries {dupes}")
      +683
      +684        # cut from the end
      +685        for end in range(min_seq_len - 1, 1, -1):
      +686            shortened = [s[:end] for s in shortened]
      +687            if len(set(shortened)) == len(seqs):
      +688                break
      +689
      +690        return shortened
      +691
      +692    full_tensor_ids = [
      +693        sorted(
      +694            p.resolve().with_suffix("").as_posix() for p in input_sample_paths.values()
      +695        )
      +696        for input_sample_paths in input_paths
      +697    ]
      +698    try:
      +699        long_sample_ids = [get_common_seq(t) for t in full_tensor_ids]
      +700        sample_ids = get_shorter_diff(long_sample_ids)
      +701    except ValueError as e:
      +702        raise ValueError(f"failed to extract sample ids: {e}")
      +703
      +704    return sample_ids
      +
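The sample-id heuristic above reduces to difflib's longest-common-block search. A minimal, self-contained sketch of that building block (the paths below are made up for illustration and are not part of the CLI):

```python
from difflib import SequenceMatcher

# Hypothetical tensor paths belonging to one sample.
raw, mask = "data/sample_a/raw", "data/sample_a/mask"

# find_longest_match returns the longest shared block; _get_sample_ids keeps
# such a common path fragment as the sample id candidate before shortening it
# so that ids stay unique across samples.
matcher = SequenceMatcher(a=raw, b=mask)
match = matcher.find_longest_match(0, len(raw), 0, len(mask))
print(raw[match.a : match.a + match.size].strip("/"))  # data/sample_a
```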
      + + +
      +
      +
      + yaml = +<ruyaml.main.YAML object> + + +
      + + + + +
      +
      + +
      + + class + CmdBase(pydantic.main.BaseModel): + + + +
      + +
      80class CmdBase(BaseModel, use_attribute_docstrings=True, cli_implicit_flags=True):
      +81    pass
      +
      + + +

      Usage docs: https://docs.pydantic.dev/2.9/concepts/models/

      A base class for creating Pydantic models.

      Attributes:

      • __class_vars__: The names of the class variables defined on the model.
      • __private_attributes__: Metadata about the private attributes of the model.
      • __signature__: The synthesized __init__ [Signature][inspect.Signature] of the model.
      • __pydantic_complete__: Whether model building is completed, or if there are still undefined fields.
      • __pydantic_core_schema__: The core schema of the model.
      • __pydantic_custom_init__: Whether the model has a custom __init__ function.
      • __pydantic_decorators__: Metadata containing the decorators defined on the model. This replaces Model.__validators__ and Model.__root_validators__ from Pydantic V1.
      • __pydantic_generic_metadata__: Metadata for generic models; contains data used for a similar purpose to __args__, __origin__, __parameters__ in typing-module generics. May eventually be replaced by these.
      • __pydantic_parent_namespace__: Parent namespace of the model, used for automatic rebuilding of models.
      • __pydantic_post_init__: The name of the post-init method for the model, if defined.
      • __pydantic_root_model__: Whether the model is a [RootModel][pydantic.root_model.RootModel].
      • __pydantic_serializer__: The pydantic-core SchemaSerializer used to dump instances of the model.
      • __pydantic_validator__: The pydantic-core SchemaValidator used to validate instances of the model.
      • __pydantic_extra__: A dictionary containing extra values, if [extra][pydantic.config.ConfigDict.extra] is set to 'allow'.
      • __pydantic_fields_set__: The names of fields explicitly set during instantiation.
      • __pydantic_private__: Values of private attributes set on the model instance.
      +
      + + +
      +
      + +
      + + class + ArgMixin(pydantic.main.BaseModel): + + + +
      + +
      84class ArgMixin(BaseModel, use_attribute_docstrings=True, cli_implicit_flags=True):
      +85    pass
      +
      + + +

      Usage docs: https://docs.pydantic.dev/2.9/concepts/models/

      A base class for creating Pydantic models.
      +
      + + +
      +
      + +
      + + class + WithSource(ArgMixin): + + + +
      + +
       88class WithSource(ArgMixin):
      + 89    source: CliPositionalArg[str]
      + 90    """Url/path to a bioimageio.yaml/rdf.yaml file
      + 91    or a bioimage.io resource identifier, e.g. 'affable-shark'"""
      + 92
      + 93    @cached_property
      + 94    def descr(self):
      + 95        return load_description(self.source)
      + 96
      + 97    @property
      + 98    def descr_id(self) -> str:
      + 99        """a more user-friendly description id
      +100        (replacing legacy ids with their nicknames)
      +101        """
      +102        if isinstance(self.descr, InvalidDescr):
      +103            return str(getattr(self.descr, "id", getattr(self.descr, "name")))
      +104        else:
      +105            return str(
      +106                (
      +107                    (bio_config := self.descr.config.get("bioimageio", {}))
      +108                    and isinstance(bio_config, dict)
      +109                    and bio_config.get("nickname")
      +110                )
      +111                or self.descr.id
      +112                or self.descr.name
      +113            )
      +
      + + +

      Usage docs: https://docs.pydantic.dev/2.9/concepts/models/

      A base class for creating Pydantic models.
      +
      + + +
      +
      + source: Annotated[str, <class 'pydantic_settings.sources._CliPositionalArg'>] + + +
      + + +

      Url/path to a bioimageio.yaml/rdf.yaml file or a bioimage.io resource identifier, e.g. 'affable-shark'

      +
      + + +
      +
      + +
      + descr + + + +
      + +
      93    @cached_property
      +94    def descr(self):
      +95        return load_description(self.source)
      +
      + + + + +
      +
      + +
      + descr_id: str + + + +
      + +
       97    @property
      + 98    def descr_id(self) -> str:
      + 99        """a more user-friendly description id
      +100        (replacing legacy ids with their nicknames)
      +101        """
      +102        if isinstance(self.descr, InvalidDescr):
      +103            return str(getattr(self.descr, "id", getattr(self.descr, "name")))
      +104        else:
      +105            return str(
      +106                (
      +107                    (bio_config := self.descr.config.get("bioimageio", {}))
      +108                    and isinstance(bio_config, dict)
      +109                    and bio_config.get("nickname")
      +110                )
      +111                or self.descr.id
      +112                or self.descr.name
      +113            )
      +
      + + +

      a more user-friendly description id (replacing legacy ids with their nicknames)

      +
      + + +
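Outside the CLI, the same two steps look roughly like this (the resource id 'affable-shark' is only a placeholder; a valid description exposes `id`/`name` as used by `descr_id` above):

```python
from bioimageio.spec import load_description

# WithSource.descr caches exactly this call; the source may be a URL/path to a
# bioimageio.yaml/rdf.yaml or a bioimage.io resource identifier.
descr = load_description("affable-shark")  # placeholder source
print(descr.id or descr.name)
```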
      +
      +
      + +
      + + class + ValidateFormatCmd(CmdBase, WithSource): + + + +
      + +
      116class ValidateFormatCmd(CmdBase, WithSource):
      +117    """validate the meta data format of a bioimageio resource."""
      +118
      +119    def run(self):
      +120        sys.exit(validate_format(self.descr))
      +
      + + +

      validate the meta data format of a bioimageio resource.

      +
      + + +
      + +
      + + def + run(self): + + + +
      + +
      119    def run(self):
      +120        sys.exit(validate_format(self.descr))
      +
      + + + + +
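A rough programmatic counterpart of `bioimageio validate-format <SOURCE>`; the import location of `validate_format` is an assumption based on how this module uses it, and the path is a placeholder:

```python
import sys

from bioimageio.core.commands import validate_format  # assumed import location
from bioimageio.spec import load_description

descr = load_description("path/to/rdf.yaml")  # placeholder source
sys.exit(validate_format(descr))  # exit code 0 on success, mirroring ValidateFormatCmd.run
```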
      +
      +
      Inherited Members
      +
      + +
      +
      +
      +
      + +
      + + class + TestCmd(CmdBase, WithSource): + + + +
      + +
      123class TestCmd(CmdBase, WithSource):
      +124    """Test a bioimageio resource (beyond meta data formatting)"""
      +125
      +126    weight_format: WeightFormatArgAll = "all"
      +127    """The weight format to limit testing to.
      +128
      +129    (only relevant for model resources)"""
      +130
      +131    devices: Optional[Union[str, Sequence[str]]] = None
      +132    """Device(s) to use for testing"""
      +133
      +134    decimal: int = 4
      +135    """Precision for numerical comparisons"""
      +136
      +137    def run(self):
      +138        sys.exit(
      +139            test(
      +140                self.descr,
      +141                weight_format=self.weight_format,
      +142                devices=self.devices,
      +143                decimal=self.decimal,
      +144            )
      +145        )
      +
      + + +

      Test a bioimageio resource (beyond meta data formatting)

      +
      + + +
      +
      + weight_format: Literal['keras_hdf5', 'onnx', 'pytorch_state_dict', 'tensorflow_js', 'tensorflow_saved_model_bundle', 'torchscript', 'all'] + + +
      + + +

      The weight format to limit testing to.

      + +

      (only relevant for model resources)

      +
      + + +
      +
      +
      + devices: Union[str, Sequence[str], NoneType] + + +
      + + +

      Device(s) to use for testing

      +
      + + +
      +
      +
      + decimal: int + + +
      + + +

      Precision for numerical comparisons

      +
      + + +
      +
      + +
      + + def + run(self): + + + +
      + +
      137    def run(self):
      +138        sys.exit(
      +139            test(
      +140                self.descr,
      +141                weight_format=self.weight_format,
      +142                devices=self.devices,
      +143                decimal=self.decimal,
      +144            )
      +145        )
      +
      + + + + +
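The equivalent call without the CLI wrapper, using TestCmd's documented defaults (`weight_format="all"`, `devices=None`, `decimal=4`); the import location of `test` is an assumption:

```python
from bioimageio.core.commands import test  # assumed import location
from bioimageio.spec import load_description

descr = load_description("path/to/rdf.yaml")  # placeholder source
exit_code = test(descr, weight_format="all", devices=None, decimal=4)
```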
      +
      +
      Inherited Members
      +
      + +
      +
      +
      +
      + +
      + + class + PackageCmd(CmdBase, WithSource): + + + +
      + +
      148class PackageCmd(CmdBase, WithSource):
      +149    """save a resource's metadata with its associated files."""
      +150
      +151    path: CliPositionalArg[Path]
      +152    """The path to write the (zipped) package to.
      +153    If it does not have a `.zip` suffix
      +154    this command will save the package as an unzipped folder instead."""
      +155
      +156    weight_format: WeightFormatArgAll = "all"
      +157    """The weight format to include in the package (for model descriptions only)."""
      +158
      +159    def run(self):
      +160        if isinstance(self.descr, InvalidDescr):
      +161            self.descr.validation_summary.display()
      +162            raise ValueError("resource description is invalid")
      +163
      +164        sys.exit(
      +165            package(
      +166                self.descr,
      +167                self.path,
      +168                weight_format=self.weight_format,
      +169            )
      +170        )
      +
      + + +

      save a resource's metadata with its associated files.

      +
      + + +
      +
      + path: Annotated[pathlib.Path, <class 'pydantic_settings.sources._CliPositionalArg'>] + + +
      + + +

      The path to write the (zipped) package to. If it does not have a .zip suffix this command will save the package as an unzipped folder instead.

      +
      + + +
      +
      +
      + weight_format: Literal['keras_hdf5', 'onnx', 'pytorch_state_dict', 'tensorflow_js', 'tensorflow_saved_model_bundle', 'torchscript', 'all'] + + +
      + + +

      The weight format to include in the package (for model descriptions only).

      +
      + + +
      +
      + +
      + + def + run(self): + + + +
      + +
      159    def run(self):
      +160        if isinstance(self.descr, InvalidDescr):
      +161            self.descr.validation_summary.display()
      +162            raise ValueError("resource description is invalid")
      +163
      +164        sys.exit(
      +165            package(
      +166                self.descr,
      +167                self.path,
      +168                weight_format=self.weight_format,
      +169            )
      +170        )
      +
      + + + + +
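And the packaging step by itself; as the docstring above notes, a `path` without a `.zip` suffix produces an unzipped folder (the import location of `package` is an assumption, paths are placeholders):

```python
from pathlib import Path

from bioimageio.core.commands import package  # assumed import location
from bioimageio.spec import load_description

descr = load_description("path/to/rdf.yaml")  # placeholder source
# ".zip" suffix -> zipped package; any other path -> unzipped folder
exit_code = package(descr, Path("my-model-package.zip"), weight_format="all")
```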
      +
      +
      Inherited Members
      +
      + +
      +
      +
      +
      + +
      + + class + PredictCmd(CmdBase, WithSource): + + + +
      + +
      207class PredictCmd(CmdBase, WithSource):
      +208    """Run inference on your data with a bioimage.io model."""
      +209
      +210    inputs: NotEmpty[Sequence[Union[str, NotEmpty[Tuple[str, ...]]]]] = (
      +211        "{input_id}/001.tif",
      +212    )
      +213    """Model input sample paths (for each input tensor)
      +214
      +215    The input paths are expected to have shape...
      +216     - (n_samples,) or (n_samples,1) for models expecting a single input tensor
      +217     - (n_samples,) containing the substring '{input_id}', or
      +218     - (n_samples, n_model_inputs) to provide each input tensor path explicitly.
      +219
      +220    All substrings that are replaced by metadata from the model description:
      +221    - '{model_id}'
      +222    - '{input_id}'
      +223
      +224    Example inputs to process sample 'a' and 'b'
      +225    for a model expecting a 'raw' and a 'mask' input tensor:
      +226    --inputs="[[\"a_raw.tif\",\"a_mask.tif\"],[\"b_raw.tif\",\"b_mask.tif\"]]"
      +227    (Note that JSON double quotes need to be escaped.)
      +228
      +229    Alternatively a `bioimageio-cli.yaml` (or `bioimageio-cli.json`) file
      +230    may provide the arguments, e.g.:
      +231    ```yaml
      +232    inputs:
      +233    - [a_raw.tif, a_mask.tif]
      +234    - [b_raw.tif, b_mask.tif]
      +235    ```
      +236
      +237    `.npy` and any file extension supported by imageio are supported.
      +238    Available formats are listed at
      +239    https://imageio.readthedocs.io/en/stable/formats/index.html#all-formats.
      +240    Some formats have additional dependencies.
      +241
      +242
      +243    """
      +244
      +245    outputs: Union[str, NotEmpty[Tuple[str, ...]]] = (
      +246        "outputs_{model_id}/{output_id}/{sample_id}.tif"
      +247    )
      +248    """Model output path pattern (per output tensor)
      +249
      +250    All substrings that are replaced:
      +251    - '{model_id}' (from model description)
      +252    - '{output_id}' (from model description)
      +253    - '{sample_id}' (extracted from input paths)
      +254
      +255
      +256    """
      +257
      +258    overwrite: bool = False
      +259    """allow overwriting existing output files"""
      +260
      +261    blockwise: bool = False
      +262    """process inputs blockwise"""
      +263
      +264    stats: Path = Path("dataset_statistics.json")
      +265    """path to dataset statistics
      +266    (will be written if it does not exist,
      +267    but the model requires statistical dataset measures)
      +268     """
      +269
      +270    preview: bool = False
      +271    """preview which files would be processed
      +272    and what outputs would be generated."""
      +273
      +274    weight_format: WeightFormatArgAny = "any"
      +275    """The weight format to use."""
      +276
      +277    example: bool = False
      +278    """generate and run an example
      +279
      +280    1. downloads example model inputs
      +281    2. creates a `{model_id}_example` folder
      +282    3. writes input arguments to `{model_id}_example/bioimageio-cli.yaml`
      +283    4. executes a preview dry-run
      +284    5. executes prediction with example input
      +285
      +286
      +287    """
      +288
      +289    def _example(self):
      +290        model_descr = ensure_description_is_model(self.descr)
      +291        input_ids = get_member_ids(model_descr.inputs)
      +292        example_inputs = (
      +293            model_descr.sample_inputs
      +294            if isinstance(model_descr, v0_4.ModelDescr)
      +295            else [ipt.sample_tensor or ipt.test_tensor for ipt in model_descr.inputs]
      +296        )
      +297        if not example_inputs:
      +298            raise ValueError(f"{self.descr_id} does not specify any example inputs.")
      +299
      +300        inputs001: List[str] = []
      +301        example_path = Path(f"{self.descr_id}_example")
      +302        example_path.mkdir(exist_ok=True)
      +303
      +304        for t, src in zip(input_ids, example_inputs):
      +305            local = download(src).path
      +306            dst = Path(f"{example_path}/{t}/001{''.join(local.suffixes)}")
      +307            dst.parent.mkdir(parents=True, exist_ok=True)
      +308            inputs001.append(dst.as_posix())
      +309            if isinstance(local, Path):
      +310                shutil.copy(local, dst)
      +311            elif isinstance(local, ZipPath):
      +312                _ = local.root.extract(local.at, path=dst)
      +313            else:
      +314                assert_never(local)
      +315
      +316        inputs = [tuple(inputs001)]
      +317        output_pattern = f"{example_path}/outputs/{{output_id}}/{{sample_id}}.tif"
      +318
      +319        bioimageio_cli_path = example_path / YAML_FILE
      +320        stats_file = "dataset_statistics.json"
      +321        stats = (example_path / stats_file).as_posix()
      +322        yaml.dump(
      +323            dict(
      +324                inputs=inputs,
      +325                outputs=output_pattern,
      +326                stats=stats_file,
      +327                blockwise=self.blockwise,
      +328            ),
      +329            bioimageio_cli_path,
      +330        )
      +331
      +332        yaml_file_content = None
      +333
      +334        # escaped double quotes
      +335        inputs_json = json.dumps(inputs)
      +336        inputs_escaped = inputs_json.replace('"', r"\"")
      +337        source_escaped = self.source.replace('"', r"\"")
      +338
      +339        def get_example_command(preview: bool, escape: bool = False):
      +340            q: str = '"' if escape else ""
      +341
      +342            return [
      +343                "bioimageio",
      +344                "predict",
      +345                # --no-preview not supported for py=3.8
      +346                *(["--preview"] if preview else []),
      +347                "--overwrite",
      +348                *(["--blockwise"] if self.blockwise else []),
      +349                f"--stats={q}{stats}{q}",
      +350                f"--inputs={q}{inputs_escaped if escape else inputs_json}{q}",
      +351                f"--outputs={q}{output_pattern}{q}",
      +352                f"{q}{source_escaped if escape else self.source}{q}",
      +353            ]
      +354
      +355        if Path(YAML_FILE).exists():
      +356            logger.info(
      +357                "temporarily removing '{}' to execute example prediction", YAML_FILE
      +358            )
      +359            yaml_file_content = Path(YAML_FILE).read_bytes()
      +360            Path(YAML_FILE).unlink()
      +361
      +362        try:
      +363            _ = subprocess.run(get_example_command(True), check=True)
      +364            _ = subprocess.run(get_example_command(False), check=True)
      +365        finally:
      +366            if yaml_file_content is not None:
      +367                _ = Path(YAML_FILE).write_bytes(yaml_file_content)
      +368                logger.debug("restored '{}'", YAML_FILE)
      +369
      +370        print(
      +371            "🎉 Successfully ran example prediction!\n"
      +372            + "To predict the example input using the CLI example config file"
      +373            + f" {example_path/YAML_FILE}, execute `bioimageio predict` from {example_path}:\n"
      +374            + f"$ cd {str(example_path)}\n"
      +375            + f'$ bioimageio predict "{source_escaped}"\n\n'
      +376            + "Alternatively run the following command"
      +377            + " in the current working directory, not the example folder:\n$ "
      +378            + " ".join(get_example_command(False, escape=True))
      +379            + f"\n(note that a local '{JSON_FILE}' or '{YAML_FILE}' may interfere with this)"
      +380        )
      +381
      +382    def run(self):
      +383        if self.example:
      +384            return self._example()
      +385
      +386        model_descr = ensure_description_is_model(self.descr)
      +387
      +388        input_ids = get_member_ids(model_descr.inputs)
      +389        output_ids = get_member_ids(model_descr.outputs)
      +390
      +391        minimum_input_ids = tuple(
      +392            str(ipt.id) if isinstance(ipt, v0_5.InputTensorDescr) else str(ipt.name)
      +393            for ipt in model_descr.inputs
      +394            if not isinstance(ipt, v0_5.InputTensorDescr) or not ipt.optional
      +395        )
      +396        maximum_input_ids = tuple(
      +397            str(ipt.id) if isinstance(ipt, v0_5.InputTensorDescr) else str(ipt.name)
      +398            for ipt in model_descr.inputs
      +399        )
      +400
      +401        def expand_inputs(i: int, ipt: Union[str, Tuple[str, ...]]) -> Tuple[str, ...]:
      +402            if isinstance(ipt, str):
      +403                ipts = tuple(
      +404                    ipt.format(model_id=self.descr_id, input_id=t) for t in input_ids
      +405                )
      +406            else:
      +407                ipts = tuple(
      +408                    p.format(model_id=self.descr_id, input_id=t)
      +409                    for t, p in zip(input_ids, ipt)
      +410                )
      +411
      +412            if len(set(ipts)) < len(ipts):
      +413                if len(minimum_input_ids) == len(maximum_input_ids):
      +414                    n = len(minimum_input_ids)
      +415                else:
      +416                    n = f"{len(minimum_input_ids)}-{len(maximum_input_ids)}"
      +417
      +418                raise ValueError(
      +419                    f"[input sample #{i}] Include '{{input_id}}' in path pattern or explicitly specify {n} distinct input paths (got {ipt})"
      +420                )
      +421
      +422            if len(ipts) < len(minimum_input_ids):
      +423                raise ValueError(
      +424                    f"[input sample #{i}] Expected at least {len(minimum_input_ids)} inputs {minimum_input_ids}, got {ipts}"
      +425                )
      +426
      +427            if len(ipts) > len(maximum_input_ids):
      +428                raise ValueError(
      +429                    f"Expected at most {len(maximum_input_ids)} inputs {maximum_input_ids}, got {ipts}"
      +430                )
      +431
      +432            return ipts
      +433
      +434        inputs = [expand_inputs(i, ipt) for i, ipt in enumerate(self.inputs, start=1)]
      +435
      +436        sample_paths_in = [
      +437            {t: Path(p) for t, p in zip(input_ids, ipts)} for ipts in inputs
      +438        ]
      +439
      +440        sample_ids = _get_sample_ids(sample_paths_in)
      +441
      +442        def expand_outputs():
      +443            if isinstance(self.outputs, str):
      +444                outputs = [
      +445                    tuple(
      +446                        Path(
      +447                            self.outputs.format(
      +448                                model_id=self.descr_id, output_id=t, sample_id=s
      +449                            )
      +450                        )
      +451                        for t in output_ids
      +452                    )
      +453                    for s in sample_ids
      +454                ]
      +455            else:
      +456                outputs = [
      +457                    tuple(
      +458                        Path(p.format(model_id=self.descr_id, output_id=t, sample_id=s))
      +459                        for t, p in zip(output_ids, self.outputs)
      +460                    )
      +461                    for s in sample_ids
      +462                ]
      +463
      +464            for i, out in enumerate(outputs, start=1):
      +465                if len(set(out)) < len(out):
      +466                    raise ValueError(
      +467                        f"[output sample #{i}] Include '{{output_id}}' in path pattern or explicitly specify {len(output_ids)} distinct output paths (got {out})"
      +468                    )
      +469
      +470                if len(out) != len(output_ids):
      +471                    raise ValueError(
      +472                        f"[output sample #{i}] Expected {len(output_ids)} outputs {output_ids}, got {out}"
      +473                    )
      +474
      +475            return outputs
      +476
      +477        outputs = expand_outputs()
      +478
      +479        sample_paths_out = [
      +480            {MemberId(t): Path(p) for t, p in zip(output_ids, out)} for out in outputs
      +481        ]
      +482
      +483        if not self.overwrite:
      +484            for sample_paths in sample_paths_out:
      +485                for p in sample_paths.values():
      +486                    if p.exists():
      +487                        raise FileExistsError(
      +488                            f"{p} already exists. use --overwrite to (re-)write outputs anyway."
      +489                        )
      +490        if self.preview:
      +491            print("🛈 bioimageio prediction preview structure:")
      +492            pprint(
      +493                {
      +494                    "{sample_id}": dict(
      +495                        inputs={"{input_id}": "<input path>"},
      +496                        outputs={"{output_id}": "<output path>"},
      +497                    )
      +498                }
      +499            )
      +500            print("🔎 bioimageio prediction preview output:")
      +501            pprint(
      +502                {
      +503                    s: dict(
      +504                        inputs={t: p.as_posix() for t, p in sp_in.items()},
      +505                        outputs={t: p.as_posix() for t, p in sp_out.items()},
      +506                    )
      +507                    for s, sp_in, sp_out in zip(
      +508                        sample_ids, sample_paths_in, sample_paths_out
      +509                    )
      +510                }
      +511            )
      +512            return
      +513
      +514        def input_dataset(stat: Stat):
      +515            for s, sp_in in zip(sample_ids, sample_paths_in):
      +516                yield load_sample_for_model(
      +517                    model=model_descr,
      +518                    paths=sp_in,
      +519                    stat=stat,
      +520                    sample_id=s,
      +521                )
      +522
      +523        stat: Dict[Measure, MeasureValue] = dict(
      +524            _get_stat(
      +525                model_descr, input_dataset({}), len(sample_ids), self.stats
      +526            ).items()
      +527        )
      +528
      +529        pp = create_prediction_pipeline(
      +530            model_descr,
      +531            weight_format=None if self.weight_format == "any" else self.weight_format,
      +532        )
      +533        predict_method = (
      +534            pp.predict_sample_with_blocking
      +535            if self.blockwise
      +536            else pp.predict_sample_without_blocking
      +537        )
      +538
      +539        for sample_in, sp_out in tqdm(
      +540            zip(input_dataset(dict(stat)), sample_paths_out),
      +541            total=len(inputs),
      +542            desc=f"predict with {self.descr_id}",
      +543            unit="sample",
      +544        ):
      +545            sample_out = predict_method(sample_in)
      +546            save_sample(sp_out, sample_out)
      +
      + + +

      Run inference on your data with a bioimage.io model.

      +
      + + +
      +
      + inputs: Annotated[Sequence[Union[str, Annotated[Tuple[str, ...], MinLen(min_length=1)]]], MinLen(min_length=1)] + + +
      + + +

      Model input sample paths (for each input tensor)

      The input paths are expected to have shape...

      • (n_samples,) or (n_samples,1) for models expecting a single input tensor
      • (n_samples,) containing the substring '{input_id}', or
      • (n_samples, n_model_inputs) to provide each input tensor path explicitly.

      All substrings that are replaced by metadata from the model description:

      • '{model_id}'
      • '{input_id}'

      Example inputs to process sample 'a' and 'b' for a model expecting a 'raw' and a 'mask' input tensor:
      --inputs="[[\"a_raw.tif\",\"a_mask.tif\"],[\"b_raw.tif\",\"b_mask.tif\"]]"
      (Note that JSON double quotes need to be escaped.)

      Alternatively a bioimageio-cli.yaml (or bioimageio-cli.json) file may provide the arguments, e.g.:

      inputs:
      - [a_raw.tif, a_mask.tif]
      - [b_raw.tif, b_mask.tif]

      .npy and any file extension supported by imageio are supported. Available formats are listed at https://imageio.readthedocs.io/en/stable/formats/index.html#all-formats. Some formats have additional dependencies.

      +
      + + +
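To make the pattern expansion concrete: a single string is formatted once per model input tensor (the tensor ids 'raw' and 'mask' below are hypothetical):

```python
# expand_inputs formats one pattern per input tensor of the model.
pattern = "{input_id}/001.tif"
input_ids = ["raw", "mask"]  # hypothetical ids taken from a model description
print([pattern.format(model_id="my-model", input_id=t) for t in input_ids])
# ['raw/001.tif', 'mask/001.tif']
```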
      +
      +
      + outputs: Union[str, Annotated[Tuple[str, ...], MinLen(min_length=1)]] + + +
      + + +

      Model output path pattern (per output tensor)

      All substrings that are replaced:

      • '{model_id}' (from model description)
      • '{output_id}' (from model description)
      • '{sample_id}' (extracted from input paths)
      +
      + + +
      +
      +
      + overwrite: bool + + +
      + + +

      allow overwriting existing output files

      +
      + + +
      +
      +
      + blockwise: bool + + +
      + + +

      process inputs blockwise

      +
      + + +
      +
      +
      + stats: pathlib.Path + + +
      + + +

      path to dataset statistics (will be written if it does not exist, but the model requires statistical dataset measures)

      +
      + + +
      +
      +
      + preview: bool + + +
      + + +

      preview which files would be processed and what outputs would be generated.

      +
      + + +
      +
      +
      + weight_format: Literal['keras_hdf5', 'onnx', 'pytorch_state_dict', 'tensorflow_js', 'tensorflow_saved_model_bundle', 'torchscript', 'any'] + + +
      + + +

      The weight format to use.

      +
      + + +
      +
      +
      + example: bool + + +
      + + +

      generate and run an example

      + +
        +
      1. downloads example model inputs
      2. creates a {model_id}_example folder
      3. writes input arguments to {model_id}_example/bioimageio-cli.yaml
      4. executes a preview dry-run
      5. executes prediction with example input
      +
      + + +
      +
      + +
      + + def + run(self): + + + +
      + +
      382    def run(self):
      +383        if self.example:
      +384            return self._example()
      +385
      +386        model_descr = ensure_description_is_model(self.descr)
      +387
      +388        input_ids = get_member_ids(model_descr.inputs)
      +389        output_ids = get_member_ids(model_descr.outputs)
      +390
      +391        minimum_input_ids = tuple(
      +392            str(ipt.id) if isinstance(ipt, v0_5.InputTensorDescr) else str(ipt.name)
      +393            for ipt in model_descr.inputs
      +394            if not isinstance(ipt, v0_5.InputTensorDescr) or not ipt.optional
      +395        )
      +396        maximum_input_ids = tuple(
      +397            str(ipt.id) if isinstance(ipt, v0_5.InputTensorDescr) else str(ipt.name)
      +398            for ipt in model_descr.inputs
      +399        )
      +400
      +401        def expand_inputs(i: int, ipt: Union[str, Tuple[str, ...]]) -> Tuple[str, ...]:
      +402            if isinstance(ipt, str):
      +403                ipts = tuple(
      +404                    ipt.format(model_id=self.descr_id, input_id=t) for t in input_ids
      +405                )
      +406            else:
      +407                ipts = tuple(
      +408                    p.format(model_id=self.descr_id, input_id=t)
      +409                    for t, p in zip(input_ids, ipt)
      +410                )
      +411
      +412            if len(set(ipts)) < len(ipts):
      +413                if len(minimum_input_ids) == len(maximum_input_ids):
      +414                    n = len(minimum_input_ids)
      +415                else:
      +416                    n = f"{len(minimum_input_ids)}-{len(maximum_input_ids)}"
      +417
      +418                raise ValueError(
      +419                    f"[input sample #{i}] Include '{{input_id}}' in path pattern or explicitly specify {n} distinct input paths (got {ipt})"
      +420                )
      +421
      +422            if len(ipts) < len(minimum_input_ids):
      +423                raise ValueError(
      +424                    f"[input sample #{i}] Expected at least {len(minimum_input_ids)} inputs {minimum_input_ids}, got {ipts}"
      +425                )
      +426
      +427            if len(ipts) > len(maximum_input_ids):
      +428                raise ValueError(
      +429                    f"Expected at most {len(maximum_input_ids)} inputs {maximum_input_ids}, got {ipts}"
      +430                )
      +431
      +432            return ipts
      +433
      +434        inputs = [expand_inputs(i, ipt) for i, ipt in enumerate(self.inputs, start=1)]
      +435
      +436        sample_paths_in = [
      +437            {t: Path(p) for t, p in zip(input_ids, ipts)} for ipts in inputs
      +438        ]
      +439
      +440        sample_ids = _get_sample_ids(sample_paths_in)
      +441
      +442        def expand_outputs():
      +443            if isinstance(self.outputs, str):
      +444                outputs = [
      +445                    tuple(
      +446                        Path(
      +447                            self.outputs.format(
      +448                                model_id=self.descr_id, output_id=t, sample_id=s
      +449                            )
      +450                        )
      +451                        for t in output_ids
      +452                    )
      +453                    for s in sample_ids
      +454                ]
      +455            else:
      +456                outputs = [
      +457                    tuple(
      +458                        Path(p.format(model_id=self.descr_id, output_id=t, sample_id=s))
      +459                        for t, p in zip(output_ids, self.outputs)
      +460                    )
      +461                    for s in sample_ids
      +462                ]
      +463
      +464            for i, out in enumerate(outputs, start=1):
      +465                if len(set(out)) < len(out):
      +466                    raise ValueError(
      +467                        f"[output sample #{i}] Include '{{output_id}}' in path pattern or explicitly specify {len(output_ids)} distinct output paths (got {out})"
      +468                    )
      +469
      +470                if len(out) != len(output_ids):
      +471                    raise ValueError(
      +472                        f"[output sample #{i}] Expected {len(output_ids)} outputs {output_ids}, got {out}"
      +473                    )
      +474
      +475            return outputs
      +476
      +477        outputs = expand_outputs()
      +478
      +479        sample_paths_out = [
      +480            {MemberId(t): Path(p) for t, p in zip(output_ids, out)} for out in outputs
      +481        ]
      +482
      +483        if not self.overwrite:
      +484            for sample_paths in sample_paths_out:
      +485                for p in sample_paths.values():
      +486                    if p.exists():
      +487                        raise FileExistsError(
      +488                            f"{p} already exists. use --overwrite to (re-)write outputs anyway."
      +489                        )
      +490        if self.preview:
      +491            print("🛈 bioimageio prediction preview structure:")
      +492            pprint(
      +493                {
      +494                    "{sample_id}": dict(
      +495                        inputs={"{input_id}": "<input path>"},
      +496                        outputs={"{output_id}": "<output path>"},
      +497                    )
      +498                }
      +499            )
      +500            print("🔎 bioimageio prediction preview output:")
      +501            pprint(
      +502                {
      +503                    s: dict(
      +504                        inputs={t: p.as_posix() for t, p in sp_in.items()},
      +505                        outputs={t: p.as_posix() for t, p in sp_out.items()},
      +506                    )
      +507                    for s, sp_in, sp_out in zip(
      +508                        sample_ids, sample_paths_in, sample_paths_out
      +509                    )
      +510                }
      +511            )
      +512            return
      +513
      +514        def input_dataset(stat: Stat):
      +515            for s, sp_in in zip(sample_ids, sample_paths_in):
      +516                yield load_sample_for_model(
      +517                    model=model_descr,
      +518                    paths=sp_in,
      +519                    stat=stat,
      +520                    sample_id=s,
      +521                )
      +522
      +523        stat: Dict[Measure, MeasureValue] = dict(
      +524            _get_stat(
      +525                model_descr, input_dataset({}), len(sample_ids), self.stats
      +526            ).items()
      +527        )
      +528
      +529        pp = create_prediction_pipeline(
      +530            model_descr,
      +531            weight_format=None if self.weight_format == "any" else self.weight_format,
      +532        )
      +533        predict_method = (
      +534            pp.predict_sample_with_blocking
      +535            if self.blockwise
      +536            else pp.predict_sample_without_blocking
      +537        )
      +538
      +539        for sample_in, sp_out in tqdm(
      +540            zip(input_dataset(dict(stat)), sample_paths_out),
      +541            total=len(inputs),
      +542            desc=f"predict with {self.descr_id}",
      +543            unit="sample",
      +544        ):
      +545            sample_out = predict_method(sample_in)
      +546            save_sample(sp_out, sample_out)
      +
Inherited Members

JSON_FILE = 'bioimageio-cli.json'

YAML_FILE = 'bioimageio-cli.yaml'
class Bioimageio(pydantic_settings.main.BaseSettings):
      553class Bioimageio(
      +554    BaseSettings,
      +555    cli_parse_args=True,
      +556    cli_prog_name="bioimageio",
      +557    cli_use_class_docs_for_groups=True,
      +558    cli_implicit_flags=True,
      +559    use_attribute_docstrings=True,
      +560):
      +561    """bioimageio - CLI for bioimage.io resources 🦒"""
      +562
      +563    model_config = SettingsConfigDict(
      +564        json_file=JSON_FILE,
      +565        yaml_file=YAML_FILE,
      +566    )
      +567
      +568    validate_format: CliSubCommand[ValidateFormatCmd] = Field(alias="validate-format")
      +569    "Check a resource's metadata format"
      +570
      +571    test: CliSubCommand[TestCmd]
      +572    "Test a bioimageio resource (beyond meta data formatting)"
      +573
      +574    package: CliSubCommand[PackageCmd]
      +575    "Package a resource"
      +576
      +577    predict: CliSubCommand[PredictCmd]
      +578    "Predict with a model resource"
      +579
      +580    @classmethod
      +581    def settings_customise_sources(
      +582        cls,
      +583        settings_cls: Type[BaseSettings],
      +584        init_settings: PydanticBaseSettingsSource,
      +585        env_settings: PydanticBaseSettingsSource,
      +586        dotenv_settings: PydanticBaseSettingsSource,
      +587        file_secret_settings: PydanticBaseSettingsSource,
      +588    ) -> Tuple[PydanticBaseSettingsSource, ...]:
      +589        cli: CliSettingsSource[BaseSettings] = CliSettingsSource(
      +590            settings_cls,
      +591            cli_parse_args=True,
      +592            formatter_class=RawTextHelpFormatter,
      +593        )
      +594        sys_args = pformat(sys.argv)
      +595        logger.info("starting CLI with arguments:\n{}", sys_args)
      +596        return (
      +597            cli,
      +598            init_settings,
      +599            YamlConfigSettingsSource(settings_cls),
      +600            JsonConfigSettingsSource(settings_cls),
      +601        )
      +602
      +603    @model_validator(mode="before")
      +604    @classmethod
      +605    def _log(cls, data: Any):
      +606        logger.info(
      +607            "loaded CLI input:\n{}",
      +608            pformat({k: v for k, v in data.items() if v is not None}),
      +609        )
      +610        return data
      +611
      +612    def run(self):
      +613        logger.info(
      +614            "executing CLI command:\n{}",
      +615            pformat({k: v for k, v in self.model_dump().items() if v is not None}),
      +616        )
      +617        cmd = self.validate_format or self.test or self.package or self.predict
      +618        assert cmd is not None
      +619        cmd.run()

      bioimageio - CLI for bioimage.io resources 🦒

library versions: bioimageio.core 0.7.0, bioimageio.spec 0.7.0

spec format versions: model RDF 0.5.3, dataset RDF 0.3.0, notebook RDF 0.3.0
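A minimal sketch of invoking the CLI programmatically, assuming bioimageio.core is installed; --help prints the top-level usage, listing the validate-format, test, package and predict subcommands defined below, and then exits:

import sys

from bioimageio.core.cli import Bioimageio

sys.argv = ["bioimageio", "--help"]  # emulate `bioimageio --help` on the command line
try:
    Bioimageio().run()
except SystemExit:
    pass  # --help prints the usage text and exits before run() is reached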

validate_format: Annotated[Optional[ValidateFormatCmd], <class 'pydantic_settings.sources._CliSubCommand'>]

      Check a resource's metadata format

test: Annotated[Optional[TestCmd], <class 'pydantic_settings.sources._CliSubCommand'>]

      Test a bioimageio resource (beyond meta data formatting)

package: Annotated[Optional[PackageCmd], <class 'pydantic_settings.sources._CliSubCommand'>]

      Package a resource

predict: Annotated[Optional[PredictCmd], <class 'pydantic_settings.sources._CliSubCommand'>]

      Predict with a model resource

@classmethod
def settings_customise_sources( cls, settings_cls: Type[pydantic_settings.main.BaseSettings], init_settings: pydantic_settings.sources.PydanticBaseSettingsSource, env_settings: pydantic_settings.sources.PydanticBaseSettingsSource, dotenv_settings: pydantic_settings.sources.PydanticBaseSettingsSource, file_secret_settings: pydantic_settings.sources.PydanticBaseSettingsSource) -> Tuple[pydantic_settings.sources.PydanticBaseSettingsSource, ...]:
      580    @classmethod
      +581    def settings_customise_sources(
      +582        cls,
      +583        settings_cls: Type[BaseSettings],
      +584        init_settings: PydanticBaseSettingsSource,
      +585        env_settings: PydanticBaseSettingsSource,
      +586        dotenv_settings: PydanticBaseSettingsSource,
      +587        file_secret_settings: PydanticBaseSettingsSource,
      +588    ) -> Tuple[PydanticBaseSettingsSource, ...]:
      +589        cli: CliSettingsSource[BaseSettings] = CliSettingsSource(
      +590            settings_cls,
      +591            cli_parse_args=True,
      +592            formatter_class=RawTextHelpFormatter,
      +593        )
      +594        sys_args = pformat(sys.argv)
      +595        logger.info("starting CLI with arguments:\n{}", sys_args)
      +596        return (
      +597            cli,
      +598            init_settings,
      +599            YamlConfigSettingsSource(settings_cls),
      +600            JsonConfigSettingsSource(settings_cls),
      +601        )
      +
      + + +

      Define the sources and their order for loading the settings values.

Arguments:

• settings_cls: The Settings class.
• init_settings: The InitSettingsSource instance.
• env_settings: The EnvSettingsSource instance.
• dotenv_settings: The DotEnvSettingsSource instance.
• file_secret_settings: The SecretsSettingsSource instance.

Returns:

      A tuple containing the sources and their order for loading the settings values.
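A hedged illustration of this ordering for the bioimageio CLI (file names taken from the JSON_FILE and YAML_FILE constants above; the file content is only a placeholder comment, since the exact keys depend on the chosen subcommand):

from pathlib import Path

# CLI arguments override init values, which override bioimageio-cli.yaml,
# which in turn overrides bioimageio-cli.json, per the tuple returned above.
Path("bioimageio-cli.yaml").write_text("# defaults for the bioimageio CLI go here\n")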

def run(self):
      612    def run(self):
      +613        logger.info(
      +614            "executing CLI command:\n{}",
      +615            pformat({k: v for k, v in self.model_dump().items() if v is not None}),
      +616        )
      +617        cmd = self.validate_format or self.test or self.package or self.predict
      +618        assert cmd is not None
      +619        cmd.run()
      +
      + + + + +
      +
      +
      + + \ No newline at end of file diff --git a/bioimageio/core/commands.html b/bioimageio/core/commands.html new file mode 100644 index 00000000..2d41a545 --- /dev/null +++ b/bioimageio/core/commands.html @@ -0,0 +1,544 @@ + + + + + + + bioimageio.core.commands API documentation + + + + + + + + + + +
      +
      +

      +bioimageio.core.commands

      + +

These functions implement the logic of the bioimageio command line interface defined in bioimageio.core.cli.

      +
      + + + + + +
       1"""These functions implement the logic of the bioimageio command line interface
      + 2defined in `bioimageio.core.cli`."""
      + 3
      + 4from pathlib import Path
      + 5from typing import Optional, Sequence, Union
      + 6
      + 7from typing_extensions import Literal
      + 8
      + 9from bioimageio.spec import (
      +10    InvalidDescr,
      +11    ResourceDescr,
      +12    save_bioimageio_package,
      +13    save_bioimageio_package_as_folder,
      +14)
      +15from bioimageio.spec.model.v0_5 import WeightsFormat
      +16
      +17from ._resource_tests import test_description
      +18
      +19WeightFormatArgAll = Literal[WeightsFormat, "all"]
      +20WeightFormatArgAny = Literal[WeightsFormat, "any"]
      +21
      +22
      +23def test(
      +24    descr: Union[ResourceDescr, InvalidDescr],
      +25    *,
      +26    weight_format: WeightFormatArgAll = "all",
      +27    devices: Optional[Union[str, Sequence[str]]] = None,
      +28    decimal: int = 4,
      +29) -> int:
      +30    """test a bioimageio resource
      +31
      +32    Args:
      +33        source: Path or URL to the bioimageio resource description file
      +34                (bioimageio.yaml or rdf.yaml) or to a zipped resource
      +35        weight_format: (model only) The weight format to use
      +36        devices: Device(s) to use for testing
      +37        decimal: Precision for numerical comparisons
      +38    """
      +39    if isinstance(descr, InvalidDescr):
      +40        descr.validation_summary.display()
      +41        return 1
      +42
      +43    summary = test_description(
      +44        descr,
      +45        weight_format=None if weight_format == "all" else weight_format,
      +46        devices=[devices] if isinstance(devices, str) else devices,
      +47        decimal=decimal,
      +48    )
      +49    summary.display()
      +50    return 0 if summary.status == "passed" else 1
      +51
      +52
      +53def validate_format(
      +54    descr: Union[ResourceDescr, InvalidDescr],
      +55):
      +56    """validate the meta data format of a bioimageio resource
      +57
      +58    Args:
      +59        descr: a bioimageio resource description
      +60    """
      +61    descr.validation_summary.display()
      +62    return 0 if descr.validation_summary.status == "passed" else 1
      +63
      +64
      +65def package(
      +66    descr: ResourceDescr, path: Path, *, weight_format: WeightFormatArgAll = "all"
      +67):
      +68    """Save a resource's metadata with its associated files.
      +69
      +70    Note: If `path` does not have a `.zip` suffix this command will save the
      +71          package as an unzipped folder instead.
      +72
      +73    Args:
      +74        descr: a bioimageio resource description
      +75        path: output path
      +76        weight-format: include only this single weight-format (if not 'all').
      +77    """
      +78    if isinstance(descr, InvalidDescr):
      +79        descr.validation_summary.display()
      +80        raise ValueError("resource description is invalid")
      +81
      +82    if weight_format == "all":
      +83        weights_priority_order = None
      +84    else:
      +85        weights_priority_order = (weight_format,)
      +86
      +87    if path.suffix == ".zip":
      +88        _ = save_bioimageio_package(
      +89            descr,
      +90            output_path=path,
      +91            weights_priority_order=weights_priority_order,
      +92        )
      +93    else:
      +94        _ = save_bioimageio_package_as_folder(
      +95            descr,
      +96            output_path=path,
      +97            weights_priority_order=weights_priority_order,
      +98        )
      +99    return 0
      +
      + + +
WeightFormatArgAll = typing.Literal['keras_hdf5', 'onnx', 'pytorch_state_dict', 'tensorflow_js', 'tensorflow_saved_model_bundle', 'torchscript', 'all']

WeightFormatArgAny = typing.Literal['keras_hdf5', 'onnx', 'pytorch_state_dict', 'tensorflow_js', 'tensorflow_saved_model_bundle', 'torchscript', 'any']
      +
      + +
      + + def + test( descr: Union[Annotated[Union[Annotated[Union[bioimageio.spec.application.v0_2.ApplicationDescr, bioimageio.spec.ApplicationDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.dataset.v0_2.DatasetDescr, bioimageio.spec.DatasetDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.model.v0_4.ModelDescr, bioimageio.spec.ModelDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.NotebookDescr, bioimageio.spec.NotebookDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.generic.v0_2.GenericDescr, bioimageio.spec.GenericDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], bioimageio.spec.InvalidDescr], *, weight_format: Literal['keras_hdf5', 'onnx', 'pytorch_state_dict', 'tensorflow_js', 'tensorflow_saved_model_bundle', 'torchscript', 'all'] = 'all', devices: Union[str, Sequence[str], NoneType] = None, decimal: int = 4) -> int: + + + +
      + +
      24def test(
      +25    descr: Union[ResourceDescr, InvalidDescr],
      +26    *,
      +27    weight_format: WeightFormatArgAll = "all",
      +28    devices: Optional[Union[str, Sequence[str]]] = None,
      +29    decimal: int = 4,
      +30) -> int:
      +31    """test a bioimageio resource
      +32
      +33    Args:
      +34        source: Path or URL to the bioimageio resource description file
      +35                (bioimageio.yaml or rdf.yaml) or to a zipped resource
      +36        weight_format: (model only) The weight format to use
      +37        devices: Device(s) to use for testing
      +38        decimal: Precision for numerical comparisons
      +39    """
      +40    if isinstance(descr, InvalidDescr):
      +41        descr.validation_summary.display()
      +42        return 1
      +43
      +44    summary = test_description(
      +45        descr,
      +46        weight_format=None if weight_format == "all" else weight_format,
      +47        devices=[devices] if isinstance(devices, str) else devices,
      +48        decimal=decimal,
      +49    )
      +50    summary.display()
      +51    return 0 if summary.status == "passed" else 1
      +
      + + +

      test a bioimageio resource

Arguments:

• source: Path or URL to the bioimageio resource description file (bioimageio.yaml or rdf.yaml) or to a zipped resource
• weight_format: (model only) The weight format to use
• devices: Device(s) to use for testing
• decimal: Precision for numerical comparisons
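A hedged usage sketch (the rdf.yaml path is hypothetical; load_description is the bioimageio.spec loader and may return an InvalidDescr, which test() handles by displaying the validation summary and returning 1):

from bioimageio.core.commands import test
from bioimageio.spec import load_description

descr = load_description("path/to/rdf.yaml")  # hypothetical source
exit_code = test(descr, weight_format="all", devices=None, decimal=4)
print(exit_code)  # 0 if all tests passed, 1 otherwise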
      +
      + +
      + + def + validate_format( descr: Union[Annotated[Union[Annotated[Union[bioimageio.spec.application.v0_2.ApplicationDescr, bioimageio.spec.ApplicationDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.dataset.v0_2.DatasetDescr, bioimageio.spec.DatasetDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.model.v0_4.ModelDescr, bioimageio.spec.ModelDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.NotebookDescr, bioimageio.spec.NotebookDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.generic.v0_2.GenericDescr, bioimageio.spec.GenericDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], bioimageio.spec.InvalidDescr]): + + + +
      + +
      54def validate_format(
      +55    descr: Union[ResourceDescr, InvalidDescr],
      +56):
      +57    """validate the meta data format of a bioimageio resource
      +58
      +59    Args:
      +60        descr: a bioimageio resource description
      +61    """
      +62    descr.validation_summary.display()
      +63    return 0 if descr.validation_summary.status == "passed" else 1
      +
      + + +

      validate the meta data format of a bioimageio resource

Arguments:

• descr: a bioimageio resource description
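A hedged usage sketch along the same lines (hypothetical source path):

from bioimageio.core.commands import validate_format
from bioimageio.spec import load_description

descr = load_description("path/to/rdf.yaml")  # hypothetical source
exit_code = validate_format(descr)  # 0 if the format validation passed, 1 otherwise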
      +
      + +
      + + def + package( descr: Union[Annotated[Union[Annotated[Union[bioimageio.spec.application.v0_2.ApplicationDescr, bioimageio.spec.ApplicationDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.dataset.v0_2.DatasetDescr, bioimageio.spec.DatasetDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.model.v0_4.ModelDescr, bioimageio.spec.ModelDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.NotebookDescr, bioimageio.spec.NotebookDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.generic.v0_2.GenericDescr, bioimageio.spec.GenericDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], path: pathlib.Path, *, weight_format: Literal['keras_hdf5', 'onnx', 'pytorch_state_dict', 'tensorflow_js', 'tensorflow_saved_model_bundle', 'torchscript', 'all'] = 'all'): + + + +
      + +
       66def package(
      + 67    descr: ResourceDescr, path: Path, *, weight_format: WeightFormatArgAll = "all"
      + 68):
      + 69    """Save a resource's metadata with its associated files.
      + 70
      + 71    Note: If `path` does not have a `.zip` suffix this command will save the
      + 72          package as an unzipped folder instead.
      + 73
      + 74    Args:
      + 75        descr: a bioimageio resource description
      + 76        path: output path
      + 77        weight-format: include only this single weight-format (if not 'all').
      + 78    """
      + 79    if isinstance(descr, InvalidDescr):
      + 80        descr.validation_summary.display()
      + 81        raise ValueError("resource description is invalid")
      + 82
      + 83    if weight_format == "all":
      + 84        weights_priority_order = None
      + 85    else:
      + 86        weights_priority_order = (weight_format,)
      + 87
      + 88    if path.suffix == ".zip":
      + 89        _ = save_bioimageio_package(
      + 90            descr,
      + 91            output_path=path,
      + 92            weights_priority_order=weights_priority_order,
      + 93        )
      + 94    else:
      + 95        _ = save_bioimageio_package_as_folder(
      + 96            descr,
      + 97            output_path=path,
      + 98            weights_priority_order=weights_priority_order,
      + 99        )
      +100    return 0
      +
      + + +

      Save a resource's metadata with its associated files.

Note: If path does not have a .zip suffix this command will save the package as an unzipped folder instead.

Arguments:

• descr: a bioimageio resource description
• path: output path
• weight-format: include only this single weight-format (if not 'all').
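A hedged usage sketch (hypothetical source and output paths; a .zip suffix produces a zipped package, any other path an unzipped folder, and an InvalidDescr raises ValueError):

from pathlib import Path

from bioimageio.core.commands import package
from bioimageio.spec import load_description

descr = load_description("path/to/rdf.yaml")  # hypothetical source
package(descr, Path("packaged-model.zip"), weight_format="all")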
      +
      + + \ No newline at end of file diff --git a/bioimageio/core/common.html b/bioimageio/core/common.html new file mode 100644 index 00000000..250265a5 --- /dev/null +++ b/bioimageio/core/common.html @@ -0,0 +1,864 @@ + + + + + + + bioimageio.core.common API documentation + + + + + + + + + + +
      +
      +

      +bioimageio.core.common

      + + + + + + +
        1from __future__ import annotations
      +  2
      +  3from types import MappingProxyType
      +  4from typing import (
      +  5    Hashable,
      +  6    Literal,
      +  7    Mapping,
      +  8    NamedTuple,
      +  9    Tuple,
      + 10    TypeVar,
      + 11    Union,
      + 12)
      + 13
      + 14from typing_extensions import Self, assert_never
      + 15
      + 16from bioimageio.spec.model import v0_5
      + 17
      + 18DTypeStr = Literal[
      + 19    "bool",
      + 20    "float32",
      + 21    "float64",
      + 22    "int8",
      + 23    "int16",
      + 24    "int32",
      + 25    "int64",
      + 26    "uint8",
      + 27    "uint16",
      + 28    "uint32",
      + 29    "uint64",
      + 30]
      + 31
      + 32
      + 33_LeftRight_T = TypeVar("_LeftRight_T", bound="_LeftRight")
      + 34_LeftRightLike = Union[int, Tuple[int, int], _LeftRight_T]
      + 35
      + 36
      + 37class _LeftRight(NamedTuple):
      + 38    left: int
      + 39    right: int
      + 40
      + 41    @classmethod
      + 42    def create(cls, like: _LeftRightLike[Self]) -> Self:
      + 43        if isinstance(like, cls):
      + 44            return like
      + 45        elif isinstance(like, tuple):
      + 46            return cls(*like)
      + 47        elif isinstance(like, int):
      + 48            return cls(like, like)
      + 49        else:
      + 50            assert_never(like)
      + 51
      + 52
      + 53_Where = Literal["left", "right", "left_and_right"]
      + 54
      + 55
      + 56class CropWidth(_LeftRight):
      + 57    pass
      + 58
      + 59
      + 60CropWidthLike = _LeftRightLike[CropWidth]
      + 61CropWhere = _Where
      + 62
      + 63
      + 64class Halo(_LeftRight):
      + 65    pass
      + 66
      + 67
      + 68HaloLike = _LeftRightLike[Halo]
      + 69
      + 70
      + 71class OverlapWidth(_LeftRight):
      + 72    pass
      + 73
      + 74
      + 75class PadWidth(_LeftRight):
      + 76    pass
      + 77
      + 78
      + 79PadWidthLike = _LeftRightLike[PadWidth]
      + 80PadMode = Literal["edge", "reflect", "symmetric"]
      + 81PadWhere = _Where
      + 82
      + 83
      + 84class SliceInfo(NamedTuple):
      + 85    start: int
      + 86    stop: int
      + 87
      + 88
      + 89SampleId = Hashable
      + 90MemberId = v0_5.TensorId
      + 91T = TypeVar("T")
      + 92PerMember = Mapping[MemberId, T]
      + 93
      + 94BlockIndex = int
      + 95TotalNumberOfBlocks = int
      + 96
      + 97
      + 98K = TypeVar("K", bound=Hashable)
      + 99V = TypeVar("V")
      +100
      +101Frozen = MappingProxyType
      +102# class Frozen(Mapping[K, V]):  # adapted from xarray.core.utils.Frozen
      +103#     """Wrapper around an object implementing the mapping interface to make it
      +104#     immutable."""
      +105
      +106#     __slots__ = ("mapping",)
      +107
      +108#     def __init__(self, mapping: Mapping[K, V]):
      +109#         super().__init__()
      +110#         self.mapping = deepcopy(
      +111#             mapping
      +112#         )  # added deepcopy (compared to xarray.core.utils.Frozen)
      +113
      +114#     def __getitem__(self, key: K) -> V:
      +115#         return self.mapping[key]
      +116
      +117#     def __iter__(self) -> Iterator[K]:
      +118#         return iter(self.mapping)
      +119
      +120#     def __len__(self) -> int:
      +121#         return len(self.mapping)
      +122
      +123#     def __contains__(self, key: object) -> bool:
      +124#         return key in self.mapping
      +125
      +126#     def __repr__(self) -> str:
      +127#         return f"{type(self).__name__}({self.mapping!r})"
      +
      + + +
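A minimal usage sketch of the helper types defined above (create is the classmethod inherited from _LeftRight):

from bioimageio.core.common import Halo, PadWidth, SliceInfo

halo = Halo.create(8)          # same halo on both sides -> Halo(left=8, right=8)
pad = PadWidth.create((2, 3))  # asymmetric padding -> PadWidth(left=2, right=3)
roi = SliceInfo(start=0, stop=128)
print(halo, pad, roi.stop - roi.start)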
      +
      +
      + DTypeStr = + + typing.Literal['bool', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'uint8', 'uint16', 'uint32', 'uint64'] + + +
      + + + + +
      +
class CropWidth(typing.NamedTuple):
      57class CropWidth(_LeftRight):
      +58    pass
      +
      + + +

      _LeftRight(left, right)

      +
      + + +
      +
      + + CropWidth(left: int, right: int) + + +
      + + +

      Create new instance of _LeftRight(left, right)

      +
      + + +
      +
      +
      Inherited Members
      +
      + +
CropWidthLike = typing.Union[int, typing.Tuple[int, int], CropWidth]

CropWhere = typing.Literal['left', 'right', 'left_and_right']
      + +
class Halo(typing.NamedTuple):
      65class Halo(_LeftRight):
      +66    pass
      +
      + + +

      _LeftRight(left, right)

      +
      + + +
      +
      + + Halo(left: int, right: int) + + +
      + + +

      Create new instance of _LeftRight(left, right)

      +
      + + +
      +
      +
      Inherited Members
      +
      + +
      +
      +
HaloLike = typing.Union[int, typing.Tuple[int, int], Halo]
      + +
class OverlapWidth(typing.NamedTuple):
      72class OverlapWidth(_LeftRight):
      +73    pass
      +
      + + +

      _LeftRight(left, right)

      +
      + + +
      +
      + + OverlapWidth(left: int, right: int) + + +
      + + +

      Create new instance of _LeftRight(left, right)

      +
      + + +
      +
      +
      Inherited Members
      +
      + +
      +
      +
      +
      + +
class PadWidth(typing.NamedTuple):
      76class PadWidth(_LeftRight):
      +77    pass
      +
      + + +

      _LeftRight(left, right)

      +
      + + +
      +
      + + PadWidth(left: int, right: int) + + +
      + + +

      Create new instance of _LeftRight(left, right)

      +
      + + +
      +
      +
      Inherited Members
      +
      + +
      +
      +
      +
      +
PadWidthLike = typing.Union[int, typing.Tuple[int, int], PadWidth]

PadMode = typing.Literal['edge', 'reflect', 'symmetric']

PadWhere = typing.Literal['left', 'right', 'left_and_right']
      +
      + +
class SliceInfo(typing.NamedTuple):
      85class SliceInfo(NamedTuple):
      +86    start: int
      +87    stop: int
      +
      + + +

      SliceInfo(start, stop)

      +
      + + +
      +
      + + SliceInfo(start: int, stop: int) + + +
      + + +

      Create new instance of SliceInfo(start, stop)

      +
      + + +
      +
      +
      + start: int + + +
      + + +

      Alias for field number 0

      +
      + + +
      +
      +
      + stop: int + + +
      + + +

      Alias for field number 1

      +
      + + +
      +
      +
      +
SampleId = typing.Hashable

MemberId = <class 'bioimageio.spec.model.v0_5.TensorId'>

PerMember = typing.Mapping[bioimageio.spec.model.v0_5.TensorId, ~T]

BlockIndex = <class 'int'>

TotalNumberOfBlocks = <class 'int'>

Frozen = <class 'mappingproxy'>
      +
      + + \ No newline at end of file diff --git a/bioimageio/core/digest_spec.html b/bioimageio/core/digest_spec.html index ef53711a..5f298da9 100644 --- a/bioimageio/core/digest_spec.html +++ b/bioimageio/core/digest_spec.html @@ -86,7 +86,7 @@

      API Documentation

    -
    bioimageio.core 0.6.10
    +
    bioimageio.core 0.7.0
    built with pdoc 23from numpy.typing import NDArray 24from typing_extensions import Unpack, assert_never 25 - 26from bioimageio.core.common import MemberId, PerMember, SampleId - 27from bioimageio.core.io import load_tensor - 28from bioimageio.core.sample import Sample - 29from bioimageio.spec._internal.io_utils import HashKwargs, download - 30from bioimageio.spec.common import FileSource - 31from bioimageio.spec.model import AnyModelDescr, v0_4, v0_5 - 32from bioimageio.spec.model.v0_4 import CallableFromDepencency, CallableFromFile - 33from bioimageio.spec.model.v0_5 import ( - 34 ArchitectureFromFileDescr, - 35 ArchitectureFromLibraryDescr, - 36 ParameterizedSize_N, - 37) - 38from bioimageio.spec.utils import load_array - 39 - 40from .axis import AxisId, AxisInfo, AxisLike, PerAxis - 41from .block_meta import split_multiple_shapes_into_blocks - 42from .common import Halo, MemberId, PerMember, SampleId, TotalNumberOfBlocks - 43from .sample import ( - 44 LinearSampleAxisTransform, - 45 Sample, - 46 SampleBlockMeta, - 47 sample_block_meta_generator, - 48) - 49from .stat_measures import Stat - 50from .tensor import Tensor + 26from bioimageio.spec._internal.io import resolve_and_extract + 27from bioimageio.spec._internal.io_utils import HashKwargs + 28from bioimageio.spec.common import FileSource + 29from bioimageio.spec.model import AnyModelDescr, v0_4, v0_5 + 30from bioimageio.spec.model.v0_4 import CallableFromDepencency, CallableFromFile + 31from bioimageio.spec.model.v0_5 import ( + 32 ArchitectureFromFileDescr, + 33 ArchitectureFromLibraryDescr, + 34 ParameterizedSize_N, + 35) + 36from bioimageio.spec.utils import load_array + 37 + 38from .axis import AxisId, AxisInfo, AxisLike, PerAxis + 39from .block_meta import split_multiple_shapes_into_blocks + 40from .common import Halo, MemberId, PerMember, SampleId, TotalNumberOfBlocks + 41from .io import load_tensor + 42from .sample import ( + 43 LinearSampleAxisTransform, + 44 Sample, + 45 SampleBlockMeta, + 46 sample_block_meta_generator, + 47) + 48from .stat_measures import Stat + 49from .tensor import Tensor + 50 51 - 52 - 53def import_callable( - 54 node: Union[CallableFromDepencency, ArchitectureFromLibraryDescr], - 55 /, - 56 **kwargs: Unpack[HashKwargs], - 57) -> Callable[..., Any]: - 58 """import a callable (e.g. a torch.nn.Module) from a spec node describing it""" - 59 if isinstance(node, CallableFromDepencency): - 60 module = importlib.import_module(node.module_name) - 61 c = getattr(module, str(node.callable_name)) - 62 elif isinstance(node, ArchitectureFromLibraryDescr): - 63 module = importlib.import_module(node.import_from) - 64 c = getattr(module, str(node.callable)) - 65 elif isinstance(node, CallableFromFile): - 66 c = _import_from_file_impl(node.source_file, str(node.callable_name), **kwargs) - 67 elif isinstance(node, ArchitectureFromFileDescr): - 68 c = _import_from_file_impl(node.source, str(node.callable), sha256=node.sha256) - 69 - 70 else: - 71 assert_never(node) - 72 - 73 if not callable(c): - 74 raise ValueError(f"{node} (imported: {c}) is not callable") - 75 - 76 return c + 52def import_callable( + 53 node: Union[CallableFromDepencency, ArchitectureFromLibraryDescr], + 54 /, + 55 **kwargs: Unpack[HashKwargs], + 56) -> Callable[..., Any]: + 57 """import a callable (e.g. 
a torch.nn.Module) from a spec node describing it""" + 58 if isinstance(node, CallableFromDepencency): + 59 module = importlib.import_module(node.module_name) + 60 c = getattr(module, str(node.callable_name)) + 61 elif isinstance(node, ArchitectureFromLibraryDescr): + 62 module = importlib.import_module(node.import_from) + 63 c = getattr(module, str(node.callable)) + 64 elif isinstance(node, CallableFromFile): + 65 c = _import_from_file_impl(node.source_file, str(node.callable_name), **kwargs) + 66 elif isinstance(node, ArchitectureFromFileDescr): + 67 c = _import_from_file_impl(node.source, str(node.callable), sha256=node.sha256) + 68 + 69 else: + 70 assert_never(node) + 71 + 72 if not callable(c): + 73 raise ValueError(f"{node} (imported: {c}) is not callable") + 74 + 75 return c + 76 77 - 78 - 79def _import_from_file_impl( - 80 source: FileSource, callable_name: str, **kwargs: Unpack[HashKwargs] - 81): - 82 local_file = download(source, **kwargs) - 83 module_name = local_file.path.stem - 84 importlib_spec = importlib.util.spec_from_file_location( - 85 module_name, local_file.path - 86 ) - 87 if importlib_spec is None: - 88 raise ImportError(f"Failed to import {module_name} from {source}.") - 89 - 90 dep = importlib.util.module_from_spec(importlib_spec) - 91 importlib_spec.loader.exec_module(dep) # type: ignore # todo: possible to use "loader.load_module"? - 92 return getattr(dep, callable_name) + 78def _import_from_file_impl( + 79 source: FileSource, callable_name: str, **kwargs: Unpack[HashKwargs] + 80): + 81 local_file = resolve_and_extract(source, **kwargs) + 82 module_name = local_file.path.stem + 83 importlib_spec = importlib.util.spec_from_file_location( + 84 module_name, local_file.path + 85 ) + 86 if importlib_spec is None: + 87 raise ImportError(f"Failed to import {module_name} from {source}.") + 88 + 89 dep = importlib.util.module_from_spec(importlib_spec) + 90 importlib_spec.loader.exec_module(dep) # type: ignore # todo: possible to use "loader.load_module"? 
+ 91 return getattr(dep, callable_name) + 92 93 - 94 - 95def get_axes_infos( - 96 io_descr: Union[ - 97 v0_4.InputTensorDescr, - 98 v0_4.OutputTensorDescr, - 99 v0_5.InputTensorDescr, -100 v0_5.OutputTensorDescr, -101 ] -102) -> List[AxisInfo]: -103 """get a unified, simplified axis representation from spec axes""" -104 return [ -105 ( -106 AxisInfo.create("i") -107 if isinstance(a, str) and a not in ("b", "i", "t", "c", "z", "y", "x") -108 else AxisInfo.create(a) -109 ) -110 for a in io_descr.axes -111 ] + 94def get_axes_infos( + 95 io_descr: Union[ + 96 v0_4.InputTensorDescr, + 97 v0_4.OutputTensorDescr, + 98 v0_5.InputTensorDescr, + 99 v0_5.OutputTensorDescr, +100 ], +101) -> List[AxisInfo]: +102 """get a unified, simplified axis representation from spec axes""" +103 return [ +104 ( +105 AxisInfo.create("i") +106 if isinstance(a, str) and a not in ("b", "i", "t", "c", "z", "y", "x") +107 else AxisInfo.create(a) +108 ) +109 for a in io_descr.axes +110 ] +111 112 -113 -114def get_member_id( -115 tensor_description: Union[ -116 v0_4.InputTensorDescr, -117 v0_4.OutputTensorDescr, -118 v0_5.InputTensorDescr, -119 v0_5.OutputTensorDescr, -120 ] -121) -> MemberId: -122 """get the normalized tensor ID, usable as a sample member ID""" -123 -124 if isinstance(tensor_description, (v0_4.InputTensorDescr, v0_4.OutputTensorDescr)): -125 return MemberId(tensor_description.name) -126 elif isinstance( -127 tensor_description, (v0_5.InputTensorDescr, v0_5.OutputTensorDescr) -128 ): -129 return tensor_description.id -130 else: -131 assert_never(tensor_description) +113def get_member_id( +114 tensor_description: Union[ +115 v0_4.InputTensorDescr, +116 v0_4.OutputTensorDescr, +117 v0_5.InputTensorDescr, +118 v0_5.OutputTensorDescr, +119 ], +120) -> MemberId: +121 """get the normalized tensor ID, usable as a sample member ID""" +122 +123 if isinstance(tensor_description, (v0_4.InputTensorDescr, v0_4.OutputTensorDescr)): +124 return MemberId(tensor_description.name) +125 elif isinstance( +126 tensor_description, (v0_5.InputTensorDescr, v0_5.OutputTensorDescr) +127 ): +128 return tensor_description.id +129 else: +130 assert_never(tensor_description) +131 132 -133 -134def get_member_ids( -135 tensor_descriptions: Sequence[ -136 Union[ -137 v0_4.InputTensorDescr, -138 v0_4.OutputTensorDescr, -139 v0_5.InputTensorDescr, -140 v0_5.OutputTensorDescr, -141 ] -142 ] -143) -> List[MemberId]: -144 """get normalized tensor IDs to be used as sample member IDs""" -145 return [get_member_id(descr) for descr in tensor_descriptions] +133def get_member_ids( +134 tensor_descriptions: Sequence[ +135 Union[ +136 v0_4.InputTensorDescr, +137 v0_4.OutputTensorDescr, +138 v0_5.InputTensorDescr, +139 v0_5.OutputTensorDescr, +140 ] +141 ], +142) -> List[MemberId]: +143 """get normalized tensor IDs to be used as sample member IDs""" +144 return [get_member_id(descr) for descr in tensor_descriptions] +145 146 -147 -148def get_test_inputs(model: AnyModelDescr) -> Sample: -149 """returns a model's test input sample""" -150 member_ids = get_member_ids(model.inputs) -151 if isinstance(model, v0_4.ModelDescr): -152 arrays = [load_array(tt) for tt in model.test_inputs] -153 else: -154 arrays = [load_array(d.test_tensor) for d in model.inputs] -155 -156 axes = [get_axes_infos(t) for t in model.inputs] -157 return Sample( -158 members={ -159 m: Tensor.from_numpy(arr, dims=ax) -160 for m, arr, ax in zip(member_ids, arrays, axes) -161 }, -162 stat={}, -163 id="test-input", -164 ) +147def get_test_inputs(model: AnyModelDescr) -> Sample: +148 
"""returns a model's test input sample""" +149 member_ids = get_member_ids(model.inputs) +150 if isinstance(model, v0_4.ModelDescr): +151 arrays = [load_array(tt) for tt in model.test_inputs] +152 else: +153 arrays = [load_array(d.test_tensor) for d in model.inputs] +154 +155 axes = [get_axes_infos(t) for t in model.inputs] +156 return Sample( +157 members={ +158 m: Tensor.from_numpy(arr, dims=ax) +159 for m, arr, ax in zip(member_ids, arrays, axes) +160 }, +161 stat={}, +162 id="test-sample", +163 ) +164 165 -166 -167def get_test_outputs(model: AnyModelDescr) -> Sample: -168 """returns a model's test output sample""" -169 member_ids = get_member_ids(model.outputs) -170 -171 if isinstance(model, v0_4.ModelDescr): -172 arrays = [load_array(tt) for tt in model.test_outputs] -173 else: -174 arrays = [load_array(d.test_tensor) for d in model.outputs] -175 -176 axes = [get_axes_infos(t) for t in model.outputs] -177 -178 return Sample( -179 members={ -180 m: Tensor.from_numpy(arr, dims=ax) -181 for m, arr, ax in zip(member_ids, arrays, axes) -182 }, -183 stat={}, -184 id="test-output", -185 ) +166def get_test_outputs(model: AnyModelDescr) -> Sample: +167 """returns a model's test output sample""" +168 member_ids = get_member_ids(model.outputs) +169 +170 if isinstance(model, v0_4.ModelDescr): +171 arrays = [load_array(tt) for tt in model.test_outputs] +172 else: +173 arrays = [load_array(d.test_tensor) for d in model.outputs] +174 +175 axes = [get_axes_infos(t) for t in model.outputs] +176 +177 return Sample( +178 members={ +179 m: Tensor.from_numpy(arr, dims=ax) +180 for m, arr, ax in zip(member_ids, arrays, axes) +181 }, +182 stat={}, +183 id="test-sample", +184 ) +185 186 -187 -188class IO_SampleBlockMeta(NamedTuple): -189 input: SampleBlockMeta -190 output: SampleBlockMeta +187class IO_SampleBlockMeta(NamedTuple): +188 input: SampleBlockMeta +189 output: SampleBlockMeta +190 191 -192 -193def get_input_halo(model: v0_5.ModelDescr, output_halo: PerMember[PerAxis[Halo]]): -194 """returns which halo input tensors need to be divided into blocks with such that -195 `output_halo` can be cropped from their outputs without intorducing gaps.""" -196 input_halo: Dict[MemberId, Dict[AxisId, Halo]] = {} -197 outputs = {t.id: t for t in model.outputs} -198 all_tensors = {**{t.id: t for t in model.inputs}, **outputs} -199 -200 for t, th in output_halo.items(): -201 axes = {a.id: a for a in outputs[t].axes} -202 -203 for a, ah in th.items(): -204 s = axes[a].size -205 if not isinstance(s, v0_5.SizeReference): -206 raise ValueError( -207 f"Unable to map output halo for {t}.{a} to an input axis" -208 ) -209 -210 axis = axes[a] -211 ref_axis = {a.id: a for a in all_tensors[s.tensor_id].axes}[s.axis_id] -212 -213 total_output_halo = sum(ah) -214 total_input_halo = total_output_halo * axis.scale / ref_axis.scale -215 assert ( -216 total_input_halo == int(total_input_halo) and total_input_halo % 2 == 0 -217 ) -218 input_halo.setdefault(s.tensor_id, {})[a] = Halo( -219 int(total_input_halo // 2), int(total_input_halo // 2) -220 ) -221 -222 return input_halo +192def get_input_halo(model: v0_5.ModelDescr, output_halo: PerMember[PerAxis[Halo]]): +193 """returns which halo input tensors need to be divided into blocks with, such that +194 `output_halo` can be cropped from their outputs without introducing gaps.""" +195 input_halo: Dict[MemberId, Dict[AxisId, Halo]] = {} +196 outputs = {t.id: t for t in model.outputs} +197 all_tensors = {**{t.id: t for t in model.inputs}, **outputs} +198 +199 for t, th in 
output_halo.items(): +200 axes = {a.id: a for a in outputs[t].axes} +201 +202 for a, ah in th.items(): +203 s = axes[a].size +204 if not isinstance(s, v0_5.SizeReference): +205 raise ValueError( +206 f"Unable to map output halo for {t}.{a} to an input axis" +207 ) +208 +209 axis = axes[a] +210 ref_axis = {a.id: a for a in all_tensors[s.tensor_id].axes}[s.axis_id] +211 +212 total_output_halo = sum(ah) +213 total_input_halo = total_output_halo * axis.scale / ref_axis.scale +214 assert ( +215 total_input_halo == int(total_input_halo) and total_input_halo % 2 == 0 +216 ) +217 input_halo.setdefault(s.tensor_id, {})[a] = Halo( +218 int(total_input_halo // 2), int(total_input_halo // 2) +219 ) +220 +221 return input_halo +222 223 -224 -225def get_block_transform(model: v0_5.ModelDescr): -226 """returns how a model's output tensor shapes relate to its input shapes""" -227 ret: Dict[MemberId, Dict[AxisId, Union[LinearSampleAxisTransform, int]]] = {} -228 batch_axis_trf = None -229 for ipt in model.inputs: -230 for a in ipt.axes: -231 if a.type == "batch": -232 batch_axis_trf = LinearSampleAxisTransform( -233 axis=a.id, scale=1, offset=0, member=ipt.id -234 ) -235 break -236 if batch_axis_trf is not None: -237 break -238 axis_scales = { -239 t.id: {a.id: a.scale for a in t.axes} -240 for t in chain(model.inputs, model.outputs) -241 } -242 for out in model.outputs: -243 new_axes: Dict[AxisId, Union[LinearSampleAxisTransform, int]] = {} -244 for a in out.axes: -245 if a.size is None: -246 assert a.type == "batch" -247 if batch_axis_trf is None: -248 raise ValueError( -249 "no batch axis found in any input tensor, but output tensor" -250 + f" '{out.id}' has one." -251 ) -252 s = batch_axis_trf -253 elif isinstance(a.size, int): -254 s = a.size -255 elif isinstance(a.size, v0_5.DataDependentSize): -256 s = -1 -257 elif isinstance(a.size, v0_5.SizeReference): -258 s = LinearSampleAxisTransform( -259 axis=a.size.axis_id, -260 scale=axis_scales[a.size.tensor_id][a.size.axis_id] / a.scale, -261 offset=a.size.offset, -262 member=a.size.tensor_id, -263 ) -264 else: -265 assert_never(a.size) -266 -267 new_axes[a.id] = s -268 -269 ret[out.id] = new_axes -270 -271 return ret -272 +224def get_block_transform( +225 model: v0_5.ModelDescr, +226) -> PerMember[PerAxis[Union[LinearSampleAxisTransform, int]]]: +227 """returns how a model's output tensor shapes relates to its input shapes""" +228 ret: Dict[MemberId, Dict[AxisId, Union[LinearSampleAxisTransform, int]]] = {} +229 batch_axis_trf = None +230 for ipt in model.inputs: +231 for a in ipt.axes: +232 if a.type == "batch": +233 batch_axis_trf = LinearSampleAxisTransform( +234 axis=a.id, scale=1, offset=0, member=ipt.id +235 ) +236 break +237 if batch_axis_trf is not None: +238 break +239 axis_scales = { +240 t.id: {a.id: a.scale for a in t.axes} +241 for t in chain(model.inputs, model.outputs) +242 } +243 for out in model.outputs: +244 new_axes: Dict[AxisId, Union[LinearSampleAxisTransform, int]] = {} +245 for a in out.axes: +246 if a.size is None: +247 assert a.type == "batch" +248 if batch_axis_trf is None: +249 raise ValueError( +250 "no batch axis found in any input tensor, but output tensor" +251 + f" '{out.id}' has one." 
+252 ) +253 s = batch_axis_trf +254 elif isinstance(a.size, int): +255 s = a.size +256 elif isinstance(a.size, v0_5.DataDependentSize): +257 s = -1 +258 elif isinstance(a.size, v0_5.SizeReference): +259 s = LinearSampleAxisTransform( +260 axis=a.size.axis_id, +261 scale=axis_scales[a.size.tensor_id][a.size.axis_id] / a.scale, +262 offset=a.size.offset, +263 member=a.size.tensor_id, +264 ) +265 else: +266 assert_never(a.size) +267 +268 new_axes[a.id] = s +269 +270 ret[out.id] = new_axes +271 +272 return ret 273 -274def get_io_sample_block_metas( -275 model: v0_5.ModelDescr, -276 input_sample_shape: PerMember[PerAxis[int]], -277 ns: Mapping[Tuple[MemberId, AxisId], ParameterizedSize_N], -278 batch_size: int = 1, -279) -> Tuple[TotalNumberOfBlocks, Iterable[IO_SampleBlockMeta]]: -280 """returns an iterable yielding meta data for corresponding input and output samples""" -281 if not isinstance(model, v0_5.ModelDescr): -282 raise TypeError(f"get_block_meta() not implemented for {type(model)}") -283 -284 block_axis_sizes = model.get_axis_sizes(ns=ns, batch_size=batch_size) -285 input_block_shape = { -286 t: {aa: s for (tt, aa), s in block_axis_sizes.inputs.items() if tt == t} -287 for t in {tt for tt, _ in block_axis_sizes.inputs} -288 } -289 output_block_shape = { -290 t: { -291 aa: s -292 for (tt, aa), s in block_axis_sizes.outputs.items() -293 if tt == t and not isinstance(s, tuple) -294 } -295 for t in {tt for tt, _ in block_axis_sizes.outputs} -296 } -297 output_halo = { -298 t.id: { -299 a.id: Halo(a.halo, a.halo) for a in t.axes if isinstance(a, v0_5.WithHalo) -300 } -301 for t in model.outputs -302 } -303 input_halo = get_input_halo(model, output_halo) -304 -305 # TODO: fix output_sample_shape_data_dep -306 # (below only valid if input_sample_shape is a valid model input, -307 # which is not a valid assumption) -308 output_sample_shape_data_dep = model.get_output_tensor_sizes(input_sample_shape) +274 +275def get_io_sample_block_metas( +276 model: v0_5.ModelDescr, +277 input_sample_shape: PerMember[PerAxis[int]], +278 ns: Mapping[Tuple[MemberId, AxisId], ParameterizedSize_N], +279 batch_size: int = 1, +280) -> Tuple[TotalNumberOfBlocks, Iterable[IO_SampleBlockMeta]]: +281 """returns an iterable yielding meta data for corresponding input and output samples""" +282 if not isinstance(model, v0_5.ModelDescr): +283 raise TypeError(f"get_block_meta() not implemented for {type(model)}") +284 +285 block_axis_sizes = model.get_axis_sizes(ns=ns, batch_size=batch_size) +286 input_block_shape = { +287 t: {aa: s for (tt, aa), s in block_axis_sizes.inputs.items() if tt == t} +288 for t in {tt for tt, _ in block_axis_sizes.inputs} +289 } +290 output_halo = { +291 t.id: { +292 a.id: Halo(a.halo, a.halo) for a in t.axes if isinstance(a, v0_5.WithHalo) +293 } +294 for t in model.outputs +295 } +296 input_halo = get_input_halo(model, output_halo) +297 +298 n_input_blocks, input_blocks = split_multiple_shapes_into_blocks( +299 input_sample_shape, input_block_shape, halo=input_halo +300 ) +301 block_transform = get_block_transform(model) +302 return n_input_blocks, ( +303 IO_SampleBlockMeta(ipt, ipt.get_transformed(block_transform)) +304 for ipt in sample_block_meta_generator( +305 input_blocks, sample_shape=input_sample_shape, sample_id=None +306 ) +307 ) +308 309 -310 output_sample_shape = { -311 t: { -312 a: -1 if isinstance(s, tuple) else s -313 for a, s in output_sample_shape_data_dep[t].items() -314 } -315 for t in output_sample_shape_data_dep -316 } -317 n_input_blocks, input_blocks = 
split_multiple_shapes_into_blocks( -318 input_sample_shape, input_block_shape, halo=input_halo -319 ) -320 n_output_blocks, output_blocks = split_multiple_shapes_into_blocks( -321 output_sample_shape, output_block_shape, halo=output_halo -322 ) -323 assert n_input_blocks == n_output_blocks -324 return n_input_blocks, ( -325 IO_SampleBlockMeta(ipt, out) -326 for ipt, out in zip( -327 sample_block_meta_generator( -328 input_blocks, sample_shape=input_sample_shape, sample_id=None -329 ), -330 sample_block_meta_generator( -331 output_blocks, -332 sample_shape=output_sample_shape, -333 sample_id=None, -334 ), -335 ) -336 ) -337 -338 -339def get_tensor( -340 src: Union[Tensor, xr.DataArray, NDArray[Any], Path], -341 ipt: Union[v0_4.InputTensorDescr, v0_5.InputTensorDescr], -342): -343 """helper to cast/load various tensor sources""" -344 -345 if isinstance(src, Tensor): -346 return src -347 -348 if isinstance(src, xr.DataArray): -349 return Tensor.from_xarray(src) -350 -351 if isinstance(src, np.ndarray): -352 return Tensor.from_numpy(src, dims=get_axes_infos(ipt)) +310def get_tensor( +311 src: Union[Tensor, xr.DataArray, NDArray[Any], Path], +312 ipt: Union[v0_4.InputTensorDescr, v0_5.InputTensorDescr], +313): +314 """helper to cast/load various tensor sources""" +315 +316 if isinstance(src, Tensor): +317 return src +318 +319 if isinstance(src, xr.DataArray): +320 return Tensor.from_xarray(src) +321 +322 if isinstance(src, np.ndarray): +323 return Tensor.from_numpy(src, dims=get_axes_infos(ipt)) +324 +325 if isinstance(src, Path): +326 return load_tensor(src, axes=get_axes_infos(ipt)) +327 +328 assert_never(src) +329 +330 +331def create_sample_for_model( +332 model: AnyModelDescr, +333 *, +334 stat: Optional[Stat] = None, +335 sample_id: SampleId = None, +336 inputs: Optional[ +337 PerMember[Union[Tensor, xr.DataArray, NDArray[Any], Path]] +338 ] = None, # TODO: make non-optional +339 **kwargs: NDArray[Any], # TODO: deprecate in favor of `inputs` +340) -> Sample: +341 """Create a sample from a single set of input(s) for a specific bioimage.io model +342 +343 Args: +344 model: a bioimage.io model description +345 stat: dictionary with sample and dataset statistics (may be updated in-place!) +346 inputs: the input(s) constituting a single sample. 
+347 """ +348 inputs = {MemberId(k): v for k, v in {**kwargs, **(inputs or {})}.items()} +349 +350 model_inputs = {get_member_id(d): d for d in model.inputs} +351 if unknown := {k for k in inputs if k not in model_inputs}: +352 raise ValueError(f"Got unexpected inputs: {unknown}") 353 -354 if isinstance(src, Path): -355 return load_tensor(src, axes=get_axes_infos(ipt)) -356 -357 assert_never(src) -358 -359 -360def create_sample_for_model( -361 model: AnyModelDescr, -362 *, -363 stat: Optional[Stat] = None, -364 sample_id: SampleId = None, -365 inputs: Optional[ -366 PerMember[Union[Tensor, xr.DataArray, NDArray[Any], Path]] -367 ] = None, # TODO: make non-optional -368 **kwargs: NDArray[Any], # TODO: deprecate in favor of `inputs` -369) -> Sample: -370 """Create a sample from a single set of input(s) for a specific bioimage.io model +354 if missing := { +355 k +356 for k, v in model_inputs.items() +357 if k not in inputs and not (isinstance(v, v0_5.InputTensorDescr) and v.optional) +358 }: +359 raise ValueError(f"Missing non-optional model inputs: {missing}") +360 +361 return Sample( +362 members={ +363 m: get_tensor(inputs[m], ipt) +364 for m, ipt in model_inputs.items() +365 if m in inputs +366 }, +367 stat={} if stat is None else stat, +368 id=sample_id, +369 ) +370 371 -372 Args: -373 model: a bioimage.io model description -374 stat: dictionary with sample and dataset statistics (may be updated in-place!) -375 inputs: the input(s) constituting a single sample. -376 """ -377 inputs = {MemberId(k): v for k, v in {**kwargs, **(inputs or {})}.items()} -378 -379 model_inputs = {get_member_id(d): d for d in model.inputs} -380 if unknown := {k for k in inputs if k not in model_inputs}: -381 raise ValueError(f"Got unexpected inputs: {unknown}") -382 -383 if missing := { -384 k -385 for k, v in model_inputs.items() -386 if k not in inputs and not (isinstance(v, v0_5.InputTensorDescr) and v.optional) -387 }: -388 raise ValueError(f"Missing non-optional model inputs: {missing}") -389 -390 return Sample( -391 members={ -392 m: get_tensor(inputs[m], ipt) -393 for m, ipt in model_inputs.items() -394 if m in inputs -395 }, -396 stat={} if stat is None else stat, -397 id=sample_id, -398 ) -399 -400 -401def load_sample_for_model( -402 *, -403 model: AnyModelDescr, -404 paths: PerMember[Path], -405 axes: Optional[PerMember[Sequence[AxisLike]]] = None, -406 stat: Optional[Stat] = None, -407 sample_id: Optional[SampleId] = None, -408): -409 """load a single sample from `paths` that can be processed by `model`""" -410 -411 if axes is None: -412 axes = {} -413 -414 # make sure members are keyed by MemberId, not string -415 paths = {MemberId(k): v for k, v in paths.items()} -416 axes = {MemberId(k): v for k, v in axes.items()} -417 -418 model_inputs = {get_member_id(d): d for d in model.inputs} -419 -420 if unknown := {k for k in paths if k not in model_inputs}: -421 raise ValueError(f"Got unexpected paths for {unknown}") -422 -423 if unknown := {k for k in axes if k not in model_inputs}: -424 raise ValueError(f"Got unexpected axes hints for: {unknown}") -425 -426 members: Dict[MemberId, Tensor] = {} -427 for m, p in paths.items(): -428 if m not in axes: -429 axes[m] = get_axes_infos(model_inputs[m]) -430 logger.debug( -431 "loading '{}' from {} with default input axes {} ", -432 m, -433 p, -434 axes[m], -435 ) -436 members[m] = load_tensor(p, axes[m]) -437 -438 return Sample( -439 members=members, -440 stat={} if stat is None else stat, -441 id=sample_id or tuple(sorted(paths.values())), -442 ) +372def 
load_sample_for_model( +373 *, +374 model: AnyModelDescr, +375 paths: PerMember[Path], +376 axes: Optional[PerMember[Sequence[AxisLike]]] = None, +377 stat: Optional[Stat] = None, +378 sample_id: Optional[SampleId] = None, +379): +380 """load a single sample from `paths` that can be processed by `model`""" +381 +382 if axes is None: +383 axes = {} +384 +385 # make sure members are keyed by MemberId, not string +386 paths = {MemberId(k): v for k, v in paths.items()} +387 axes = {MemberId(k): v for k, v in axes.items()} +388 +389 model_inputs = {get_member_id(d): d for d in model.inputs} +390 +391 if unknown := {k for k in paths if k not in model_inputs}: +392 raise ValueError(f"Got unexpected paths for {unknown}") +393 +394 if unknown := {k for k in axes if k not in model_inputs}: +395 raise ValueError(f"Got unexpected axes hints for: {unknown}") +396 +397 members: Dict[MemberId, Tensor] = {} +398 for m, p in paths.items(): +399 if m not in axes: +400 axes[m] = get_axes_infos(model_inputs[m]) +401 logger.debug( +402 "loading '{}' from {} with default input axes {} ", +403 m, +404 p, +405 axes[m], +406 ) +407 members[m] = load_tensor(p, axes[m]) +408 +409 return Sample( +410 members=members, +411 stat={} if stat is None else stat, +412 id=sample_id or tuple(sorted(paths.values())), +413 )
    @@ -556,36 +527,36 @@

    def - import_callable( node: Union[bioimageio.spec.model.v0_4.CallableFromDepencency, bioimageio.spec.model.v0_5.ArchitectureFromLibraryDescr], /, **kwargs: Unpack[bioimageio.spec._internal.io.HashKwargs]) -> Callable[..., Any]: + import_callable( node: Union[bioimageio.spec.model.v0_4.CallableFromDepencency, bioimageio.spec.model.v0_5.ArchitectureFromLibraryDescr], /, **kwargs: Unpack[bioimageio.spec._internal.io.HashKwargs]) -> Callable[..., Any]:
    -
    54def import_callable(
    -55    node: Union[CallableFromDepencency, ArchitectureFromLibraryDescr],
    -56    /,
    -57    **kwargs: Unpack[HashKwargs],
    -58) -> Callable[..., Any]:
    -59    """import a callable (e.g. a torch.nn.Module) from a spec node describing it"""
    -60    if isinstance(node, CallableFromDepencency):
    -61        module = importlib.import_module(node.module_name)
    -62        c = getattr(module, str(node.callable_name))
    -63    elif isinstance(node, ArchitectureFromLibraryDescr):
    -64        module = importlib.import_module(node.import_from)
    -65        c = getattr(module, str(node.callable))
    -66    elif isinstance(node, CallableFromFile):
    -67        c = _import_from_file_impl(node.source_file, str(node.callable_name), **kwargs)
    -68    elif isinstance(node, ArchitectureFromFileDescr):
    -69        c = _import_from_file_impl(node.source, str(node.callable), sha256=node.sha256)
    -70
    -71    else:
    -72        assert_never(node)
    -73
    -74    if not callable(c):
    -75        raise ValueError(f"{node} (imported: {c}) is not callable")
    -76
    -77    return c
    +            
    53def import_callable(
    +54    node: Union[CallableFromDepencency, ArchitectureFromLibraryDescr],
    +55    /,
    +56    **kwargs: Unpack[HashKwargs],
    +57) -> Callable[..., Any]:
    +58    """import a callable (e.g. a torch.nn.Module) from a spec node describing it"""
    +59    if isinstance(node, CallableFromDepencency):
    +60        module = importlib.import_module(node.module_name)
    +61        c = getattr(module, str(node.callable_name))
    +62    elif isinstance(node, ArchitectureFromLibraryDescr):
    +63        module = importlib.import_module(node.import_from)
    +64        c = getattr(module, str(node.callable))
    +65    elif isinstance(node, CallableFromFile):
    +66        c = _import_from_file_impl(node.source_file, str(node.callable_name), **kwargs)
    +67    elif isinstance(node, ArchitectureFromFileDescr):
    +68        c = _import_from_file_impl(node.source, str(node.callable), sha256=node.sha256)
    +69
    +70    else:
    +71        assert_never(node)
    +72
    +73    if not callable(c):
    +74        raise ValueError(f"{node} (imported: {c}) is not callable")
    +75
    +76    return c
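A hedged usage sketch for import_callable: given a loaded v0.5 model description with a pytorch_state_dict weights entry, the architecture description can be resolved to the Python callable it points to. The RDF source path is a placeholder, and the import paths (bioimageio.core.digest_spec for these helpers, load_description from bioimageio.spec) are assumptions.

from bioimageio.core.digest_spec import import_callable
from bioimageio.spec import load_description
from bioimageio.spec.model import v0_5

model = load_description("path/to/rdf.yaml")  # placeholder RDF source
assert isinstance(model, v0_5.ModelDescr)
pt_weights = model.weights.pytorch_state_dict  # assumes such a weights entry exists
assert pt_weights is not None
net_factory = import_callable(pt_weights.architecture)  # e.g. a torch.nn.Module subclass
net = net_factory(**dict(pt_weights.architecture.kwargs))  # instantiate with the spec'd kwargs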
     
    @@ -599,29 +570,29 @@

    def - get_axes_infos( io_descr: Union[bioimageio.spec.model.v0_4.InputTensorDescr, bioimageio.spec.model.v0_4.OutputTensorDescr, bioimageio.spec.model.v0_5.InputTensorDescr, bioimageio.spec.model.v0_5.OutputTensorDescr]) -> List[bioimageio.core.axis.AxisInfo]: + get_axes_infos( io_descr: Union[bioimageio.spec.model.v0_4.InputTensorDescr, bioimageio.spec.model.v0_4.OutputTensorDescr, bioimageio.spec.model.v0_5.InputTensorDescr, bioimageio.spec.model.v0_5.OutputTensorDescr]) -> List[bioimageio.core.axis.AxisInfo]:
    -
     96def get_axes_infos(
    - 97    io_descr: Union[
    - 98        v0_4.InputTensorDescr,
    - 99        v0_4.OutputTensorDescr,
    -100        v0_5.InputTensorDescr,
    -101        v0_5.OutputTensorDescr,
    -102    ]
    -103) -> List[AxisInfo]:
    -104    """get a unified, simplified axis representation from spec axes"""
    -105    return [
    -106        (
    -107            AxisInfo.create("i")
    -108            if isinstance(a, str) and a not in ("b", "i", "t", "c", "z", "y", "x")
    -109            else AxisInfo.create(a)
    -110        )
    -111        for a in io_descr.axes
    -112    ]
    +            
     95def get_axes_infos(
    + 96    io_descr: Union[
    + 97        v0_4.InputTensorDescr,
    + 98        v0_4.OutputTensorDescr,
    + 99        v0_5.InputTensorDescr,
    +100        v0_5.OutputTensorDescr,
    +101    ],
    +102) -> List[AxisInfo]:
    +103    """get a unified, simplified axis representation from spec axes"""
    +104    return [
    +105        (
    +106            AxisInfo.create("i")
    +107            if isinstance(a, str) and a not in ("b", "i", "t", "c", "z", "y", "x")
    +108            else AxisInfo.create(a)
    +109        )
    +110        for a in io_descr.axes
    +111    ]
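A small sketch of get_axes_infos under the same assumptions (placeholder RDF source, helpers imported from bioimageio.core.digest_spec); the printed axis ids are illustrative only.

from bioimageio.core.digest_spec import get_axes_infos
from bioimageio.spec import load_description

model = load_description("path/to/rdf.yaml")  # placeholder RDF source describing a valid model
for ipt in model.inputs:
    # AxisInfo objects carry a normalized axis id, e.g. batch/channel/y/x
    print([a.id for a in get_axes_infos(ipt)])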
     
    @@ -635,30 +606,30 @@

    def - get_member_id( tensor_description: Union[bioimageio.spec.model.v0_4.InputTensorDescr, bioimageio.spec.model.v0_4.OutputTensorDescr, bioimageio.spec.model.v0_5.InputTensorDescr, bioimageio.spec.model.v0_5.OutputTensorDescr]) -> bioimageio.spec.model.v0_5.TensorId: + get_member_id( tensor_description: Union[bioimageio.spec.model.v0_4.InputTensorDescr, bioimageio.spec.model.v0_4.OutputTensorDescr, bioimageio.spec.model.v0_5.InputTensorDescr, bioimageio.spec.model.v0_5.OutputTensorDescr]) -> bioimageio.spec.model.v0_5.TensorId:
    -
    115def get_member_id(
    -116    tensor_description: Union[
    -117        v0_4.InputTensorDescr,
    -118        v0_4.OutputTensorDescr,
    -119        v0_5.InputTensorDescr,
    -120        v0_5.OutputTensorDescr,
    -121    ]
    -122) -> MemberId:
    -123    """get the normalized tensor ID, usable as a sample member ID"""
    -124
    -125    if isinstance(tensor_description, (v0_4.InputTensorDescr, v0_4.OutputTensorDescr)):
    -126        return MemberId(tensor_description.name)
    -127    elif isinstance(
    -128        tensor_description, (v0_5.InputTensorDescr, v0_5.OutputTensorDescr)
    -129    ):
    -130        return tensor_description.id
    -131    else:
    -132        assert_never(tensor_description)
    +            
    114def get_member_id(
    +115    tensor_description: Union[
    +116        v0_4.InputTensorDescr,
    +117        v0_4.OutputTensorDescr,
    +118        v0_5.InputTensorDescr,
    +119        v0_5.OutputTensorDescr,
    +120    ],
    +121) -> MemberId:
    +122    """get the normalized tensor ID, usable as a sample member ID"""
    +123
    +124    if isinstance(tensor_description, (v0_4.InputTensorDescr, v0_4.OutputTensorDescr)):
    +125        return MemberId(tensor_description.name)
    +126    elif isinstance(
    +127        tensor_description, (v0_5.InputTensorDescr, v0_5.OutputTensorDescr)
    +128    ):
    +129        return tensor_description.id
    +130    else:
    +131        assert_never(tensor_description)
     
    @@ -672,24 +643,24 @@

    def - get_member_ids( tensor_descriptions: Sequence[Union[bioimageio.spec.model.v0_4.InputTensorDescr, bioimageio.spec.model.v0_4.OutputTensorDescr, bioimageio.spec.model.v0_5.InputTensorDescr, bioimageio.spec.model.v0_5.OutputTensorDescr]]) -> List[bioimageio.spec.model.v0_5.TensorId]: + get_member_ids( tensor_descriptions: Sequence[Union[bioimageio.spec.model.v0_4.InputTensorDescr, bioimageio.spec.model.v0_4.OutputTensorDescr, bioimageio.spec.model.v0_5.InputTensorDescr, bioimageio.spec.model.v0_5.OutputTensorDescr]]) -> List[bioimageio.spec.model.v0_5.TensorId]:
    -
    135def get_member_ids(
    -136    tensor_descriptions: Sequence[
    -137        Union[
    -138            v0_4.InputTensorDescr,
    -139            v0_4.OutputTensorDescr,
    -140            v0_5.InputTensorDescr,
    -141            v0_5.OutputTensorDescr,
    -142        ]
    -143    ]
    -144) -> List[MemberId]:
    -145    """get normalized tensor IDs to be used as sample member IDs"""
    -146    return [get_member_id(descr) for descr in tensor_descriptions]
    +            
    134def get_member_ids(
    +135    tensor_descriptions: Sequence[
    +136        Union[
    +137            v0_4.InputTensorDescr,
    +138            v0_4.OutputTensorDescr,
    +139            v0_5.InputTensorDescr,
    +140            v0_5.OutputTensorDescr,
    +141        ]
    +142    ],
    +143) -> List[MemberId]:
    +144    """get normalized tensor IDs to be used as sample member IDs"""
    +145    return [get_member_id(descr) for descr in tensor_descriptions]
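A minimal sketch for get_member_id and get_member_ids (placeholder RDF source, assumed import path): both normalize v0.4 tensor names and v0.5 tensor ids into the member ids that key Sample.members.

from bioimageio.core.digest_spec import get_member_id, get_member_ids
from bioimageio.spec import load_description

model = load_description("path/to/rdf.yaml")            # placeholder RDF source describing a valid model
input_ids = get_member_ids(model.inputs)                 # one MemberId per input tensor
output_ids = [get_member_id(d) for d in model.outputs]   # element-wise equivalent for the outputs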
     
    @@ -703,29 +674,29 @@

    def - get_test_inputs( model: Annotated[Union[bioimageio.spec.model.v0_4.ModelDescr, bioimageio.spec.model.v0_5.ModelDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)]) -> bioimageio.core.Sample: + get_test_inputs( model: Annotated[Union[bioimageio.spec.model.v0_4.ModelDescr, bioimageio.spec.ModelDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)]) -> bioimageio.core.Sample:
    -
    149def get_test_inputs(model: AnyModelDescr) -> Sample:
    -150    """returns a model's test input sample"""
    -151    member_ids = get_member_ids(model.inputs)
    -152    if isinstance(model, v0_4.ModelDescr):
    -153        arrays = [load_array(tt) for tt in model.test_inputs]
    -154    else:
    -155        arrays = [load_array(d.test_tensor) for d in model.inputs]
    -156
    -157    axes = [get_axes_infos(t) for t in model.inputs]
    -158    return Sample(
    -159        members={
    -160            m: Tensor.from_numpy(arr, dims=ax)
    -161            for m, arr, ax in zip(member_ids, arrays, axes)
    -162        },
    -163        stat={},
    -164        id="test-input",
    -165    )
    +            
    148def get_test_inputs(model: AnyModelDescr) -> Sample:
    +149    """returns a model's test input sample"""
    +150    member_ids = get_member_ids(model.inputs)
    +151    if isinstance(model, v0_4.ModelDescr):
    +152        arrays = [load_array(tt) for tt in model.test_inputs]
    +153    else:
    +154        arrays = [load_array(d.test_tensor) for d in model.inputs]
    +155
    +156    axes = [get_axes_infos(t) for t in model.inputs]
    +157    return Sample(
    +158        members={
    +159            m: Tensor.from_numpy(arr, dims=ax)
    +160            for m, arr, ax in zip(member_ids, arrays, axes)
    +161        },
    +162        stat={},
    +163        id="test-sample",
    +164    )
     
    @@ -739,31 +710,31 @@

    def - get_test_outputs( model: Annotated[Union[bioimageio.spec.model.v0_4.ModelDescr, bioimageio.spec.model.v0_5.ModelDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)]) -> bioimageio.core.Sample: + get_test_outputs( model: Annotated[Union[bioimageio.spec.model.v0_4.ModelDescr, bioimageio.spec.ModelDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)]) -> bioimageio.core.Sample:
    -
    168def get_test_outputs(model: AnyModelDescr) -> Sample:
    -169    """returns a model's test output sample"""
    -170    member_ids = get_member_ids(model.outputs)
    -171
    -172    if isinstance(model, v0_4.ModelDescr):
    -173        arrays = [load_array(tt) for tt in model.test_outputs]
    -174    else:
    -175        arrays = [load_array(d.test_tensor) for d in model.outputs]
    -176
    -177    axes = [get_axes_infos(t) for t in model.outputs]
    -178
    -179    return Sample(
    -180        members={
    -181            m: Tensor.from_numpy(arr, dims=ax)
    -182            for m, arr, ax in zip(member_ids, arrays, axes)
    -183        },
    -184        stat={},
    -185        id="test-output",
    -186    )
    +            
    167def get_test_outputs(model: AnyModelDescr) -> Sample:
    +168    """returns a model's test output sample"""
    +169    member_ids = get_member_ids(model.outputs)
    +170
    +171    if isinstance(model, v0_4.ModelDescr):
    +172        arrays = [load_array(tt) for tt in model.test_outputs]
    +173    else:
    +174        arrays = [load_array(d.test_tensor) for d in model.outputs]
    +175
    +176    axes = [get_axes_infos(t) for t in model.outputs]
    +177
    +178    return Sample(
    +179        members={
    +180            m: Tensor.from_numpy(arr, dims=ax)
    +181            for m, arr, ax in zip(member_ids, arrays, axes)
    +182        },
    +183        stat={},
    +184        id="test-sample",
    +185    )
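A hedged sketch using get_test_inputs together with get_test_outputs, e.g. to inspect the test data bundled with a model (placeholder RDF source, helpers assumed to come from bioimageio.core.digest_spec):

from bioimageio.core.digest_spec import get_test_inputs, get_test_outputs
from bioimageio.spec import load_description

model = load_description("path/to/rdf.yaml")  # placeholder RDF source describing a valid model
test_in = get_test_inputs(model)              # Sample with one member per input tensor
test_out = get_test_outputs(model)            # the outputs expected for those test inputs
for member_id, tensor in test_in.members.items():
    print(member_id, dict(tensor.tagged_shape))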
     
    @@ -783,9 +754,9 @@

    -
    189class IO_SampleBlockMeta(NamedTuple):
    -190    input: SampleBlockMeta
    -191    output: SampleBlockMeta
    +            
    188class IO_SampleBlockMeta(NamedTuple):
    +189    input: SampleBlockMeta
    +190    output: SampleBlockMeta
     
    @@ -809,7 +780,7 @@

    - input: bioimageio.core.sample.SampleBlockMeta + input: bioimageio.core.sample.SampleBlockMeta
    @@ -822,7 +793,7 @@

    - output: bioimageio.core.sample.SampleBlockMeta + output: bioimageio.core.sample.SampleBlockMeta
    @@ -839,47 +810,47 @@

    def - get_input_halo( model: bioimageio.spec.model.v0_5.ModelDescr, output_halo: Mapping[bioimageio.spec.model.v0_5.TensorId, Mapping[bioimageio.spec.model.v0_5.AxisId, bioimageio.core.common.Halo]]): + get_input_halo( model: bioimageio.spec.ModelDescr, output_halo: Mapping[bioimageio.spec.model.v0_5.TensorId, Mapping[bioimageio.spec.model.v0_5.AxisId, bioimageio.core.common.Halo]]):
    -
    194def get_input_halo(model: v0_5.ModelDescr, output_halo: PerMember[PerAxis[Halo]]):
    -195    """returns which halo input tensors need to be divided into blocks with such that
    -196    `output_halo` can be cropped from their outputs without intorducing gaps."""
    -197    input_halo: Dict[MemberId, Dict[AxisId, Halo]] = {}
    -198    outputs = {t.id: t for t in model.outputs}
    -199    all_tensors = {**{t.id: t for t in model.inputs}, **outputs}
    -200
    -201    for t, th in output_halo.items():
    -202        axes = {a.id: a for a in outputs[t].axes}
    -203
    -204        for a, ah in th.items():
    -205            s = axes[a].size
    -206            if not isinstance(s, v0_5.SizeReference):
    -207                raise ValueError(
    -208                    f"Unable to map output halo for {t}.{a} to an input axis"
    -209                )
    -210
    -211            axis = axes[a]
    -212            ref_axis = {a.id: a for a in all_tensors[s.tensor_id].axes}[s.axis_id]
    -213
    -214            total_output_halo = sum(ah)
    -215            total_input_halo = total_output_halo * axis.scale / ref_axis.scale
    -216            assert (
    -217                total_input_halo == int(total_input_halo) and total_input_halo % 2 == 0
    -218            )
    -219            input_halo.setdefault(s.tensor_id, {})[a] = Halo(
    -220                int(total_input_halo // 2), int(total_input_halo // 2)
    -221            )
    -222
    -223    return input_halo
    +            
    193def get_input_halo(model: v0_5.ModelDescr, output_halo: PerMember[PerAxis[Halo]]):
    +194    """returns which halo input tensors need to be divided into blocks with, such that
    +195    `output_halo` can be cropped from their outputs without introducing gaps."""
    +196    input_halo: Dict[MemberId, Dict[AxisId, Halo]] = {}
    +197    outputs = {t.id: t for t in model.outputs}
    +198    all_tensors = {**{t.id: t for t in model.inputs}, **outputs}
    +199
    +200    for t, th in output_halo.items():
    +201        axes = {a.id: a for a in outputs[t].axes}
    +202
    +203        for a, ah in th.items():
    +204            s = axes[a].size
    +205            if not isinstance(s, v0_5.SizeReference):
    +206                raise ValueError(
    +207                    f"Unable to map output halo for {t}.{a} to an input axis"
    +208                )
    +209
    +210            axis = axes[a]
    +211            ref_axis = {a.id: a for a in all_tensors[s.tensor_id].axes}[s.axis_id]
    +212
    +213            total_output_halo = sum(ah)
    +214            total_input_halo = total_output_halo * axis.scale / ref_axis.scale
    +215            assert (
    +216                total_input_halo == int(total_input_halo) and total_input_halo % 2 == 0
    +217            )
    +218            input_halo.setdefault(s.tensor_id, {})[a] = Halo(
    +219                int(total_input_halo // 2), int(total_input_halo // 2)
    +220            )
    +221
    +222    return input_halo
     
    -

returns which halo input tensors need to be divided into blocks with such that
-output_halo can be cropped from their outputs without intorducing gaps.

    +

returns which halo input tensors need to be divided into blocks with, such that
+output_halo can be cropped from their outputs without introducing gaps.
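A sketch of how an output halo may be assembled from a v0.5 model description and mapped back to the inputs with get_input_halo; this mirrors what get_io_sample_block_metas does internally. The RDF source is a placeholder and the import paths are assumptions.

from bioimageio.core.common import Halo
from bioimageio.core.digest_spec import get_input_halo
from bioimageio.spec import load_description
from bioimageio.spec.model import v0_5

model = load_description("path/to/rdf.yaml")  # placeholder RDF source
assert isinstance(model, v0_5.ModelDescr)     # only defined for v0.5 models
output_halo = {
    t.id: {a.id: Halo(a.halo, a.halo) for a in t.axes if isinstance(a, v0_5.WithHalo)}
    for t in model.outputs
}
input_halo = get_input_halo(model, output_halo)  # PerMember[PerAxis[Halo]] for the inputs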

    @@ -889,63 +860,65 @@

    def - get_block_transform(model: bioimageio.spec.model.v0_5.ModelDescr): + get_block_transform( model: bioimageio.spec.ModelDescr) -> Mapping[bioimageio.spec.model.v0_5.TensorId, Mapping[bioimageio.spec.model.v0_5.AxisId, Union[bioimageio.core.sample.LinearSampleAxisTransform, int]]]:
    -
    226def get_block_transform(model: v0_5.ModelDescr):
    -227    """returns how a model's output tensor shapes relate to its input shapes"""
    -228    ret: Dict[MemberId, Dict[AxisId, Union[LinearSampleAxisTransform, int]]] = {}
    -229    batch_axis_trf = None
    -230    for ipt in model.inputs:
    -231        for a in ipt.axes:
    -232            if a.type == "batch":
    -233                batch_axis_trf = LinearSampleAxisTransform(
    -234                    axis=a.id, scale=1, offset=0, member=ipt.id
    -235                )
    -236                break
    -237        if batch_axis_trf is not None:
    -238            break
    -239    axis_scales = {
    -240        t.id: {a.id: a.scale for a in t.axes}
    -241        for t in chain(model.inputs, model.outputs)
    -242    }
    -243    for out in model.outputs:
    -244        new_axes: Dict[AxisId, Union[LinearSampleAxisTransform, int]] = {}
    -245        for a in out.axes:
    -246            if a.size is None:
    -247                assert a.type == "batch"
    -248                if batch_axis_trf is None:
    -249                    raise ValueError(
    -250                        "no batch axis found in any input tensor, but output tensor"
    -251                        + f" '{out.id}' has one."
    -252                    )
    -253                s = batch_axis_trf
    -254            elif isinstance(a.size, int):
    -255                s = a.size
    -256            elif isinstance(a.size, v0_5.DataDependentSize):
    -257                s = -1
    -258            elif isinstance(a.size, v0_5.SizeReference):
    -259                s = LinearSampleAxisTransform(
    -260                    axis=a.size.axis_id,
    -261                    scale=axis_scales[a.size.tensor_id][a.size.axis_id] / a.scale,
    -262                    offset=a.size.offset,
    -263                    member=a.size.tensor_id,
    -264                )
    -265            else:
    -266                assert_never(a.size)
    -267
    -268            new_axes[a.id] = s
    -269
    -270        ret[out.id] = new_axes
    -271
    -272    return ret
    +            
    225def get_block_transform(
    +226    model: v0_5.ModelDescr,
    +227) -> PerMember[PerAxis[Union[LinearSampleAxisTransform, int]]]:
    +228    """returns how a model's output tensor shapes relates to its input shapes"""
    +229    ret: Dict[MemberId, Dict[AxisId, Union[LinearSampleAxisTransform, int]]] = {}
    +230    batch_axis_trf = None
    +231    for ipt in model.inputs:
    +232        for a in ipt.axes:
    +233            if a.type == "batch":
    +234                batch_axis_trf = LinearSampleAxisTransform(
    +235                    axis=a.id, scale=1, offset=0, member=ipt.id
    +236                )
    +237                break
    +238        if batch_axis_trf is not None:
    +239            break
    +240    axis_scales = {
    +241        t.id: {a.id: a.scale for a in t.axes}
    +242        for t in chain(model.inputs, model.outputs)
    +243    }
    +244    for out in model.outputs:
    +245        new_axes: Dict[AxisId, Union[LinearSampleAxisTransform, int]] = {}
    +246        for a in out.axes:
    +247            if a.size is None:
    +248                assert a.type == "batch"
    +249                if batch_axis_trf is None:
    +250                    raise ValueError(
    +251                        "no batch axis found in any input tensor, but output tensor"
    +252                        + f" '{out.id}' has one."
    +253                    )
    +254                s = batch_axis_trf
    +255            elif isinstance(a.size, int):
    +256                s = a.size
    +257            elif isinstance(a.size, v0_5.DataDependentSize):
    +258                s = -1
    +259            elif isinstance(a.size, v0_5.SizeReference):
    +260                s = LinearSampleAxisTransform(
    +261                    axis=a.size.axis_id,
    +262                    scale=axis_scales[a.size.tensor_id][a.size.axis_id] / a.scale,
    +263                    offset=a.size.offset,
    +264                    member=a.size.tensor_id,
    +265                )
    +266            else:
    +267                assert_never(a.size)
    +268
    +269            new_axes[a.id] = s
    +270
    +271        ret[out.id] = new_axes
    +272
    +273    return ret
     
    -

    returns how a model's output tensor shapes relate to its input shapes

    +

    returns how a model's output tensor shapes relates to its input shapes
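A brief sketch of get_block_transform (placeholder RDF source, assumed import path): the returned mapping tells, per output tensor and axis, whether the block size is fixed (an int), data dependent (-1), or linearly derived from an input axis (a LinearSampleAxisTransform).

from bioimageio.core.digest_spec import get_block_transform
from bioimageio.spec import load_description
from bioimageio.spec.model import v0_5

model = load_description("path/to/rdf.yaml")  # placeholder RDF source
assert isinstance(model, v0_5.ModelDescr)     # only defined for v0.5 models
block_transform = get_block_transform(model)
for out_id, per_axis in block_transform.items():
    print(out_id, per_axis)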

    @@ -955,75 +928,45 @@

    def - get_io_sample_block_metas( model: bioimageio.spec.model.v0_5.ModelDescr, input_sample_shape: Mapping[bioimageio.spec.model.v0_5.TensorId, Mapping[bioimageio.spec.model.v0_5.AxisId, int]], ns: Mapping[Tuple[bioimageio.spec.model.v0_5.TensorId, bioimageio.spec.model.v0_5.AxisId], int], batch_size: int = 1) -> Tuple[int, Iterable[IO_SampleBlockMeta]]: + get_io_sample_block_metas( model: bioimageio.spec.ModelDescr, input_sample_shape: Mapping[bioimageio.spec.model.v0_5.TensorId, Mapping[bioimageio.spec.model.v0_5.AxisId, int]], ns: Mapping[Tuple[bioimageio.spec.model.v0_5.TensorId, bioimageio.spec.model.v0_5.AxisId], int], batch_size: int = 1) -> Tuple[int, Iterable[IO_SampleBlockMeta]]:
    -
    275def get_io_sample_block_metas(
    -276    model: v0_5.ModelDescr,
    -277    input_sample_shape: PerMember[PerAxis[int]],
    -278    ns: Mapping[Tuple[MemberId, AxisId], ParameterizedSize_N],
    -279    batch_size: int = 1,
    -280) -> Tuple[TotalNumberOfBlocks, Iterable[IO_SampleBlockMeta]]:
    -281    """returns an iterable yielding meta data for corresponding input and output samples"""
    -282    if not isinstance(model, v0_5.ModelDescr):
    -283        raise TypeError(f"get_block_meta() not implemented for {type(model)}")
    -284
    -285    block_axis_sizes = model.get_axis_sizes(ns=ns, batch_size=batch_size)
    -286    input_block_shape = {
    -287        t: {aa: s for (tt, aa), s in block_axis_sizes.inputs.items() if tt == t}
    -288        for t in {tt for tt, _ in block_axis_sizes.inputs}
    -289    }
    -290    output_block_shape = {
    -291        t: {
    -292            aa: s
    -293            for (tt, aa), s in block_axis_sizes.outputs.items()
    -294            if tt == t and not isinstance(s, tuple)
    -295        }
    -296        for t in {tt for tt, _ in block_axis_sizes.outputs}
    -297    }
    -298    output_halo = {
    -299        t.id: {
    -300            a.id: Halo(a.halo, a.halo) for a in t.axes if isinstance(a, v0_5.WithHalo)
    -301        }
    -302        for t in model.outputs
    -303    }
    -304    input_halo = get_input_halo(model, output_halo)
    -305
    -306    # TODO: fix output_sample_shape_data_dep
    -307    #  (below only valid if input_sample_shape is a valid model input,
    -308    #   which is not a valid assumption)
    -309    output_sample_shape_data_dep = model.get_output_tensor_sizes(input_sample_shape)
    -310
    -311    output_sample_shape = {
    -312        t: {
    -313            a: -1 if isinstance(s, tuple) else s
    -314            for a, s in output_sample_shape_data_dep[t].items()
    -315        }
    -316        for t in output_sample_shape_data_dep
    -317    }
    -318    n_input_blocks, input_blocks = split_multiple_shapes_into_blocks(
    -319        input_sample_shape, input_block_shape, halo=input_halo
    -320    )
    -321    n_output_blocks, output_blocks = split_multiple_shapes_into_blocks(
    -322        output_sample_shape, output_block_shape, halo=output_halo
    -323    )
    -324    assert n_input_blocks == n_output_blocks
    -325    return n_input_blocks, (
    -326        IO_SampleBlockMeta(ipt, out)
    -327        for ipt, out in zip(
    -328            sample_block_meta_generator(
    -329                input_blocks, sample_shape=input_sample_shape, sample_id=None
    -330            ),
    -331            sample_block_meta_generator(
    -332                output_blocks,
    -333                sample_shape=output_sample_shape,
    -334                sample_id=None,
    -335            ),
    -336        )
    -337    )
    +            
    276def get_io_sample_block_metas(
    +277    model: v0_5.ModelDescr,
    +278    input_sample_shape: PerMember[PerAxis[int]],
    +279    ns: Mapping[Tuple[MemberId, AxisId], ParameterizedSize_N],
    +280    batch_size: int = 1,
    +281) -> Tuple[TotalNumberOfBlocks, Iterable[IO_SampleBlockMeta]]:
    +282    """returns an iterable yielding meta data for corresponding input and output samples"""
    +283    if not isinstance(model, v0_5.ModelDescr):
    +284        raise TypeError(f"get_block_meta() not implemented for {type(model)}")
    +285
    +286    block_axis_sizes = model.get_axis_sizes(ns=ns, batch_size=batch_size)
    +287    input_block_shape = {
    +288        t: {aa: s for (tt, aa), s in block_axis_sizes.inputs.items() if tt == t}
    +289        for t in {tt for tt, _ in block_axis_sizes.inputs}
    +290    }
    +291    output_halo = {
    +292        t.id: {
    +293            a.id: Halo(a.halo, a.halo) for a in t.axes if isinstance(a, v0_5.WithHalo)
    +294        }
    +295        for t in model.outputs
    +296    }
    +297    input_halo = get_input_halo(model, output_halo)
    +298
    +299    n_input_blocks, input_blocks = split_multiple_shapes_into_blocks(
    +300        input_sample_shape, input_block_shape, halo=input_halo
    +301    )
    +302    block_transform = get_block_transform(model)
    +303    return n_input_blocks, (
    +304        IO_SampleBlockMeta(ipt, ipt.get_transformed(block_transform))
    +305        for ipt in sample_block_meta_generator(
    +306            input_blocks, sample_shape=input_sample_shape, sample_id=None
    +307        )
    +308    )
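A hedged sketch of get_io_sample_block_metas with made-up sizes: the input sample shape and the parameterized size factors ns are hypothetical and need to match the concrete model's axes; the RDF source is a placeholder.

from bioimageio.core.digest_spec import get_io_sample_block_metas, get_member_id
from bioimageio.spec import load_description
from bioimageio.spec.model import v0_5

model = load_description("path/to/rdf.yaml")  # placeholder RDF source
assert isinstance(model, v0_5.ModelDescr)
ipt = get_member_id(model.inputs[0])
# hypothetical 2d setup: one input with batch/channel/y/x axes and parameterized y/x sizes
input_sample_shape = {
    ipt: {v0_5.AxisId("batch"): 1, v0_5.AxisId("channel"): 1, v0_5.AxisId("y"): 512, v0_5.AxisId("x"): 512}
}
ns = {(ipt, v0_5.AxisId("y")): 10, (ipt, v0_5.AxisId("x")): 10}
n_blocks, block_metas = get_io_sample_block_metas(model, input_sample_shape, ns=ns)
for io_meta in block_metas:
    _ = (io_meta.input, io_meta.output)  # matching input and output block metadata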
     
    @@ -1037,31 +980,31 @@

    def - get_tensor( src: Union[bioimageio.core.Tensor, xarray.core.dataarray.DataArray, numpy.ndarray[Any, numpy.dtype[Any]], pathlib.Path], ipt: Union[bioimageio.spec.model.v0_4.InputTensorDescr, bioimageio.spec.model.v0_5.InputTensorDescr]): + get_tensor( src: Union[bioimageio.core.Tensor, xarray.core.dataarray.DataArray, numpy.ndarray[Any, numpy.dtype[Any]], pathlib.Path], ipt: Union[bioimageio.spec.model.v0_4.InputTensorDescr, bioimageio.spec.model.v0_5.InputTensorDescr]):
    -
    340def get_tensor(
    -341    src: Union[Tensor, xr.DataArray, NDArray[Any], Path],
    -342    ipt: Union[v0_4.InputTensorDescr, v0_5.InputTensorDescr],
    -343):
    -344    """helper to cast/load various tensor sources"""
    -345
    -346    if isinstance(src, Tensor):
    -347        return src
    -348
    -349    if isinstance(src, xr.DataArray):
    -350        return Tensor.from_xarray(src)
    -351
    -352    if isinstance(src, np.ndarray):
    -353        return Tensor.from_numpy(src, dims=get_axes_infos(ipt))
    -354
    -355    if isinstance(src, Path):
    -356        return load_tensor(src, axes=get_axes_infos(ipt))
    -357
    -358    assert_never(src)
    +            
    311def get_tensor(
    +312    src: Union[Tensor, xr.DataArray, NDArray[Any], Path],
    +313    ipt: Union[v0_4.InputTensorDescr, v0_5.InputTensorDescr],
    +314):
    +315    """helper to cast/load various tensor sources"""
    +316
    +317    if isinstance(src, Tensor):
    +318        return src
    +319
    +320    if isinstance(src, xr.DataArray):
    +321        return Tensor.from_xarray(src)
    +322
    +323    if isinstance(src, np.ndarray):
    +324        return Tensor.from_numpy(src, dims=get_axes_infos(ipt))
    +325
    +326    if isinstance(src, Path):
    +327        return load_tensor(src, axes=get_axes_infos(ipt))
    +328
    +329    assert_never(src)
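A minimal sketch of get_tensor with a numpy array and with a file path (both hypothetical); the axes of the input description determine how the data is interpreted.

from pathlib import Path

import numpy as np

from bioimageio.core.digest_spec import get_tensor
from bioimageio.spec import load_description

model = load_description("path/to/rdf.yaml")           # placeholder RDF source describing a valid model
ipt = model.inputs[0]
arr = np.zeros((1, 1, 256, 256), dtype="float32")       # hypothetical raw data matching ipt's axes
tensor = get_tensor(arr, ipt)                           # numpy -> Tensor via the spec axes
tensor_from_file = get_tensor(Path("image.tif"), ipt)   # Path sources are loaded with load_tensor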
     
    @@ -1075,51 +1018,51 @@

    def - create_sample_for_model( model: Annotated[Union[bioimageio.spec.model.v0_4.ModelDescr, bioimageio.spec.model.v0_5.ModelDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], *, stat: Optional[Dict[Annotated[Union[Annotated[Union[bioimageio.core.stat_measures.SampleMean, bioimageio.core.stat_measures.SampleStd, bioimageio.core.stat_measures.SampleVar, bioimageio.core.stat_measures.SampleQuantile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.core.stat_measures.DatasetMean, bioimageio.core.stat_measures.DatasetStd, bioimageio.core.stat_measures.DatasetVar, bioimageio.core.stat_measures.DatasetPercentile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='scope', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Union[float, Annotated[bioimageio.core.Tensor, BeforeValidator(func=<function tensor_custom_before_validator>, json_schema_input_type=PydanticUndefined), PlainSerializer(func=<function tensor_custom_serializer>, return_type=PydanticUndefined, when_used='always')]]]] = None, sample_id: Hashable = None, inputs: Optional[Mapping[bioimageio.spec.model.v0_5.TensorId, Union[bioimageio.core.Tensor, xarray.core.dataarray.DataArray, numpy.ndarray[Any, numpy.dtype[Any]], pathlib.Path]]] = None, **kwargs: numpy.ndarray[typing.Any, numpy.dtype[typing.Any]]) -> bioimageio.core.Sample: + create_sample_for_model( model: Annotated[Union[bioimageio.spec.model.v0_4.ModelDescr, bioimageio.spec.ModelDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], *, stat: Optional[Dict[Annotated[Union[Annotated[Union[bioimageio.core.stat_measures.SampleMean, bioimageio.core.stat_measures.SampleStd, bioimageio.core.stat_measures.SampleVar, bioimageio.core.stat_measures.SampleQuantile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.core.stat_measures.DatasetMean, bioimageio.core.stat_measures.DatasetStd, bioimageio.core.stat_measures.DatasetVar, bioimageio.core.stat_measures.DatasetPercentile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='scope', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Union[float, Annotated[bioimageio.core.Tensor, BeforeValidator(func=<function tensor_custom_before_validator>, json_schema_input_type=PydanticUndefined), PlainSerializer(func=<function tensor_custom_serializer>, return_type=PydanticUndefined, when_used='always')]]]] = None, sample_id: Hashable = None, inputs: Optional[Mapping[bioimageio.spec.model.v0_5.TensorId, Union[bioimageio.core.Tensor, xarray.core.dataarray.DataArray, numpy.ndarray[Any, numpy.dtype[Any]], pathlib.Path]]] = None, **kwargs: numpy.ndarray[typing.Any, numpy.dtype[typing.Any]]) -> bioimageio.core.Sample:
    -
    361def create_sample_for_model(
    -362    model: AnyModelDescr,
    -363    *,
    -364    stat: Optional[Stat] = None,
    -365    sample_id: SampleId = None,
    -366    inputs: Optional[
    -367        PerMember[Union[Tensor, xr.DataArray, NDArray[Any], Path]]
    -368    ] = None,  # TODO: make non-optional
    -369    **kwargs: NDArray[Any],  # TODO: deprecate in favor of `inputs`
    -370) -> Sample:
    -371    """Create a sample from a single set of input(s) for a specific bioimage.io model
    -372
    -373    Args:
    -374        model: a bioimage.io model description
    -375        stat: dictionary with sample and dataset statistics (may be updated in-place!)
    -376        inputs: the input(s) constituting a single sample.
    -377    """
    -378    inputs = {MemberId(k): v for k, v in {**kwargs, **(inputs or {})}.items()}
    -379
    -380    model_inputs = {get_member_id(d): d for d in model.inputs}
    -381    if unknown := {k for k in inputs if k not in model_inputs}:
    -382        raise ValueError(f"Got unexpected inputs: {unknown}")
    -383
    -384    if missing := {
    -385        k
    -386        for k, v in model_inputs.items()
    -387        if k not in inputs and not (isinstance(v, v0_5.InputTensorDescr) and v.optional)
    -388    }:
    -389        raise ValueError(f"Missing non-optional model inputs: {missing}")
    -390
    -391    return Sample(
    -392        members={
    -393            m: get_tensor(inputs[m], ipt)
    -394            for m, ipt in model_inputs.items()
    -395            if m in inputs
    -396        },
    -397        stat={} if stat is None else stat,
    -398        id=sample_id,
    -399    )
    +            
    332def create_sample_for_model(
    +333    model: AnyModelDescr,
    +334    *,
    +335    stat: Optional[Stat] = None,
    +336    sample_id: SampleId = None,
    +337    inputs: Optional[
    +338        PerMember[Union[Tensor, xr.DataArray, NDArray[Any], Path]]
    +339    ] = None,  # TODO: make non-optional
    +340    **kwargs: NDArray[Any],  # TODO: deprecate in favor of `inputs`
    +341) -> Sample:
    +342    """Create a sample from a single set of input(s) for a specific bioimage.io model
    +343
    +344    Args:
    +345        model: a bioimage.io model description
    +346        stat: dictionary with sample and dataset statistics (may be updated in-place!)
    +347        inputs: the input(s) constituting a single sample.
    +348    """
    +349    inputs = {MemberId(k): v for k, v in {**kwargs, **(inputs or {})}.items()}
    +350
    +351    model_inputs = {get_member_id(d): d for d in model.inputs}
    +352    if unknown := {k for k in inputs if k not in model_inputs}:
    +353        raise ValueError(f"Got unexpected inputs: {unknown}")
    +354
    +355    if missing := {
    +356        k
    +357        for k, v in model_inputs.items()
    +358        if k not in inputs and not (isinstance(v, v0_5.InputTensorDescr) and v.optional)
    +359    }:
    +360        raise ValueError(f"Missing non-optional model inputs: {missing}")
    +361
    +362    return Sample(
    +363        members={
    +364            m: get_tensor(inputs[m], ipt)
    +365            for m, ipt in model_inputs.items()
    +366            if m in inputs
    +367        },
    +368        stat={} if stat is None else stat,
    +369        id=sample_id,
    +370    )
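A hedged usage sketch for create_sample_for_model; the RDF source, the array shape and the sample id are placeholders.

import numpy as np

from bioimageio.core.digest_spec import create_sample_for_model, get_member_ids
from bioimageio.spec import load_description

model = load_description("path/to/rdf.yaml")  # placeholder RDF source describing a valid model
first_input = get_member_ids(model.inputs)[0]
sample = create_sample_for_model(
    model,
    inputs={first_input: np.zeros((1, 1, 256, 256), dtype="float32")},  # hypothetical data
    sample_id="sample-0",
)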
     
    @@ -1141,54 +1084,54 @@
    Arguments:
    def - load_sample_for_model( *, model: Annotated[Union[bioimageio.spec.model.v0_4.ModelDescr, bioimageio.spec.model.v0_5.ModelDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], paths: Mapping[bioimageio.spec.model.v0_5.TensorId, pathlib.Path], axes: Optional[Mapping[bioimageio.spec.model.v0_5.TensorId, Sequence[Union[bioimageio.spec.model.v0_5.AxisId, Literal['b', 'i', 't', 'c', 'z', 'y', 'x'], Annotated[Union[bioimageio.spec.model.v0_5.BatchAxis, bioimageio.spec.model.v0_5.ChannelAxis, bioimageio.spec.model.v0_5.IndexInputAxis, bioimageio.spec.model.v0_5.TimeInputAxis, bioimageio.spec.model.v0_5.SpaceInputAxis], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.model.v0_5.BatchAxis, bioimageio.spec.model.v0_5.ChannelAxis, bioimageio.spec.model.v0_5.IndexOutputAxis, Annotated[Union[Annotated[bioimageio.spec.model.v0_5.TimeOutputAxis, Tag(tag='wo_halo')], Annotated[bioimageio.spec.model.v0_5.TimeOutputAxisWithHalo, Tag(tag='with_halo')]], Discriminator(discriminator=<function _get_halo_axis_discriminator_value>, custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[Annotated[bioimageio.spec.model.v0_5.SpaceOutputAxis, Tag(tag='wo_halo')], Annotated[bioimageio.spec.model.v0_5.SpaceOutputAxisWithHalo, Tag(tag='with_halo')]], Discriminator(discriminator=<function _get_halo_axis_discriminator_value>, custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], bioimageio.core.Axis]]]] = None, stat: Optional[Dict[Annotated[Union[Annotated[Union[bioimageio.core.stat_measures.SampleMean, bioimageio.core.stat_measures.SampleStd, bioimageio.core.stat_measures.SampleVar, bioimageio.core.stat_measures.SampleQuantile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.core.stat_measures.DatasetMean, bioimageio.core.stat_measures.DatasetStd, bioimageio.core.stat_measures.DatasetVar, bioimageio.core.stat_measures.DatasetPercentile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='scope', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Union[float, Annotated[bioimageio.core.Tensor, BeforeValidator(func=<function tensor_custom_before_validator>, json_schema_input_type=PydanticUndefined), PlainSerializer(func=<function tensor_custom_serializer>, return_type=PydanticUndefined, when_used='always')]]]] = None, sample_id: Optional[Hashable] = None): + load_sample_for_model( *, model: Annotated[Union[bioimageio.spec.model.v0_4.ModelDescr, bioimageio.spec.ModelDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], paths: Mapping[bioimageio.spec.model.v0_5.TensorId, pathlib.Path], axes: Optional[Mapping[bioimageio.spec.model.v0_5.TensorId, Sequence[Union[bioimageio.spec.model.v0_5.AxisId, Literal['b', 'i', 't', 'c', 'z', 'y', 'x'], Annotated[Union[bioimageio.spec.model.v0_5.BatchAxis, bioimageio.spec.model.v0_5.ChannelAxis, bioimageio.spec.model.v0_5.IndexInputAxis, bioimageio.spec.model.v0_5.TimeInputAxis, bioimageio.spec.model.v0_5.SpaceInputAxis], 
Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.model.v0_5.BatchAxis, bioimageio.spec.model.v0_5.ChannelAxis, bioimageio.spec.model.v0_5.IndexOutputAxis, Annotated[Union[Annotated[bioimageio.spec.model.v0_5.TimeOutputAxis, Tag(tag='wo_halo')], Annotated[bioimageio.spec.model.v0_5.TimeOutputAxisWithHalo, Tag(tag='with_halo')]], Discriminator(discriminator=<function _get_halo_axis_discriminator_value>, custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[Annotated[bioimageio.spec.model.v0_5.SpaceOutputAxis, Tag(tag='wo_halo')], Annotated[bioimageio.spec.model.v0_5.SpaceOutputAxisWithHalo, Tag(tag='with_halo')]], Discriminator(discriminator=<function _get_halo_axis_discriminator_value>, custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], bioimageio.core.Axis]]]] = None, stat: Optional[Dict[Annotated[Union[Annotated[Union[bioimageio.core.stat_measures.SampleMean, bioimageio.core.stat_measures.SampleStd, bioimageio.core.stat_measures.SampleVar, bioimageio.core.stat_measures.SampleQuantile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.core.stat_measures.DatasetMean, bioimageio.core.stat_measures.DatasetStd, bioimageio.core.stat_measures.DatasetVar, bioimageio.core.stat_measures.DatasetPercentile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='scope', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Union[float, Annotated[bioimageio.core.Tensor, BeforeValidator(func=<function tensor_custom_before_validator>, json_schema_input_type=PydanticUndefined), PlainSerializer(func=<function tensor_custom_serializer>, return_type=PydanticUndefined, when_used='always')]]]] = None, sample_id: Optional[Hashable] = None):
    -
    402def load_sample_for_model(
    -403    *,
    -404    model: AnyModelDescr,
    -405    paths: PerMember[Path],
    -406    axes: Optional[PerMember[Sequence[AxisLike]]] = None,
    -407    stat: Optional[Stat] = None,
    -408    sample_id: Optional[SampleId] = None,
    -409):
    -410    """load a single sample from `paths` that can be processed by `model`"""
    -411
    -412    if axes is None:
    -413        axes = {}
    -414
    -415    # make sure members are keyed by MemberId, not string
    -416    paths = {MemberId(k): v for k, v in paths.items()}
    -417    axes = {MemberId(k): v for k, v in axes.items()}
    -418
    -419    model_inputs = {get_member_id(d): d for d in model.inputs}
    -420
    -421    if unknown := {k for k in paths if k not in model_inputs}:
    -422        raise ValueError(f"Got unexpected paths for {unknown}")
    -423
    -424    if unknown := {k for k in axes if k not in model_inputs}:
    -425        raise ValueError(f"Got unexpected axes hints for: {unknown}")
    -426
    -427    members: Dict[MemberId, Tensor] = {}
    -428    for m, p in paths.items():
    -429        if m not in axes:
    -430            axes[m] = get_axes_infos(model_inputs[m])
    -431            logger.debug(
    -432                "loading '{}' from {} with default input axes {} ",
    -433                m,
    -434                p,
    -435                axes[m],
    -436            )
    -437        members[m] = load_tensor(p, axes[m])
    -438
    -439    return Sample(
    -440        members=members,
    -441        stat={} if stat is None else stat,
    -442        id=sample_id or tuple(sorted(paths.values())),
    -443    )
    +            
    373def load_sample_for_model(
    +374    *,
    +375    model: AnyModelDescr,
    +376    paths: PerMember[Path],
    +377    axes: Optional[PerMember[Sequence[AxisLike]]] = None,
    +378    stat: Optional[Stat] = None,
    +379    sample_id: Optional[SampleId] = None,
    +380):
    +381    """load a single sample from `paths` that can be processed by `model`"""
    +382
    +383    if axes is None:
    +384        axes = {}
    +385
    +386    # make sure members are keyed by MemberId, not string
    +387    paths = {MemberId(k): v for k, v in paths.items()}
    +388    axes = {MemberId(k): v for k, v in axes.items()}
    +389
    +390    model_inputs = {get_member_id(d): d for d in model.inputs}
    +391
    +392    if unknown := {k for k in paths if k not in model_inputs}:
    +393        raise ValueError(f"Got unexpected paths for {unknown}")
    +394
    +395    if unknown := {k for k in axes if k not in model_inputs}:
    +396        raise ValueError(f"Got unexpected axes hints for: {unknown}")
    +397
    +398    members: Dict[MemberId, Tensor] = {}
    +399    for m, p in paths.items():
    +400        if m not in axes:
    +401            axes[m] = get_axes_infos(model_inputs[m])
    +402            logger.debug(
    +403                "loading '{}' from {} with default input axes {} ",
    +404                m,
    +405                p,
    +406                axes[m],
    +407            )
    +408        members[m] = load_tensor(p, axes[m])
    +409
    +410    return Sample(
    +411        members=members,
    +412        stat={} if stat is None else stat,
    +413        id=sample_id or tuple(sorted(paths.values())),
    +414    )
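A hedged sketch for load_sample_for_model; the image path is a placeholder and axes hints are omitted, so the default axes from the model description are used.

from pathlib import Path

from bioimageio.core.digest_spec import get_member_ids, load_sample_for_model
from bioimageio.spec import load_description

model = load_description("path/to/rdf.yaml")   # placeholder RDF source describing a valid model
first_input = get_member_ids(model.inputs)[0]
sample = load_sample_for_model(
    model=model,
    paths={first_input: Path("image.tif")},     # hypothetical input image
    sample_id="sample-0",
)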
     
    diff --git a/bioimageio/core/io.html b/bioimageio/core/io.html new file mode 100644 index 00000000..09de65e3 --- /dev/null +++ b/bioimageio/core/io.html @@ -0,0 +1,680 @@ + + + + + + + bioimageio.core.io API documentation + + + + + + + + + + +
    +
    +

    +bioimageio.core.io

    + + + + + + +
      1import collections.abc
    +  2import warnings
    +  3from pathlib import Path, PurePosixPath
    +  4from typing import Any, Mapping, Optional, Sequence, Tuple, Union
    +  5
    +  6import h5py
    +  7import numpy as np
    +  8from imageio.v3 import imread, imwrite
    +  9from loguru import logger
    + 10from numpy.typing import NDArray
    + 11from pydantic import BaseModel, ConfigDict, TypeAdapter
    + 12
    + 13from bioimageio.spec.utils import load_array, save_array
    + 14
    + 15from .axis import AxisLike
    + 16from .common import PerMember
    + 17from .sample import Sample
    + 18from .stat_measures import DatasetMeasure, MeasureValue
    + 19from .tensor import Tensor
    + 20
    + 21DEFAULT_H5_DATASET_PATH = "data"
    + 22
    + 23
    + 24def load_image(path: Path, is_volume: Optional[bool] = None) -> NDArray[Any]:
    + 25    """load a single image as numpy array
    + 26
    + 27    Args:
    + 28        path: image path
    + 29        is_volume: deprecated
    + 30    """
    + 31    if is_volume is not None:
    + 32        warnings.warn("**is_volume** is deprecated and will be removed soon.")
    + 33
    + 34    file_path, subpath = _split_dataset_path(Path(path))
    + 35
    + 36    if file_path.suffix == ".npy":
    + 37        if subpath is not None:
    + 38            raise ValueError(f"Unexpected subpath {subpath} for .npy path {path}")
    + 39        return load_array(path)
    + 40    elif file_path.suffix in (".h5", ".hdf", ".hdf5"):
    + 41        if subpath is None:
    + 42            dataset_path = DEFAULT_H5_DATASET_PATH
    + 43        else:
    + 44            dataset_path = str(subpath)
    + 45
    + 46        with h5py.File(file_path, "r") as f:
    + 47            h5_dataset = f.get(  # pyright: ignore[reportUnknownVariableType]
    + 48                dataset_path
    + 49            )
    + 50            if not isinstance(h5_dataset, h5py.Dataset):
    + 51                raise ValueError(
    + 52                    f"{path} is not of type {h5py.Dataset}, but has type "
    + 53                    + str(
    + 54                        type(h5_dataset)  # pyright: ignore[reportUnknownArgumentType]
    + 55                    )
    + 56                )
    + 57            image: NDArray[Any]
    + 58            image = h5_dataset[:]  # pyright: ignore[reportUnknownVariableType]
    + 59            assert isinstance(image, np.ndarray), type(
    + 60                image  # pyright: ignore[reportUnknownArgumentType]
    + 61            )
    + 62            return image  # pyright: ignore[reportUnknownVariableType]
    + 63    else:
    + 64        return imread(path)  # pyright: ignore[reportUnknownVariableType]
    + 65
    + 66
    + 67def load_tensor(path: Path, axes: Optional[Sequence[AxisLike]] = None) -> Tensor:
    + 68    # TODO: load axis meta data
    + 69    array = load_image(path)
    + 70
    + 71    return Tensor.from_numpy(array, dims=axes)
    + 72
    + 73
    + 74def _split_dataset_path(path: Path) -> Tuple[Path, Optional[PurePosixPath]]:
    + 75    """Split off subpath (e.g. internal  h5 dataset path)
    + 76    from a file path following a file extension.
    + 77
    + 78    Examples:
    + 79        >>> _split_dataset_path(Path("my_file.h5/dataset"))
    + 80        (PosixPath('my_file.h5'), PurePosixPath('dataset'))
    + 81
    + 82        If no suffix is detected the path is returned with
    + 83        >>> _split_dataset_path(Path("my_plain_file"))
    + 84        (PosixPath('my_plain_file'), None)
    + 85
    + 86    """
    + 87    if path.suffix:
    + 88        return path, None
    + 89
    + 90    for p in path.parents:
    + 91        if p.suffix:
    + 92            return p, PurePosixPath(path.relative_to(p))
    + 93
    + 94    return path, None
    + 95
    + 96
    + 97def save_tensor(path: Path, tensor: Tensor) -> None:
    + 98    # TODO: save axis meta data
    + 99
    +100    data: NDArray[Any] = tensor.data.to_numpy()
    +101    file_path, subpath = _split_dataset_path(Path(path))
    +102    if not file_path.suffix:
    +103        raise ValueError(f"No suffix (needed to decide file format) found in {path}")
    +104
    +105    file_path.parent.mkdir(exist_ok=True, parents=True)
    +106    if file_path.suffix == ".npy":
    +107        if subpath is not None:
    +108            raise ValueError(f"Unexpected subpath {subpath} found in .npy path {path}")
    +109        save_array(file_path, data)
    +110    elif file_path.suffix in (".h5", ".hdf", ".hdf5"):
    +111        if subpath is None:
    +112            dataset_path = DEFAULT_H5_DATASET_PATH
    +113        else:
    +114            dataset_path = str(subpath)
    +115
    +116        with h5py.File(file_path, "a") as f:
    +117            if dataset_path in f:
    +118                del f[dataset_path]
    +119
    +120            _ = f.create_dataset(dataset_path, data=data, chunks=True)
    +121    else:
    +122        # if singleton_axes := [a for a, s in tensor.tagged_shape.items() if s == 1]:
    +123        #     tensor = tensor[{a: 0 for a in singleton_axes}]
    +124        #     singleton_axes_msg = f"(without singleton axes {singleton_axes}) "
    +125        # else:
    +126        singleton_axes_msg = ""
    +127
    +128        logger.debug(
    +129            "writing tensor {} {}to {}",
    +130            dict(tensor.tagged_shape),
    +131            singleton_axes_msg,
    +132            path,
    +133        )
    +134        imwrite(path, data)
    +135
    +136
    +137def save_sample(path: Union[Path, str, PerMember[Path]], sample: Sample) -> None:
    +138    """save a sample to path
    +139
    +140    If `path` is a pathlib.Path or a string it must contain `{member_id}` and may contain `{sample_id}`,
    +141    which are resolved with the `sample` object.
    +142    """
    +143
    +144    if not isinstance(path, collections.abc.Mapping) and "{member_id}" not in str(path):
    +145        raise ValueError(f"missing `{{member_id}}` in path {path}")
    +146
    +147    for m, t in sample.members.items():
    +148        if isinstance(path, collections.abc.Mapping):
    +149            p = path[m]
    +150        else:
    +151            p = Path(str(path).format(sample_id=sample.id, member_id=m))
    +152
    +153        save_tensor(p, t)
    +154
    +155
    +156class _SerializedDatasetStatsEntry(
    +157    BaseModel, frozen=True, arbitrary_types_allowed=True
    +158):
    +159    measure: DatasetMeasure
    +160    value: MeasureValue
    +161
    +162
    +163_stat_adapter = TypeAdapter(
    +164    Sequence[_SerializedDatasetStatsEntry],
    +165    config=ConfigDict(arbitrary_types_allowed=True),
    +166)
    +167
    +168
    +169def save_dataset_stat(stat: Mapping[DatasetMeasure, MeasureValue], path: Path):
    +170    serializable = [
    +171        _SerializedDatasetStatsEntry(measure=k, value=v) for k, v in stat.items()
    +172    ]
    +173    _ = path.write_bytes(_stat_adapter.dump_json(serializable))
    +174
    +175
    +176def load_dataset_stat(path: Path):
    +177    seq = _stat_adapter.validate_json(path.read_bytes())
    +178    return {e.measure: e.value for e in seq}
    +
    + + +
    +
    +
    + DEFAULT_H5_DATASET_PATH = +'data' + + +
    + + + + +
    +
    + +
    + + def + load_image( path: pathlib.Path, is_volume: Optional[bool] = None) -> numpy.ndarray[typing.Any, numpy.dtype[typing.Any]]: + + + +
    + +
    25def load_image(path: Path, is_volume: Optional[bool] = None) -> NDArray[Any]:
    +26    """load a single image as numpy array
    +27
    +28    Args:
    +29        path: image path
    +30        is_volume: deprecated
    +31    """
    +32    if is_volume is not None:
    +33        warnings.warn("**is_volume** is deprecated and will be removed soon.")
    +34
    +35    file_path, subpath = _split_dataset_path(Path(path))
    +36
    +37    if file_path.suffix == ".npy":
    +38        if subpath is not None:
    +39            raise ValueError(f"Unexpected subpath {subpath} for .npy path {path}")
    +40        return load_array(path)
    +41    elif file_path.suffix in (".h5", ".hdf", ".hdf5"):
    +42        if subpath is None:
    +43            dataset_path = DEFAULT_H5_DATASET_PATH
    +44        else:
    +45            dataset_path = str(subpath)
    +46
    +47        with h5py.File(file_path, "r") as f:
    +48            h5_dataset = f.get(  # pyright: ignore[reportUnknownVariableType]
    +49                dataset_path
    +50            )
    +51            if not isinstance(h5_dataset, h5py.Dataset):
    +52                raise ValueError(
    +53                    f"{path} is not of type {h5py.Dataset}, but has type "
    +54                    + str(
    +55                        type(h5_dataset)  # pyright: ignore[reportUnknownArgumentType]
    +56                    )
    +57                )
    +58            image: NDArray[Any]
    +59            image = h5_dataset[:]  # pyright: ignore[reportUnknownVariableType]
    +60            assert isinstance(image, np.ndarray), type(
    +61                image  # pyright: ignore[reportUnknownArgumentType]
    +62            )
    +63            return image  # pyright: ignore[reportUnknownVariableType]
    +64    else:
    +65        return imread(path)  # pyright: ignore[reportUnknownVariableType]
    +
    + + +

    load a single image as numpy array

    + +
    Arguments:
    + +
      +
    • path: image path
    • +
    • is_volume: deprecated
    • +
    +
    + + +
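A brief sketch of load_image with placeholder file names, covering the three branches above (.npy, hdf5 with an optional in-file dataset path, and everything else via imageio):

from pathlib import Path

from bioimageio.core.io import load_image

arr = load_image(Path("tensor.npy"))      # .npy is loaded with bioimageio.spec's load_array
vol = load_image(Path("volume.h5/raw"))   # trailing 'raw' selects the hdf5 dataset ('data' by default)
img = load_image(Path("image.png"))       # other formats are read with imageio's imread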
    +
    + +
    + + def + load_tensor( path: pathlib.Path, axes: Optional[Sequence[Union[bioimageio.spec.model.v0_5.AxisId, Literal['b', 'i', 't', 'c', 'z', 'y', 'x'], Annotated[Union[bioimageio.spec.model.v0_5.BatchAxis, bioimageio.spec.model.v0_5.ChannelAxis, bioimageio.spec.model.v0_5.IndexInputAxis, bioimageio.spec.model.v0_5.TimeInputAxis, bioimageio.spec.model.v0_5.SpaceInputAxis], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.model.v0_5.BatchAxis, bioimageio.spec.model.v0_5.ChannelAxis, bioimageio.spec.model.v0_5.IndexOutputAxis, Annotated[Union[Annotated[bioimageio.spec.model.v0_5.TimeOutputAxis, Tag(tag='wo_halo')], Annotated[bioimageio.spec.model.v0_5.TimeOutputAxisWithHalo, Tag(tag='with_halo')]], Discriminator(discriminator=<function _get_halo_axis_discriminator_value>, custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[Annotated[bioimageio.spec.model.v0_5.SpaceOutputAxis, Tag(tag='wo_halo')], Annotated[bioimageio.spec.model.v0_5.SpaceOutputAxisWithHalo, Tag(tag='with_halo')]], Discriminator(discriminator=<function _get_halo_axis_discriminator_value>, custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], bioimageio.core.Axis]]] = None) -> bioimageio.core.Tensor: + + + +
    + +
    68def load_tensor(path: Path, axes: Optional[Sequence[AxisLike]] = None) -> Tensor:
    +69    # TODO: load axis meta data
    +70    array = load_image(path)
    +71
    +72    return Tensor.from_numpy(array, dims=axes)
    +
    + + + + +
    +
    + +
    + + def + save_tensor(path: pathlib.Path, tensor: bioimageio.core.Tensor) -> None: + + + +
    + +
     98def save_tensor(path: Path, tensor: Tensor) -> None:
    + 99    # TODO: save axis meta data
    +100
    +101    data: NDArray[Any] = tensor.data.to_numpy()
    +102    file_path, subpath = _split_dataset_path(Path(path))
    +103    if not file_path.suffix:
    +104        raise ValueError(f"No suffix (needed to decide file format) found in {path}")
    +105
    +106    file_path.parent.mkdir(exist_ok=True, parents=True)
    +107    if file_path.suffix == ".npy":
    +108        if subpath is not None:
    +109            raise ValueError(f"Unexpected subpath {subpath} found in .npy path {path}")
    +110        save_array(file_path, data)
    +111    elif file_path.suffix in (".h5", ".hdf", ".hdf5"):
    +112        if subpath is None:
    +113            dataset_path = DEFAULT_H5_DATASET_PATH
    +114        else:
    +115            dataset_path = str(subpath)
    +116
    +117        with h5py.File(file_path, "a") as f:
    +118            if dataset_path in f:
    +119                del f[dataset_path]
    +120
    +121            _ = f.create_dataset(dataset_path, data=data, chunks=True)
    +122    else:
    +123        # if singleton_axes := [a for a, s in tensor.tagged_shape.items() if s == 1]:
    +124        #     tensor = tensor[{a: 0 for a in singleton_axes}]
    +125        #     singleton_axes_msg = f"(without singleton axes {singleton_axes}) "
    +126        # else:
    +127        singleton_axes_msg = ""
    +128
    +129        logger.debug(
    +130            "writing tensor {} {}to {}",
    +131            dict(tensor.tagged_shape),
    +132            singleton_axes_msg,
    +133            path,
    +134        )
    +135        imwrite(path, data)
    +
    + + + + +
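A small round-trip sketch for `save_tensor`/`load_tensor` (file name and axis letters are illustrative; axis metadata is not yet persisted, so the axes are passed explicitly when loading):

```
from pathlib import Path

import numpy as np

from bioimageio.core import Tensor
from bioimageio.core.io import load_tensor, save_tensor

# build a 2D tensor with explicit axis ids
tensor = Tensor.from_numpy(np.zeros((256, 256), dtype="float32"), dims=("y", "x"))
save_tensor(Path("example.npy"), tensor)  # .npy, .h5/.hdf5 or common image formats
restored = load_tensor(Path("example.npy"), axes=("y", "x"))
```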
    +
    + +
    + + def + save_sample( path: Union[pathlib.Path, str, Mapping[bioimageio.spec.model.v0_5.TensorId, pathlib.Path]], sample: bioimageio.core.Sample) -> None: + + + +
    + +
    138def save_sample(path: Union[Path, str, PerMember[Path]], sample: Sample) -> None:
    +139    """save a sample to path
    +140
    +141    If `path` is a pathlib.Path or a string it must contain `{member_id}` and may contain `{sample_id}`,
    +142    which are resolved with the `sample` object.
    +143    """
    +144
    +145    if not isinstance(path, collections.abc.Mapping) and "{member_id}" not in str(path):
    +146        raise ValueError(f"missing `{{member_id}}` in path {path}")
    +147
    +148    for m, t in sample.members.items():
    +149        if isinstance(path, collections.abc.Mapping):
    +150            p = path[m]
    +151        else:
    +152            p = Path(str(path).format(sample_id=sample.id, member_id=m))
    +153
    +154        save_tensor(p, t)
    +
    + + +

    save a sample to path

    + +

    If path is a pathlib.Path or a string it must contain {member_id} and may contain {sample_id}, +which are resolved with the sample object.

    +
    + + +
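A hedged sketch of `save_sample` with a path template (the `Sample` construction below assumes the dataclass fields `members`, `stat` and `id`; the member id, shape and output path are placeholders):

```
import numpy as np

from bioimageio.core import Sample, Tensor
from bioimageio.core.common import MemberId
from bioimageio.core.io import save_sample

sample = Sample(
    members={
        MemberId("raw"): Tensor.from_numpy(
            np.zeros((64, 64), dtype="uint8"), dims=("y", "x")
        )
    },
    stat={},
    id="sample0",
)
# writes one file per member, e.g. out/sample0_raw.tif
save_sample("out/{sample_id}_{member_id}.tif", sample)
```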
    +
    + +
    + + def + save_dataset_stat( stat: Mapping[Annotated[Union[bioimageio.core.stat_measures.DatasetMean, bioimageio.core.stat_measures.DatasetStd, bioimageio.core.stat_measures.DatasetVar, bioimageio.core.stat_measures.DatasetPercentile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Union[float, Annotated[bioimageio.core.Tensor, BeforeValidator(func=<function tensor_custom_before_validator>, json_schema_input_type=PydanticUndefined), PlainSerializer(func=<function tensor_custom_serializer>, return_type=PydanticUndefined, when_used='always')]]], path: pathlib.Path): + + + +
    + +
    170def save_dataset_stat(stat: Mapping[DatasetMeasure, MeasureValue], path: Path):
    +171    serializable = [
    +172        _SerializedDatasetStatsEntry(measure=k, value=v) for k, v in stat.items()
    +173    ]
    +174    _ = path.write_bytes(_stat_adapter.dump_json(serializable))
    +
    + + + + +
    +
    + +
    + + def + load_dataset_stat(path: pathlib.Path): + + + +
    + +
    177def load_dataset_stat(path: Path):
    +178    seq = _stat_adapter.validate_json(path.read_bytes())
    +179    return {e.measure: e.value for e in seq}
    +
    + + + + +
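`save_dataset_stat` and `load_dataset_stat` round-trip dataset statistics through JSON; a minimal sketch (the `stats` mapping would normally be collected elsewhere, e.g. by a `StatsCalculator`):

```
from pathlib import Path

from bioimageio.core.io import load_dataset_stat, save_dataset_stat

stats = {}  # Mapping[DatasetMeasure, MeasureValue], collected elsewhere
save_dataset_stat(stats, Path("dataset_stats.json"))
restored = load_dataset_stat(Path("dataset_stats.json"))
```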
    +
    + + \ No newline at end of file diff --git a/bioimageio/core/model_adapters.html b/bioimageio/core/model_adapters.html new file mode 100644 index 00000000..3b386f73 --- /dev/null +++ b/bioimageio/core/model_adapters.html @@ -0,0 +1,814 @@ + + + + + + + bioimageio.core.model_adapters API documentation + + + + + + + + + + +
    +
    +

    +bioimageio.core.model_adapters

    + + + + + + +
    1from ._model_adapter import ModelAdapter, create_model_adapter, get_weight_formats
    +2
    +3__all__ = [
    +4    "ModelAdapter",
    +5    "create_model_adapter",
    +6    "get_weight_formats",
    +7]
    +
    + + +
    +
    + +
    + + class + ModelAdapter(abc.ABC): + + + +
    + +
     23class ModelAdapter(ABC):
    + 24    """
    + 25    Represents model *without* any preprocessing or postprocessing.
    + 26
    + 27    ```
    + 28    from bioimageio.core import load_description
    + 29
    + 30    model = load_description(...)
    + 31
    + 32    # option 1:
    + 33    adapter = ModelAdapter.create(model)
    + 34    adapter.forward(...)
    + 35    adapter.unload()
    + 36
    + 37    # option 2:
    + 38    with ModelAdapter.create(model) as adapter:
    + 39        adapter.forward(...)
    + 40    ```
    + 41    """
    + 42
    + 43    @final
    + 44    @classmethod
    + 45    def create(
    + 46        cls,
    + 47        model_description: Union[v0_4.ModelDescr, v0_5.ModelDescr],
    + 48        *,
    + 49        devices: Optional[Sequence[str]] = None,
    + 50        weight_format_priority_order: Optional[Sequence[WeightsFormat]] = None,
    + 51    ):
    + 52        """
+ 53        Creates a model adapter based on the passed spec
+ 54        Note: All specific adapters should be created inside this function to prevent different framework
+ 55        initializations from interfering with each other
    + 56        """
    + 57        if not isinstance(model_description, (v0_4.ModelDescr, v0_5.ModelDescr)):
    + 58            raise TypeError(
    + 59                f"expected v0_4.ModelDescr or v0_5.ModelDescr, but got {type(model_description)}"
    + 60            )
    + 61
    + 62        weights = model_description.weights
    + 63        errors: List[Tuple[WeightsFormat, Exception]] = []
    + 64        weight_format_priority_order = (
    + 65            DEFAULT_WEIGHT_FORMAT_PRIORITY_ORDER
    + 66            if weight_format_priority_order is None
    + 67            else weight_format_priority_order
    + 68        )
    + 69        # limit weight formats to the ones present
    + 70        weight_format_priority_order = [
    + 71            w for w in weight_format_priority_order if getattr(weights, w) is not None
    + 72        ]
    + 73
    + 74        for wf in weight_format_priority_order:
    + 75            if wf == "pytorch_state_dict" and weights.pytorch_state_dict is not None:
    + 76                try:
    + 77                    from ._pytorch_model_adapter import PytorchModelAdapter
    + 78
    + 79                    return PytorchModelAdapter(
    + 80                        outputs=model_description.outputs,
    + 81                        weights=weights.pytorch_state_dict,
    + 82                        devices=devices,
    + 83                    )
    + 84                except Exception as e:
    + 85                    errors.append((wf, e))
    + 86            elif (
    + 87                wf == "tensorflow_saved_model_bundle"
    + 88                and weights.tensorflow_saved_model_bundle is not None
    + 89            ):
    + 90                try:
    + 91                    from ._tensorflow_model_adapter import TensorflowModelAdapter
    + 92
    + 93                    return TensorflowModelAdapter(
    + 94                        model_description=model_description, devices=devices
    + 95                    )
    + 96                except Exception as e:
    + 97                    errors.append((wf, e))
    + 98            elif wf == "onnx" and weights.onnx is not None:
    + 99                try:
    +100                    from ._onnx_model_adapter import ONNXModelAdapter
    +101
    +102                    return ONNXModelAdapter(
    +103                        model_description=model_description, devices=devices
    +104                    )
    +105                except Exception as e:
    +106                    errors.append((wf, e))
    +107            elif wf == "torchscript" and weights.torchscript is not None:
    +108                try:
    +109                    from ._torchscript_model_adapter import TorchscriptModelAdapter
    +110
    +111                    return TorchscriptModelAdapter(
    +112                        model_description=model_description, devices=devices
    +113                    )
    +114                except Exception as e:
    +115                    errors.append((wf, e))
    +116            elif wf == "keras_hdf5" and weights.keras_hdf5 is not None:
    +117                # keras can either be installed as a separate package or used as part of tensorflow
    +118                # we try to first import the keras model adapter using the separate package and,
    +119                # if it is not available, try to load the one using tf
    +120                try:
    +121                    from ._keras_model_adapter import (
    +122                        KerasModelAdapter,
    +123                        keras,  # type: ignore
    +124                    )
    +125
    +126                    if keras is None:
    +127                        from ._tensorflow_model_adapter import KerasModelAdapter
    +128
    +129                    return KerasModelAdapter(
    +130                        model_description=model_description, devices=devices
    +131                    )
    +132                except Exception as e:
    +133                    errors.append((wf, e))
    +134
    +135        assert errors
    +136        if len(weight_format_priority_order) == 1:
    +137            assert len(errors) == 1
    +138            raise ValueError(
    +139                f"The '{weight_format_priority_order[0]}' model adapter could not be created"
    +140                + f" in this environment:\n{errors[0][1].__class__.__name__}({errors[0][1]}).\n\n"
    +141            )
    +142
    +143        else:
    +144            error_list = "\n - ".join(
    +145                f"{wf}: {e.__class__.__name__}({e})" for wf, e in errors
    +146            )
    +147            raise ValueError(
    +148                "None of the weight format specific model adapters could be created"
    +149                + f" in this environment. Errors are:\n\n{error_list}.\n\n"
    +150            )
    +151
    +152    @final
    +153    def load(self, *, devices: Optional[Sequence[str]] = None) -> None:
    +154        warnings.warn("Deprecated. ModelAdapter is loaded on initialization")
    +155
    +156    @abstractmethod
    +157    def forward(self, *input_tensors: Optional[Tensor]) -> List[Optional[Tensor]]:
    +158        """
    +159        Run forward pass of model to get model predictions
    +160        """
    +161        # TODO: handle tensor.transpose in here and make _forward_impl the abstract impl
    +162
    +163    @abstractmethod
    +164    def unload(self):
    +165        """
    +166        Unload model from any devices, freeing their memory.
+167        The model adapter should be considered unusable afterwards.
    +168        """
    +
    + + +

    Represents model without any preprocessing or postprocessing.

    + +
    from bioimageio.core import load_description
    +
    +model = load_description(...)
    +
    +# option 1:
    +adapter = ModelAdapter.create(model)
    +adapter.forward(...)
    +adapter.unload()
    +
    +# option 2:
    +with ModelAdapter.create(model) as adapter:
    +    adapter.forward(...)
    +
    +
    + + +
    + +
    +
    @final
    +
    @classmethod
    + + def + create( cls, model_description: Union[bioimageio.spec.model.v0_4.ModelDescr, bioimageio.spec.ModelDescr], *, devices: Optional[Sequence[str]] = None, weight_format_priority_order: Optional[Sequence[Literal['keras_hdf5', 'onnx', 'pytorch_state_dict', 'tensorflow_js', 'tensorflow_saved_model_bundle', 'torchscript']]] = None): + + + +
    + +
     43    @final
    + 44    @classmethod
    + 45    def create(
    + 46        cls,
    + 47        model_description: Union[v0_4.ModelDescr, v0_5.ModelDescr],
    + 48        *,
    + 49        devices: Optional[Sequence[str]] = None,
    + 50        weight_format_priority_order: Optional[Sequence[WeightsFormat]] = None,
    + 51    ):
    + 52        """
+ 53        Creates a model adapter based on the passed spec
+ 54        Note: All specific adapters should be created inside this function to prevent different framework
+ 55        initializations from interfering with each other
    + 56        """
    + 57        if not isinstance(model_description, (v0_4.ModelDescr, v0_5.ModelDescr)):
    + 58            raise TypeError(
    + 59                f"expected v0_4.ModelDescr or v0_5.ModelDescr, but got {type(model_description)}"
    + 60            )
    + 61
    + 62        weights = model_description.weights
    + 63        errors: List[Tuple[WeightsFormat, Exception]] = []
    + 64        weight_format_priority_order = (
    + 65            DEFAULT_WEIGHT_FORMAT_PRIORITY_ORDER
    + 66            if weight_format_priority_order is None
    + 67            else weight_format_priority_order
    + 68        )
    + 69        # limit weight formats to the ones present
    + 70        weight_format_priority_order = [
    + 71            w for w in weight_format_priority_order if getattr(weights, w) is not None
    + 72        ]
    + 73
    + 74        for wf in weight_format_priority_order:
    + 75            if wf == "pytorch_state_dict" and weights.pytorch_state_dict is not None:
    + 76                try:
    + 77                    from ._pytorch_model_adapter import PytorchModelAdapter
    + 78
    + 79                    return PytorchModelAdapter(
    + 80                        outputs=model_description.outputs,
    + 81                        weights=weights.pytorch_state_dict,
    + 82                        devices=devices,
    + 83                    )
    + 84                except Exception as e:
    + 85                    errors.append((wf, e))
    + 86            elif (
    + 87                wf == "tensorflow_saved_model_bundle"
    + 88                and weights.tensorflow_saved_model_bundle is not None
    + 89            ):
    + 90                try:
    + 91                    from ._tensorflow_model_adapter import TensorflowModelAdapter
    + 92
    + 93                    return TensorflowModelAdapter(
    + 94                        model_description=model_description, devices=devices
    + 95                    )
    + 96                except Exception as e:
    + 97                    errors.append((wf, e))
    + 98            elif wf == "onnx" and weights.onnx is not None:
    + 99                try:
    +100                    from ._onnx_model_adapter import ONNXModelAdapter
    +101
    +102                    return ONNXModelAdapter(
    +103                        model_description=model_description, devices=devices
    +104                    )
    +105                except Exception as e:
    +106                    errors.append((wf, e))
    +107            elif wf == "torchscript" and weights.torchscript is not None:
    +108                try:
    +109                    from ._torchscript_model_adapter import TorchscriptModelAdapter
    +110
    +111                    return TorchscriptModelAdapter(
    +112                        model_description=model_description, devices=devices
    +113                    )
    +114                except Exception as e:
    +115                    errors.append((wf, e))
    +116            elif wf == "keras_hdf5" and weights.keras_hdf5 is not None:
    +117                # keras can either be installed as a separate package or used as part of tensorflow
    +118                # we try to first import the keras model adapter using the separate package and,
    +119                # if it is not available, try to load the one using tf
    +120                try:
    +121                    from ._keras_model_adapter import (
    +122                        KerasModelAdapter,
    +123                        keras,  # type: ignore
    +124                    )
    +125
    +126                    if keras is None:
    +127                        from ._tensorflow_model_adapter import KerasModelAdapter
    +128
    +129                    return KerasModelAdapter(
    +130                        model_description=model_description, devices=devices
    +131                    )
    +132                except Exception as e:
    +133                    errors.append((wf, e))
    +134
    +135        assert errors
    +136        if len(weight_format_priority_order) == 1:
    +137            assert len(errors) == 1
    +138            raise ValueError(
    +139                f"The '{weight_format_priority_order[0]}' model adapter could not be created"
    +140                + f" in this environment:\n{errors[0][1].__class__.__name__}({errors[0][1]}).\n\n"
    +141            )
    +142
    +143        else:
    +144            error_list = "\n - ".join(
    +145                f"{wf}: {e.__class__.__name__}({e})" for wf, e in errors
    +146            )
    +147            raise ValueError(
    +148                "None of the weight format specific model adapters could be created"
    +149                + f" in this environment. Errors are:\n\n{error_list}.\n\n"
    +150            )
    +
    + + +

Creates a model adapter based on the passed spec +Note: All specific adapters should be created inside this function to prevent different framework +initializations from interfering with each other

    +
    + + +
    +
    + +
    +
    @final
    + + def + load(self, *, devices: Optional[Sequence[str]] = None) -> None: + + + +
    + +
    152    @final
    +153    def load(self, *, devices: Optional[Sequence[str]] = None) -> None:
    +154        warnings.warn("Deprecated. ModelAdapter is loaded on initialization")
    +
    + + + + +
    +
    + +
    +
    @abstractmethod
    + + def + forward( self, *input_tensors: Optional[bioimageio.core.Tensor]) -> List[Optional[bioimageio.core.Tensor]]: + + + +
    + +
    156    @abstractmethod
    +157    def forward(self, *input_tensors: Optional[Tensor]) -> List[Optional[Tensor]]:
    +158        """
    +159        Run forward pass of model to get model predictions
    +160        """
    +161        # TODO: handle tensor.transpose in here and make _forward_impl the abstract impl
    +
    + + +

    Run forward pass of model to get model predictions

    +
    + + +
    +
    + +
    +
    @abstractmethod
    + + def + unload(self): + + + +
    + +
    163    @abstractmethod
    +164    def unload(self):
    +165        """
    +166        Unload model from any devices, freeing their memory.
+167        The model adapter should be considered unusable afterwards.
    +168        """
    +
    + + +

Unload model from any devices, freeing their memory. +The model adapter should be considered unusable afterwards.

    +
    + + +
    +
    +
    + +
    +
    @final
    +
    @classmethod
    + + def + create_model_adapter( model_description: Union[bioimageio.spec.model.v0_4.ModelDescr, bioimageio.spec.ModelDescr], *, devices: Optional[Sequence[str]] = None, weight_format_priority_order: Optional[Sequence[Literal['keras_hdf5', 'onnx', 'pytorch_state_dict', 'tensorflow_js', 'tensorflow_saved_model_bundle', 'torchscript']]] = None): + + + +
    + +
     43    @final
    + 44    @classmethod
    + 45    def create(
    + 46        cls,
    + 47        model_description: Union[v0_4.ModelDescr, v0_5.ModelDescr],
    + 48        *,
    + 49        devices: Optional[Sequence[str]] = None,
    + 50        weight_format_priority_order: Optional[Sequence[WeightsFormat]] = None,
    + 51    ):
    + 52        """
+ 53        Creates a model adapter based on the passed spec
+ 54        Note: All specific adapters should be created inside this function to prevent different framework
+ 55        initializations from interfering with each other
    + 56        """
    + 57        if not isinstance(model_description, (v0_4.ModelDescr, v0_5.ModelDescr)):
    + 58            raise TypeError(
    + 59                f"expected v0_4.ModelDescr or v0_5.ModelDescr, but got {type(model_description)}"
    + 60            )
    + 61
    + 62        weights = model_description.weights
    + 63        errors: List[Tuple[WeightsFormat, Exception]] = []
    + 64        weight_format_priority_order = (
    + 65            DEFAULT_WEIGHT_FORMAT_PRIORITY_ORDER
    + 66            if weight_format_priority_order is None
    + 67            else weight_format_priority_order
    + 68        )
    + 69        # limit weight formats to the ones present
    + 70        weight_format_priority_order = [
    + 71            w for w in weight_format_priority_order if getattr(weights, w) is not None
    + 72        ]
    + 73
    + 74        for wf in weight_format_priority_order:
    + 75            if wf == "pytorch_state_dict" and weights.pytorch_state_dict is not None:
    + 76                try:
    + 77                    from ._pytorch_model_adapter import PytorchModelAdapter
    + 78
    + 79                    return PytorchModelAdapter(
    + 80                        outputs=model_description.outputs,
    + 81                        weights=weights.pytorch_state_dict,
    + 82                        devices=devices,
    + 83                    )
    + 84                except Exception as e:
    + 85                    errors.append((wf, e))
    + 86            elif (
    + 87                wf == "tensorflow_saved_model_bundle"
    + 88                and weights.tensorflow_saved_model_bundle is not None
    + 89            ):
    + 90                try:
    + 91                    from ._tensorflow_model_adapter import TensorflowModelAdapter
    + 92
    + 93                    return TensorflowModelAdapter(
    + 94                        model_description=model_description, devices=devices
    + 95                    )
    + 96                except Exception as e:
    + 97                    errors.append((wf, e))
    + 98            elif wf == "onnx" and weights.onnx is not None:
    + 99                try:
    +100                    from ._onnx_model_adapter import ONNXModelAdapter
    +101
    +102                    return ONNXModelAdapter(
    +103                        model_description=model_description, devices=devices
    +104                    )
    +105                except Exception as e:
    +106                    errors.append((wf, e))
    +107            elif wf == "torchscript" and weights.torchscript is not None:
    +108                try:
    +109                    from ._torchscript_model_adapter import TorchscriptModelAdapter
    +110
    +111                    return TorchscriptModelAdapter(
    +112                        model_description=model_description, devices=devices
    +113                    )
    +114                except Exception as e:
    +115                    errors.append((wf, e))
    +116            elif wf == "keras_hdf5" and weights.keras_hdf5 is not None:
    +117                # keras can either be installed as a separate package or used as part of tensorflow
    +118                # we try to first import the keras model adapter using the separate package and,
    +119                # if it is not available, try to load the one using tf
    +120                try:
    +121                    from ._keras_model_adapter import (
    +122                        KerasModelAdapter,
    +123                        keras,  # type: ignore
    +124                    )
    +125
    +126                    if keras is None:
    +127                        from ._tensorflow_model_adapter import KerasModelAdapter
    +128
    +129                    return KerasModelAdapter(
    +130                        model_description=model_description, devices=devices
    +131                    )
    +132                except Exception as e:
    +133                    errors.append((wf, e))
    +134
    +135        assert errors
    +136        if len(weight_format_priority_order) == 1:
    +137            assert len(errors) == 1
    +138            raise ValueError(
    +139                f"The '{weight_format_priority_order[0]}' model adapter could not be created"
    +140                + f" in this environment:\n{errors[0][1].__class__.__name__}({errors[0][1]}).\n\n"
    +141            )
    +142
    +143        else:
    +144            error_list = "\n - ".join(
    +145                f"{wf}: {e.__class__.__name__}({e})" for wf, e in errors
    +146            )
    +147            raise ValueError(
    +148                "None of the weight format specific model adapters could be created"
    +149                + f" in this environment. Errors are:\n\n{error_list}.\n\n"
    +150            )
    +
    + + +

Creates a model adapter based on the passed spec +Note: All specific adapters should be created inside this function to prevent different framework +initializations from interfering with each other

    +
    + + +
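A hedged sketch of creating an adapter while restricting it to specific weight formats (the RDF source is a placeholder; `create_model_adapter` is the module-level alias of `ModelAdapter.create`):

```
from bioimageio.core import load_description
from bioimageio.core.model_adapters import create_model_adapter

model = load_description("path/to/rdf.yaml")  # placeholder RDF source
adapter = create_model_adapter(
    model, weight_format_priority_order=["onnx", "pytorch_state_dict"]
)
# outputs = adapter.forward(*input_tensors)  # input_tensors: bioimageio.core.Tensor objects
adapter.unload()
```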
    +
    + +
    + + def + get_weight_formats() -> List[str]: + + + +
    + +
    171def get_weight_formats() -> List[str]:
    +172    """
    +173    Return list of supported weight types
    +174    """
    +175    return list(DEFAULT_WEIGHT_FORMAT_PRIORITY_ORDER)
    +
    + + +

    Return list of supported weight types

    +
    + + +
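For example:

```
from bioimageio.core.model_adapters import get_weight_formats

print(get_weight_formats())  # supported weight format names in default priority order
```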
    +
    + + \ No newline at end of file diff --git a/bioimageio/core/prediction.html b/bioimageio/core/prediction.html new file mode 100644 index 00000000..4e9ca22f --- /dev/null +++ b/bioimageio/core/prediction.html @@ -0,0 +1,687 @@ + + + + + + + bioimageio.core.prediction API documentation + + + + + + + + + + +
    +
    +

    +bioimageio.core.prediction

    + + + + + + +
      1import collections.abc
    +  2from pathlib import Path
    +  3from typing import (
    +  4    Any,
    +  5    Hashable,
    +  6    Iterable,
    +  7    Iterator,
    +  8    Mapping,
    +  9    Optional,
    + 10    Tuple,
    + 11    Union,
    + 12)
    + 13
    + 14import xarray as xr
    + 15from loguru import logger
    + 16from numpy.typing import NDArray
    + 17from tqdm import tqdm
    + 18
    + 19from bioimageio.spec import load_description
    + 20from bioimageio.spec.common import PermissiveFileSource
    + 21from bioimageio.spec.model import v0_4, v0_5
    + 22
    + 23from ._prediction_pipeline import PredictionPipeline, create_prediction_pipeline
    + 24from .axis import AxisId
    + 25from .common import MemberId, PerMember
    + 26from .digest_spec import create_sample_for_model
    + 27from .io import save_sample
    + 28from .sample import Sample
    + 29from .tensor import Tensor
    + 30
    + 31
    + 32def predict(
    + 33    *,
    + 34    model: Union[
    + 35        PermissiveFileSource, v0_4.ModelDescr, v0_5.ModelDescr, PredictionPipeline
    + 36    ],
    + 37    inputs: Union[Sample, PerMember[Union[Tensor, xr.DataArray, NDArray[Any], Path]]],
    + 38    sample_id: Hashable = "sample",
    + 39    blocksize_parameter: Optional[
    + 40        Union[
    + 41            v0_5.ParameterizedSize_N,
    + 42            Mapping[Tuple[MemberId, AxisId], v0_5.ParameterizedSize_N],
    + 43        ]
    + 44    ] = None,
    + 45    input_block_shape: Optional[Mapping[MemberId, Mapping[AxisId, int]]] = None,
    + 46    skip_preprocessing: bool = False,
    + 47    skip_postprocessing: bool = False,
    + 48    save_output_path: Optional[Union[Path, str]] = None,
    + 49) -> Sample:
    + 50    """Run prediction for a single set of input(s) with a bioimage.io model
    + 51
    + 52    Args:
    + 53        model: model to predict with.
    + 54            May be given as RDF source, model description or prediction pipeline.
    + 55        inputs: the input sample or the named input(s) for this model as a dictionary
    + 56        sample_id: the sample id.
    + 57        blocksize_parameter: (optional) tile the input into blocks parametrized by
    + 58            blocksize according to any parametrized axis sizes defined in the model RDF.
    + 59            Note: For a predetermined, fixed block shape use `input_block_shape`
    + 60        input_block_shape: (optional) tile the input sample tensors into blocks.
+ 61            Note: For a parameterized block shape, where you do not specify the exact block shape,
    + 62            use `blocksize_parameter`.
    + 63        skip_preprocessing: flag to skip the model's preprocessing
    + 64        skip_postprocessing: flag to skip the model's postprocessing
    + 65        save_output_path: A path with `{member_id}` `{sample_id}` in it
    + 66            to save the output to.
    + 67    """
    + 68    if save_output_path is not None:
    + 69        if "{member_id}" not in str(save_output_path):
    + 70            raise ValueError(
    + 71                f"Missing `{{member_id}}` in save_output_path={save_output_path}"
    + 72            )
    + 73
    + 74    if isinstance(model, PredictionPipeline):
    + 75        pp = model
    + 76    else:
    + 77        if not isinstance(model, (v0_4.ModelDescr, v0_5.ModelDescr)):
    + 78            loaded = load_description(model)
    + 79            if not isinstance(loaded, (v0_4.ModelDescr, v0_5.ModelDescr)):
    + 80                raise ValueError(f"expected model description, but got {loaded}")
    + 81            model = loaded
    + 82
    + 83        pp = create_prediction_pipeline(model)
    + 84
    + 85    if isinstance(inputs, Sample):
    + 86        sample = inputs
    + 87    else:
    + 88        sample = create_sample_for_model(
    + 89            pp.model_description, inputs=inputs, sample_id=sample_id
    + 90        )
    + 91
    + 92    if input_block_shape is not None:
    + 93        if blocksize_parameter is not None:
    + 94            logger.warning(
    + 95                "ignoring blocksize_parameter={} in favor of input_block_shape={}",
    + 96                blocksize_parameter,
    + 97                input_block_shape,
    + 98            )
    + 99
    +100        output = pp.predict_sample_with_fixed_blocking(
    +101            sample,
    +102            input_block_shape=input_block_shape,
    +103            skip_preprocessing=skip_preprocessing,
    +104            skip_postprocessing=skip_postprocessing,
    +105        )
    +106    elif blocksize_parameter is not None:
    +107        output = pp.predict_sample_with_blocking(
    +108            sample,
    +109            skip_preprocessing=skip_preprocessing,
    +110            skip_postprocessing=skip_postprocessing,
    +111            ns=blocksize_parameter,
    +112        )
    +113    else:
    +114        output = pp.predict_sample_without_blocking(
    +115            sample,
    +116            skip_preprocessing=skip_preprocessing,
    +117            skip_postprocessing=skip_postprocessing,
    +118        )
    +119    if save_output_path:
    +120        save_sample(save_output_path, output)
    +121
    +122    return output
    +123
    +124
    +125def predict_many(
    +126    *,
    +127    model: Union[
    +128        PermissiveFileSource, v0_4.ModelDescr, v0_5.ModelDescr, PredictionPipeline
    +129    ],
    +130    inputs: Iterable[PerMember[Union[Tensor, xr.DataArray, NDArray[Any], Path]]],
    +131    sample_id: str = "sample{i:03}",
    +132    blocksize_parameter: Optional[
    +133        Union[
    +134            v0_5.ParameterizedSize_N,
    +135            Mapping[Tuple[MemberId, AxisId], v0_5.ParameterizedSize_N],
    +136        ]
    +137    ] = None,
    +138    skip_preprocessing: bool = False,
    +139    skip_postprocessing: bool = False,
    +140    save_output_path: Optional[Union[Path, str]] = None,
    +141) -> Iterator[Sample]:
+142    """Run prediction for multiple sets of inputs with a bioimage.io model
    +143
    +144    Args:
    +145        model: model to predict with.
    +146            May be given as RDF source, model description or prediction pipeline.
    +147        inputs: An iterable of the named input(s) for this model as a dictionary.
    +148        sample_id: the sample id.
    +149            note: `{i}` will be formatted as the i-th sample.
    +150            If `{i}` (or `{i:`) is not present and `inputs` is an iterable `{i:03}` is appended.
    +151        blocksize_parameter: (optional) tile the input into blocks parametrized by
    +152            blocksize according to any parametrized axis sizes defined in the model RDF
    +153        skip_preprocessing: flag to skip the model's preprocessing
    +154        skip_postprocessing: flag to skip the model's postprocessing
    +155        save_output_path: A path with `{member_id}` `{sample_id}` in it
    +156            to save the output to.
    +157    """
    +158    if save_output_path is not None:
    +159        if "{member_id}" not in str(save_output_path):
    +160            raise ValueError(
    +161                f"Missing `{{member_id}}` in save_output_path={save_output_path}"
    +162            )
    +163
    +164        if not isinstance(inputs, collections.abc.Mapping) and "{sample_id}" not in str(
    +165            save_output_path
    +166        ):
    +167            raise ValueError(
    +168                f"Missing `{{sample_id}}` in save_output_path={save_output_path}"
    +169            )
    +170
    +171    if isinstance(model, PredictionPipeline):
    +172        pp = model
    +173    else:
    +174        if not isinstance(model, (v0_4.ModelDescr, v0_5.ModelDescr)):
    +175            loaded = load_description(model)
    +176            if not isinstance(loaded, (v0_4.ModelDescr, v0_5.ModelDescr)):
    +177                raise ValueError(f"expected model description, but got {loaded}")
    +178            model = loaded
    +179
    +180        pp = create_prediction_pipeline(model)
    +181
    +182    if not isinstance(inputs, collections.abc.Mapping):
    +183        sample_id = str(sample_id)
    +184        if "{i}" not in sample_id and "{i:" not in sample_id:
    +185            sample_id += "{i:03}"
    +186
    +187        total = len(inputs) if isinstance(inputs, collections.abc.Sized) else None
    +188
    +189        for i, ipts in tqdm(enumerate(inputs), total=total):
    +190            yield predict(
    +191                model=pp,
    +192                inputs=ipts,
    +193                sample_id=sample_id.format(i=i),
    +194                blocksize_parameter=blocksize_parameter,
    +195                skip_preprocessing=skip_preprocessing,
    +196                skip_postprocessing=skip_postprocessing,
    +197                save_output_path=save_output_path,
    +198            )
    +
    + + +
    +
    + +
    + + def + predict( *, model: Union[Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])], str, Annotated[pydantic_core._pydantic_core.Url, UrlConstraints(max_length=2083, allowed_schemes=['http', 'https'], host_required=None, default_host=None, default_port=None, default_path=None)], bioimageio.spec.model.v0_4.ModelDescr, bioimageio.spec.ModelDescr, bioimageio.core.PredictionPipeline], inputs: Union[bioimageio.core.Sample, Mapping[bioimageio.spec.model.v0_5.TensorId, Union[bioimageio.core.Tensor, xarray.core.dataarray.DataArray, numpy.ndarray[Any, numpy.dtype[Any]], pathlib.Path]]], sample_id: Hashable = 'sample', blocksize_parameter: Union[int, Mapping[Tuple[bioimageio.spec.model.v0_5.TensorId, bioimageio.spec.model.v0_5.AxisId], int], NoneType] = None, input_block_shape: Optional[Mapping[bioimageio.spec.model.v0_5.TensorId, Mapping[bioimageio.spec.model.v0_5.AxisId, int]]] = None, skip_preprocessing: bool = False, skip_postprocessing: bool = False, save_output_path: Union[pathlib.Path, str, NoneType] = None) -> bioimageio.core.Sample: + + + +
    + +
     33def predict(
    + 34    *,
    + 35    model: Union[
    + 36        PermissiveFileSource, v0_4.ModelDescr, v0_5.ModelDescr, PredictionPipeline
    + 37    ],
    + 38    inputs: Union[Sample, PerMember[Union[Tensor, xr.DataArray, NDArray[Any], Path]]],
    + 39    sample_id: Hashable = "sample",
    + 40    blocksize_parameter: Optional[
    + 41        Union[
    + 42            v0_5.ParameterizedSize_N,
    + 43            Mapping[Tuple[MemberId, AxisId], v0_5.ParameterizedSize_N],
    + 44        ]
    + 45    ] = None,
    + 46    input_block_shape: Optional[Mapping[MemberId, Mapping[AxisId, int]]] = None,
    + 47    skip_preprocessing: bool = False,
    + 48    skip_postprocessing: bool = False,
    + 49    save_output_path: Optional[Union[Path, str]] = None,
    + 50) -> Sample:
    + 51    """Run prediction for a single set of input(s) with a bioimage.io model
    + 52
    + 53    Args:
    + 54        model: model to predict with.
    + 55            May be given as RDF source, model description or prediction pipeline.
    + 56        inputs: the input sample or the named input(s) for this model as a dictionary
    + 57        sample_id: the sample id.
    + 58        blocksize_parameter: (optional) tile the input into blocks parametrized by
    + 59            blocksize according to any parametrized axis sizes defined in the model RDF.
    + 60            Note: For a predetermined, fixed block shape use `input_block_shape`
    + 61        input_block_shape: (optional) tile the input sample tensors into blocks.
+ 62            Note: For a parameterized block shape, where you do not specify the exact block shape,
    + 63            use `blocksize_parameter`.
    + 64        skip_preprocessing: flag to skip the model's preprocessing
    + 65        skip_postprocessing: flag to skip the model's postprocessing
    + 66        save_output_path: A path with `{member_id}` `{sample_id}` in it
    + 67            to save the output to.
    + 68    """
    + 69    if save_output_path is not None:
    + 70        if "{member_id}" not in str(save_output_path):
    + 71            raise ValueError(
    + 72                f"Missing `{{member_id}}` in save_output_path={save_output_path}"
    + 73            )
    + 74
    + 75    if isinstance(model, PredictionPipeline):
    + 76        pp = model
    + 77    else:
    + 78        if not isinstance(model, (v0_4.ModelDescr, v0_5.ModelDescr)):
    + 79            loaded = load_description(model)
    + 80            if not isinstance(loaded, (v0_4.ModelDescr, v0_5.ModelDescr)):
    + 81                raise ValueError(f"expected model description, but got {loaded}")
    + 82            model = loaded
    + 83
    + 84        pp = create_prediction_pipeline(model)
    + 85
    + 86    if isinstance(inputs, Sample):
    + 87        sample = inputs
    + 88    else:
    + 89        sample = create_sample_for_model(
    + 90            pp.model_description, inputs=inputs, sample_id=sample_id
    + 91        )
    + 92
    + 93    if input_block_shape is not None:
    + 94        if blocksize_parameter is not None:
    + 95            logger.warning(
    + 96                "ignoring blocksize_parameter={} in favor of input_block_shape={}",
    + 97                blocksize_parameter,
    + 98                input_block_shape,
    + 99            )
    +100
    +101        output = pp.predict_sample_with_fixed_blocking(
    +102            sample,
    +103            input_block_shape=input_block_shape,
    +104            skip_preprocessing=skip_preprocessing,
    +105            skip_postprocessing=skip_postprocessing,
    +106        )
    +107    elif blocksize_parameter is not None:
    +108        output = pp.predict_sample_with_blocking(
    +109            sample,
    +110            skip_preprocessing=skip_preprocessing,
    +111            skip_postprocessing=skip_postprocessing,
    +112            ns=blocksize_parameter,
    +113        )
    +114    else:
    +115        output = pp.predict_sample_without_blocking(
    +116            sample,
    +117            skip_preprocessing=skip_preprocessing,
    +118            skip_postprocessing=skip_postprocessing,
    +119        )
    +120    if save_output_path:
    +121        save_sample(save_output_path, output)
    +122
    +123    return output
    +
    + + +

    Run prediction for a single set of input(s) with a bioimage.io model

    + +
    Arguments:
    + +
      +
    • model: model to predict with. +May be given as RDF source, model description or prediction pipeline.
    • +
    • inputs: the input sample or the named input(s) for this model as a dictionary
    • +
    • sample_id: the sample id.
    • +
    • blocksize_parameter: (optional) tile the input into blocks parametrized by +blocksize according to any parametrized axis sizes defined in the model RDF. +Note: For a predetermined, fixed block shape use input_block_shape
    • +
• input_block_shape: (optional) tile the input sample tensors into blocks. +Note: For a parameterized block shape, where you do not specify the exact block shape, +use blocksize_parameter.
    • +
    • skip_preprocessing: flag to skip the model's preprocessing
    • +
    • skip_postprocessing: flag to skip the model's postprocessing
    • +
    • save_output_path: A path with {member_id} {sample_id} in it +to save the output to.
    • +
    +
    + + +
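A hedged usage sketch (the model source, tensor id and input shape are placeholders to be replaced by your model's actual RDF source and input specification):

```
import numpy as np

from bioimageio.core.prediction import predict

output_sample = predict(
    model="path/to/rdf.yaml",  # placeholder RDF source
    inputs={"input0": np.zeros((1, 1, 256, 256), dtype="float32")},  # placeholder id/shape
    save_output_path="out/{sample_id}_{member_id}.tif",
)
```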
    +
    + +
    + + def + predict_many( *, model: Union[Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])], str, Annotated[pydantic_core._pydantic_core.Url, UrlConstraints(max_length=2083, allowed_schemes=['http', 'https'], host_required=None, default_host=None, default_port=None, default_path=None)], bioimageio.spec.model.v0_4.ModelDescr, bioimageio.spec.ModelDescr, bioimageio.core.PredictionPipeline], inputs: Iterable[Mapping[bioimageio.spec.model.v0_5.TensorId, Union[bioimageio.core.Tensor, xarray.core.dataarray.DataArray, numpy.ndarray[Any, numpy.dtype[Any]], pathlib.Path]]], sample_id: str = 'sample{i:03}', blocksize_parameter: Union[int, Mapping[Tuple[bioimageio.spec.model.v0_5.TensorId, bioimageio.spec.model.v0_5.AxisId], int], NoneType] = None, skip_preprocessing: bool = False, skip_postprocessing: bool = False, save_output_path: Union[pathlib.Path, str, NoneType] = None) -> Iterator[bioimageio.core.Sample]: + + + +
    + +
    126def predict_many(
    +127    *,
    +128    model: Union[
    +129        PermissiveFileSource, v0_4.ModelDescr, v0_5.ModelDescr, PredictionPipeline
    +130    ],
    +131    inputs: Iterable[PerMember[Union[Tensor, xr.DataArray, NDArray[Any], Path]]],
    +132    sample_id: str = "sample{i:03}",
    +133    blocksize_parameter: Optional[
    +134        Union[
    +135            v0_5.ParameterizedSize_N,
    +136            Mapping[Tuple[MemberId, AxisId], v0_5.ParameterizedSize_N],
    +137        ]
    +138    ] = None,
    +139    skip_preprocessing: bool = False,
    +140    skip_postprocessing: bool = False,
    +141    save_output_path: Optional[Union[Path, str]] = None,
    +142) -> Iterator[Sample]:
+143    """Run prediction for multiple sets of inputs with a bioimage.io model
    +144
    +145    Args:
    +146        model: model to predict with.
    +147            May be given as RDF source, model description or prediction pipeline.
    +148        inputs: An iterable of the named input(s) for this model as a dictionary.
    +149        sample_id: the sample id.
    +150            note: `{i}` will be formatted as the i-th sample.
    +151            If `{i}` (or `{i:`) is not present and `inputs` is an iterable `{i:03}` is appended.
    +152        blocksize_parameter: (optional) tile the input into blocks parametrized by
    +153            blocksize according to any parametrized axis sizes defined in the model RDF
    +154        skip_preprocessing: flag to skip the model's preprocessing
    +155        skip_postprocessing: flag to skip the model's postprocessing
    +156        save_output_path: A path with `{member_id}` `{sample_id}` in it
    +157            to save the output to.
    +158    """
    +159    if save_output_path is not None:
    +160        if "{member_id}" not in str(save_output_path):
    +161            raise ValueError(
    +162                f"Missing `{{member_id}}` in save_output_path={save_output_path}"
    +163            )
    +164
    +165        if not isinstance(inputs, collections.abc.Mapping) and "{sample_id}" not in str(
    +166            save_output_path
    +167        ):
    +168            raise ValueError(
    +169                f"Missing `{{sample_id}}` in save_output_path={save_output_path}"
    +170            )
    +171
    +172    if isinstance(model, PredictionPipeline):
    +173        pp = model
    +174    else:
    +175        if not isinstance(model, (v0_4.ModelDescr, v0_5.ModelDescr)):
    +176            loaded = load_description(model)
    +177            if not isinstance(loaded, (v0_4.ModelDescr, v0_5.ModelDescr)):
    +178                raise ValueError(f"expected model description, but got {loaded}")
    +179            model = loaded
    +180
    +181        pp = create_prediction_pipeline(model)
    +182
    +183    if not isinstance(inputs, collections.abc.Mapping):
    +184        sample_id = str(sample_id)
    +185        if "{i}" not in sample_id and "{i:" not in sample_id:
    +186            sample_id += "{i:03}"
    +187
    +188        total = len(inputs) if isinstance(inputs, collections.abc.Sized) else None
    +189
    +190        for i, ipts in tqdm(enumerate(inputs), total=total):
    +191            yield predict(
    +192                model=pp,
    +193                inputs=ipts,
    +194                sample_id=sample_id.format(i=i),
    +195                blocksize_parameter=blocksize_parameter,
    +196                skip_preprocessing=skip_preprocessing,
    +197                skip_postprocessing=skip_postprocessing,
    +198                save_output_path=save_output_path,
    +199            )
    +
    + + +

Run prediction for multiple sets of inputs with a bioimage.io model

    + +
    Arguments:
    + +
      +
    • model: model to predict with. +May be given as RDF source, model description or prediction pipeline.
    • +
    • inputs: An iterable of the named input(s) for this model as a dictionary.
    • +
    • sample_id: the sample id. +note: {i} will be formatted as the i-th sample. +If {i} (or {i:) is not present and inputs is an iterable {i:03} is appended.
    • +
    • blocksize_parameter: (optional) tile the input into blocks parametrized by +blocksize according to any parametrized axis sizes defined in the model RDF
    • +
    • skip_preprocessing: flag to skip the model's preprocessing
    • +
    • skip_postprocessing: flag to skip the model's postprocessing
    • +
    • save_output_path: A path with {member_id} {sample_id} in it +to save the output to.
    • +
    +
    + + +
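A hedged sketch for batches of inputs (again with placeholder ids and paths); note that `predict_many` returns a generator, so prediction only runs while iterating:

```
from pathlib import Path

from bioimageio.core.prediction import predict_many

outputs = predict_many(
    model="path/to/rdf.yaml",  # placeholder RDF source
    inputs=[{"input0": Path(p)} for p in ("img0.tif", "img1.tif")],  # placeholder ids/paths
    save_output_path="out/{sample_id}_{member_id}.tif",
)
for sample in outputs:
    print(sample.id)
```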
    +
    + + \ No newline at end of file diff --git a/bioimageio/core/proc_ops.html b/bioimageio/core/proc_ops.html new file mode 100644 index 00000000..bb580a98 --- /dev/null +++ b/bioimageio/core/proc_ops.html @@ -0,0 +1,3242 @@ + + + + + + + bioimageio.core.proc_ops API documentation + + + + + + + + + + +
    +
    +

    +bioimageio.core.proc_ops

    + + + + + + +
      1import collections.abc
    +  2from abc import ABC, abstractmethod
    +  3from dataclasses import InitVar, dataclass, field
    +  4from typing import (
    +  5    Collection,
    +  6    Literal,
    +  7    Mapping,
    +  8    Optional,
    +  9    Sequence,
    + 10    Set,
    + 11    Tuple,
    + 12    Union,
    + 13)
    + 14
    + 15import numpy as np
    + 16import xarray as xr
    + 17from typing_extensions import Self, assert_never
    + 18
    + 19from bioimageio.spec.model import v0_4, v0_5
    + 20
    + 21from ._op_base import BlockedOperator, Operator
    + 22from .axis import AxisId, PerAxis
    + 23from .block import Block
    + 24from .common import DTypeStr, MemberId
    + 25from .sample import Sample, SampleBlock, SampleBlockWithOrigin
    + 26from .stat_calculators import StatsCalculator
    + 27from .stat_measures import (
    + 28    DatasetMean,
    + 29    DatasetMeasure,
    + 30    DatasetPercentile,
    + 31    DatasetStd,
    + 32    MeanMeasure,
    + 33    Measure,
    + 34    MeasureValue,
    + 35    SampleMean,
    + 36    SampleQuantile,
    + 37    SampleStd,
    + 38    Stat,
    + 39    StdMeasure,
    + 40)
    + 41from .tensor import Tensor
    + 42
    + 43
    + 44def _convert_axis_ids(
    + 45    axes: v0_4.AxesInCZYX,
    + 46    mode: Literal["per_sample", "per_dataset"],
    + 47) -> Tuple[AxisId, ...]:
    + 48    if not isinstance(axes, str):
    + 49        return tuple(axes)
    + 50
    + 51    if mode == "per_sample":
    + 52        ret = []
    + 53    elif mode == "per_dataset":
    + 54        ret = [AxisId("b")]
    + 55    else:
    + 56        assert_never(mode)
    + 57
    + 58    ret.extend([AxisId(a) for a in axes])
    + 59    return tuple(ret)
    + 60
    + 61
    + 62@dataclass
    + 63class _SimpleOperator(BlockedOperator, ABC):
    + 64    input: MemberId
    + 65    output: MemberId
    + 66
    + 67    @property
    + 68    def required_measures(self) -> Collection[Measure]:
    + 69        return set()
    + 70
    + 71    @abstractmethod
    + 72    def get_output_shape(self, input_shape: PerAxis[int]) -> PerAxis[int]: ...
    + 73
    + 74    def __call__(self, sample: Union[Sample, SampleBlock]) -> None:
    + 75        if self.input not in sample.members:
    + 76            return
    + 77
    + 78        input_tensor = sample.members[self.input]
    + 79        output_tensor = self._apply(input_tensor, sample.stat)
    + 80
    + 81        if self.output in sample.members:
    + 82            assert (
    + 83                sample.members[self.output].tagged_shape == output_tensor.tagged_shape
    + 84            )
    + 85
    + 86        if isinstance(sample, Sample):
    + 87            sample.members[self.output] = output_tensor
    + 88        elif isinstance(sample, SampleBlock):
    + 89            b = sample.blocks[self.input]
    + 90            sample.blocks[self.output] = Block(
    + 91                sample_shape=self.get_output_shape(sample.shape[self.input]),
    + 92                data=output_tensor,
    + 93                inner_slice=b.inner_slice,
    + 94                halo=b.halo,
    + 95                block_index=b.block_index,
    + 96                blocks_in_sample=b.blocks_in_sample,
    + 97            )
    + 98        else:
    + 99            assert_never(sample)
    +100
    +101    @abstractmethod
    +102    def _apply(self, input: Tensor, stat: Stat) -> Tensor: ...
    +103
    +104
    +105@dataclass
    +106class AddKnownDatasetStats(BlockedOperator):
    +107    dataset_stats: Mapping[DatasetMeasure, MeasureValue]
    +108
    +109    @property
    +110    def required_measures(self) -> Set[Measure]:
    +111        return set()
    +112
    +113    def __call__(self, sample: Union[Sample, SampleBlock]) -> None:
    +114        sample.stat.update(self.dataset_stats.items())
    +115
    +116
    +117# @dataclass
    +118# class UpdateStats(Operator):
    +119#     """Calculates sample and/or dataset measures"""
    +120
    +121#     measures: Union[Sequence[Measure], Set[Measure], Mapping[Measure, MeasureValue]]
+122#     """sample and dataset `measures` to be calculated by this operator. Initial/fixed
    +123#     dataset measure values may be given, see `keep_updating_dataset_stats` for details.
    +124#     """
    +125#     keep_updating_dataset_stats: Optional[bool] = None
    +126#     """indicates if operator calls should keep updating dataset statistics or not
    +127
    +128#     default (None): if `measures` is a `Mapping` (i.e. initial measure values are
    +129#     given) no further updates to dataset statistics is conducted, otherwise (w.o.
    +130#     initial measure values) dataset statistics are updated by each processed sample.
    +131#     """
    +132#     _keep_updating_dataset_stats: bool = field(init=False)
    +133#     _stats_calculator: StatsCalculator = field(init=False)
    +134
    +135#     @property
    +136#     def required_measures(self) -> Set[Measure]:
    +137#         return set()
    +138
    +139#     def __post_init__(self):
    +140#         self._stats_calculator = StatsCalculator(self.measures)
    +141#         if self.keep_updating_dataset_stats is None:
    +142#             self._keep_updating_dataset_stats = not isinstance(self.measures, collections.abc.Mapping)
    +143#         else:
    +144#             self._keep_updating_dataset_stats = self.keep_updating_dataset_stats
    +145
+146#     def __call__(self, sample: SampleBlockWithOrigin) -> None:
    +147#         if self._keep_updating_dataset_stats:
    +148#             sample.stat.update(self._stats_calculator.update_and_get_all(sample))
    +149#         else:
    +150#             sample.stat.update(self._stats_calculator.skip_update_and_get_all(sample))
    +151
    +152
    +153@dataclass
    +154class UpdateStats(Operator):
    +155    """Calculates sample and/or dataset measures"""
    +156
    +157    stats_calculator: StatsCalculator
    +158    """`StatsCalculator` to be used by this operator."""
    +159    keep_updating_initial_dataset_stats: bool = False
    +160    """indicates if operator calls should keep updating initial dataset statistics or not;
    +161    if the `stats_calculator` was not provided with any initial dataset statistics,
    +162    these are always updated with every new sample.
    +163    """
    +164    _keep_updating_dataset_stats: bool = field(init=False)
    +165
    +166    @property
    +167    def required_measures(self) -> Set[Measure]:
    +168        return set()
    +169
    +170    def __post_init__(self):
    +171        self._keep_updating_dataset_stats = (
    +172            self.keep_updating_initial_dataset_stats
    +173            or not self.stats_calculator.has_dataset_measures
    +174        )
    +175
    +176    def __call__(self, sample: Union[Sample, SampleBlockWithOrigin]) -> None:
    +177        if isinstance(sample, SampleBlockWithOrigin):
    +178            # update stats with whole sample on first block
    +179            if sample.block_index != 0:
    +180                return
    +181
    +182            origin = sample.origin
    +183        else:
    +184            origin = sample
    +185
    +186        if self._keep_updating_dataset_stats:
    +187            sample.stat.update(self.stats_calculator.update_and_get_all(origin))
    +188        else:
    +189            sample.stat.update(self.stats_calculator.skip_update_and_get_all(origin))
    +190
    +191
    +192@dataclass
    +193class Binarize(_SimpleOperator):
    +194    """'output = tensor > threshold'."""
    +195
    +196    threshold: Union[float, Sequence[float]]
    +197    axis: Optional[AxisId] = None
    +198
    +199    def _apply(self, input: Tensor, stat: Stat) -> Tensor:
    +200        return input > self.threshold
    +201
    +202    def get_output_shape(
    +203        self, input_shape: Mapping[AxisId, int]
    +204    ) -> Mapping[AxisId, int]:
    +205        return input_shape
    +206
    +207    @classmethod
    +208    def from_proc_descr(
    +209        cls, descr: Union[v0_4.BinarizeDescr, v0_5.BinarizeDescr], member_id: MemberId
    +210    ) -> Self:
    +211        if isinstance(descr.kwargs, (v0_4.BinarizeKwargs, v0_5.BinarizeKwargs)):
    +212            return cls(
    +213                input=member_id, output=member_id, threshold=descr.kwargs.threshold
    +214            )
    +215        elif isinstance(descr.kwargs, v0_5.BinarizeAlongAxisKwargs):
    +216            return cls(
    +217                input=member_id,
    +218                output=member_id,
    +219                threshold=descr.kwargs.threshold,
    +220                axis=descr.kwargs.axis,
    +221            )
    +222        else:
    +223            assert_never(descr.kwargs)
    +224
    +225
    +226@dataclass
    +227class Clip(_SimpleOperator):
    +228    min: Optional[float] = None
    +229    """minimum value for clipping"""
    +230    max: Optional[float] = None
    +231    """maximum value for clipping"""
    +232
    +233    def __post_init__(self):
    +234        assert self.min is not None or self.max is not None, "missing min or max value"
    +235        assert (
    +236            self.min is None or self.max is None or self.min < self.max
    +237        ), f"expected min < max, but {self.min} !< {self.max}"
    +238
    +239    def _apply(self, input: Tensor, stat: Stat) -> Tensor:
    +240        return input.clip(self.min, self.max)
    +241
    +242    def get_output_shape(
    +243        self, input_shape: Mapping[AxisId, int]
    +244    ) -> Mapping[AxisId, int]:
    +245        return input_shape
    +246
    +247    @classmethod
    +248    def from_proc_descr(
    +249        cls, descr: Union[v0_4.ClipDescr, v0_5.ClipDescr], member_id: MemberId
    +250    ) -> Self:
    +251        return cls(
    +252            input=member_id,
    +253            output=member_id,
    +254            min=descr.kwargs.min,
    +255            max=descr.kwargs.max,
    +256        )
    +257
    +258
    +259@dataclass
    +260class EnsureDtype(_SimpleOperator):
    +261    dtype: DTypeStr
    +262
    +263    @classmethod
    +264    def from_proc_descr(cls, descr: v0_5.EnsureDtypeDescr, member_id: MemberId):
    +265        return cls(input=member_id, output=member_id, dtype=descr.kwargs.dtype)
    +266
    +267    def get_descr(self):
    +268        return v0_5.EnsureDtypeDescr(kwargs=v0_5.EnsureDtypeKwargs(dtype=self.dtype))
    +269
    +270    def get_output_shape(
    +271        self, input_shape: Mapping[AxisId, int]
    +272    ) -> Mapping[AxisId, int]:
    +273        return input_shape
    +274
    +275    def _apply(self, input: Tensor, stat: Stat) -> Tensor:
    +276        return input.astype(self.dtype)
    +277
    +278
    +279@dataclass
    +280class ScaleLinear(_SimpleOperator):
    +281    gain: Union[float, xr.DataArray] = 1.0
    +282    """multiplicative factor"""
    +283
    +284    offset: Union[float, xr.DataArray] = 0.0
    +285    """additive term"""
    +286
    +287    def _apply(self, input: Tensor, stat: Stat) -> Tensor:
    +288        return input * self.gain + self.offset
    +289
    +290    def get_output_shape(
    +291        self, input_shape: Mapping[AxisId, int]
    +292    ) -> Mapping[AxisId, int]:
    +293        return input_shape
    +294
    +295    @classmethod
    +296    def from_proc_descr(
    +297        cls,
    +298        descr: Union[v0_4.ScaleLinearDescr, v0_5.ScaleLinearDescr],
    +299        member_id: MemberId,
    +300    ) -> Self:
    +301        kwargs = descr.kwargs
    +302        if isinstance(kwargs, v0_5.ScaleLinearAlongAxisKwargs):
    +303            axis = kwargs.axis
    +304        elif isinstance(kwargs, (v0_4.ScaleLinearKwargs, v0_5.ScaleLinearKwargs)):
    +305            axis = None
    +306        else:
    +307            assert_never(kwargs)
    +308
    +309        if axis:
    +310            gain = xr.DataArray(np.atleast_1d(kwargs.gain), dims=axis)
    +311            offset = xr.DataArray(np.atleast_1d(kwargs.offset), dims=axis)
    +312        else:
    +313            assert (
    +314                isinstance(kwargs.gain, (float, int)) or len(kwargs.gain) == 1
    +315            ), kwargs.gain
    +316            gain = (
    +317                kwargs.gain if isinstance(kwargs.gain, (float, int)) else kwargs.gain[0]
    +318            )
    +319            assert isinstance(kwargs.offset, (float, int)) or len(kwargs.offset) == 1
    +320            offset = (
    +321                kwargs.offset
    +322                if isinstance(kwargs.offset, (float, int))
    +323                else kwargs.offset[0]
    +324            )
    +325
    +326        return cls(input=member_id, output=member_id, gain=gain, offset=offset)
    +327
    +328
    +329@dataclass
    +330class ScaleMeanVariance(_SimpleOperator):
    +331    axes: Optional[Sequence[AxisId]] = None
    +332    reference_tensor: Optional[MemberId] = None
    +333    eps: float = 1e-6
    +334    mean: Union[SampleMean, DatasetMean] = field(init=False)
    +335    std: Union[SampleStd, DatasetStd] = field(init=False)
    +336    ref_mean: Union[SampleMean, DatasetMean] = field(init=False)
    +337    ref_std: Union[SampleStd, DatasetStd] = field(init=False)
    +338
    +339    @property
    +340    def required_measures(self):
    +341        return {self.mean, self.std, self.ref_mean, self.ref_std}
    +342
    +343    def __post_init__(self):
    +344        axes = None if self.axes is None else tuple(self.axes)
    +345        ref_tensor = self.reference_tensor or self.input
    +346        if axes is None or AxisId("batch") not in axes:
    +347            Mean = SampleMean
    +348            Std = SampleStd
    +349        else:
    +350            Mean = DatasetMean
    +351            Std = DatasetStd
    +352
    +353        self.mean = Mean(member_id=self.input, axes=axes)
    +354        self.std = Std(member_id=self.input, axes=axes)
    +355        self.ref_mean = Mean(member_id=ref_tensor, axes=axes)
    +356        self.ref_std = Std(member_id=ref_tensor, axes=axes)
    +357
    +358    def _apply(self, input: Tensor, stat: Stat) -> Tensor:
    +359        mean = stat[self.mean]
    +360        std = stat[self.std] + self.eps
    +361        ref_mean = stat[self.ref_mean]
    +362        ref_std = stat[self.ref_std] + self.eps
    +363        return (input - mean) / std * ref_std + ref_mean
    +364
    +365    def get_output_shape(
    +366        self, input_shape: Mapping[AxisId, int]
    +367    ) -> Mapping[AxisId, int]:
    +368        return input_shape
    +369
    +370    @classmethod
    +371    def from_proc_descr(
    +372        cls,
    +373        descr: Union[v0_4.ScaleMeanVarianceDescr, v0_5.ScaleMeanVarianceDescr],
    +374        member_id: MemberId,
    +375    ) -> Self:
    +376        kwargs = descr.kwargs
    +377        _, axes = _get_axes(descr.kwargs)
    +378
    +379        return cls(
    +380            input=member_id,
    +381            output=member_id,
    +382            reference_tensor=MemberId(str(kwargs.reference_tensor)),
    +383            axes=axes,
    +384            eps=kwargs.eps,
    +385        )
    +386
    +387
    +388def _get_axes(
    +389    kwargs: Union[
    +390        v0_4.ZeroMeanUnitVarianceKwargs,
    +391        v0_5.ZeroMeanUnitVarianceKwargs,
    +392        v0_4.ScaleRangeKwargs,
    +393        v0_5.ScaleRangeKwargs,
    +394        v0_4.ScaleMeanVarianceKwargs,
    +395        v0_5.ScaleMeanVarianceKwargs,
    +396    ],
    +397) -> Tuple[bool, Optional[Tuple[AxisId, ...]]]:
    +398    if kwargs.axes is None:
    +399        return True, None
    +400    elif isinstance(kwargs.axes, str):
    +401        axes = _convert_axis_ids(kwargs.axes, kwargs["mode"])
    +402        return AxisId("b") in axes, axes
    +403    elif isinstance(kwargs.axes, collections.abc.Sequence):
    +404        axes = tuple(kwargs.axes)
    +405        return AxisId("batch") in axes, axes
    +406    else:
    +407        assert_never(kwargs.axes)
    +408
    +409
    +410@dataclass
    +411class ScaleRange(_SimpleOperator):
    +412    lower_percentile: InitVar[Optional[Union[SampleQuantile, DatasetPercentile]]] = None
    +413    upper_percentile: InitVar[Optional[Union[SampleQuantile, DatasetPercentile]]] = None
    +414    lower: Union[SampleQuantile, DatasetPercentile] = field(init=False)
    +415    upper: Union[SampleQuantile, DatasetPercentile] = field(init=False)
    +416
    +417    eps: float = 1e-6
    +418
    +419    def __post_init__(
    +420        self,
    +421        lower_percentile: Optional[Union[SampleQuantile, DatasetPercentile]],
    +422        upper_percentile: Optional[Union[SampleQuantile, DatasetPercentile]],
    +423    ):
    +424        if lower_percentile is None:
    +425            tid = self.input if upper_percentile is None else upper_percentile.member_id
    +426            self.lower = DatasetPercentile(q=0.0, member_id=tid)
    +427        else:
    +428            self.lower = lower_percentile
    +429
    +430        if upper_percentile is None:
    +431            self.upper = DatasetPercentile(q=1.0, member_id=self.lower.member_id)
    +432        else:
    +433            self.upper = upper_percentile
    +434
    +435        assert self.lower.member_id == self.upper.member_id
    +436        assert self.lower.q < self.upper.q
    +437        assert self.lower.axes == self.upper.axes
    +438
    +439    @property
    +440    def required_measures(self):
    +441        return {self.lower, self.upper}
    +442
    +443    def get_output_shape(
    +444        self, input_shape: Mapping[AxisId, int]
    +445    ) -> Mapping[AxisId, int]:
    +446        return input_shape
    +447
    +448    @classmethod
    +449    def from_proc_descr(
    +450        cls,
    +451        descr: Union[v0_4.ScaleRangeDescr, v0_5.ScaleRangeDescr],
    +452        member_id: MemberId,
    +453    ):
    +454        kwargs = descr.kwargs
    +455        ref_tensor = (
    +456            member_id
    +457            if kwargs.reference_tensor is None
    +458            else MemberId(str(kwargs.reference_tensor))
    +459        )
    +460        dataset_mode, axes = _get_axes(descr.kwargs)
    +461        if dataset_mode:
    +462            Percentile = DatasetPercentile
    +463        else:
    +464            Percentile = SampleQuantile
    +465
    +466        return cls(
    +467            input=member_id,
    +468            output=member_id,
    +469            lower_percentile=Percentile(
    +470                q=kwargs.min_percentile / 100, axes=axes, member_id=ref_tensor
    +471            ),
    +472            upper_percentile=Percentile(
    +473                q=kwargs.max_percentile / 100, axes=axes, member_id=ref_tensor
    +474            ),
    +475        )
    +476
    +477    def _apply(self, input: Tensor, stat: Stat) -> Tensor:
    +478        lower = stat[self.lower]
    +479        upper = stat[self.upper]
    +480        return (input - lower) / (upper - lower + self.eps)
    +481
    +482    def get_descr(self):
    +483        assert self.lower.axes == self.upper.axes
    +484        assert self.lower.member_id == self.upper.member_id
    +485
    +486        return v0_5.ScaleRangeDescr(
    +487            kwargs=v0_5.ScaleRangeKwargs(
    +488                axes=self.lower.axes,
    +489                min_percentile=self.lower.q * 100,
    +490                max_percentile=self.upper.q * 100,
    +491                eps=self.eps,
    +492                reference_tensor=self.lower.member_id,
    +493            )
    +494        )
    +495
    +496
    +497@dataclass
    +498class Sigmoid(_SimpleOperator):
    +499    """1 / (1 + e^(-input))."""
    +500
    +501    def _apply(self, input: Tensor, stat: Stat) -> Tensor:
    +502        return Tensor(1.0 / (1.0 + np.exp(-input)), dims=input.dims)
    +503
    +504    @property
    +505    def required_measures(self) -> Collection[Measure]:
    +506        return {}
    +507
    +508    def get_output_shape(
    +509        self, input_shape: Mapping[AxisId, int]
    +510    ) -> Mapping[AxisId, int]:
    +511        return input_shape
    +512
    +513    @classmethod
    +514    def from_proc_descr(
    +515        cls, descr: Union[v0_4.SigmoidDescr, v0_5.SigmoidDescr], member_id: MemberId
    +516    ) -> Self:
    +517        assert isinstance(descr, (v0_4.SigmoidDescr, v0_5.SigmoidDescr))
    +518        return cls(input=member_id, output=member_id)
    +519
    +520    def get_descr(self):
    +521        return v0_5.SigmoidDescr()
    +522
    +523
    +524@dataclass
    +525class ZeroMeanUnitVariance(_SimpleOperator):
    +526    """normalize to zero mean, unit variance."""
    +527
    +528    mean: MeanMeasure
    +529    std: StdMeasure
    +530
    +531    eps: float = 1e-6
    +532
    +533    def __post_init__(self):
    +534        assert self.mean.axes == self.std.axes
    +535
    +536    @property
    +537    def required_measures(self) -> Set[Union[MeanMeasure, StdMeasure]]:
    +538        return {self.mean, self.std}
    +539
    +540    def get_output_shape(
    +541        self, input_shape: Mapping[AxisId, int]
    +542    ) -> Mapping[AxisId, int]:
    +543        return input_shape
    +544
    +545    @classmethod
    +546    def from_proc_descr(
    +547        cls,
    +548        descr: Union[v0_4.ZeroMeanUnitVarianceDescr, v0_5.ZeroMeanUnitVarianceDescr],
    +549        member_id: MemberId,
    +550    ):
    +551        dataset_mode, axes = _get_axes(descr.kwargs)
    +552
    +553        if dataset_mode:
    +554            Mean = DatasetMean
    +555            Std = DatasetStd
    +556        else:
    +557            Mean = SampleMean
    +558            Std = SampleStd
    +559
    +560        return cls(
    +561            input=member_id,
    +562            output=member_id,
    +563            mean=Mean(axes=axes, member_id=member_id),
    +564            std=Std(axes=axes, member_id=member_id),
    +565        )
    +566
    +567    def _apply(self, input: Tensor, stat: Stat) -> Tensor:
    +568        mean = stat[self.mean]
    +569        std = stat[self.std]
    +570        return (input - mean) / (std + self.eps)
    +571
    +572    def get_descr(self):
    +573        return v0_5.ZeroMeanUnitVarianceDescr(
    +574            kwargs=v0_5.ZeroMeanUnitVarianceKwargs(axes=self.mean.axes, eps=self.eps)
    +575        )
    +576
    +577
    +578@dataclass
    +579class FixedZeroMeanUnitVariance(_SimpleOperator):
    +580    """normalize to zero mean, unit variance with precomputed values."""
    +581
    +582    mean: Union[float, xr.DataArray]
    +583    std: Union[float, xr.DataArray]
    +584
    +585    eps: float = 1e-6
    +586
    +587    def __post_init__(self):
    +588        assert (
    +589            isinstance(self.mean, (int, float))
    +590            or isinstance(self.std, (int, float))
    +591            or self.mean.dims == self.std.dims
    +592        )
    +593
    +594    def get_output_shape(
    +595        self, input_shape: Mapping[AxisId, int]
    +596    ) -> Mapping[AxisId, int]:
    +597        return input_shape
    +598
    +599    @classmethod
    +600    def from_proc_descr(
    +601        cls,
    +602        descr: v0_5.FixedZeroMeanUnitVarianceDescr,
    +603        member_id: MemberId,
    +604    ) -> Self:
    +605        if isinstance(descr.kwargs, v0_5.FixedZeroMeanUnitVarianceKwargs):
    +606            dims = None
    +607        elif isinstance(descr.kwargs, v0_5.FixedZeroMeanUnitVarianceAlongAxisKwargs):
    +608            dims = (descr.kwargs.axis,)
    +609        else:
    +610            assert_never(descr.kwargs)
    +611
    +612        return cls(
    +613            input=member_id,
    +614            output=member_id,
    +615            mean=xr.DataArray(descr.kwargs.mean, dims=dims),
    +616            std=xr.DataArray(descr.kwargs.std, dims=dims),
    +617        )
    +618
    +619    def get_descr(self):
    +620        if isinstance(self.mean, (int, float)):
    +621            assert isinstance(self.std, (int, float))
    +622            kwargs = v0_5.FixedZeroMeanUnitVarianceKwargs(mean=self.mean, std=self.std)
    +623        else:
    +624            assert isinstance(self.std, xr.DataArray)
    +625            assert len(self.mean.dims) == 1
    +626            kwargs = v0_5.FixedZeroMeanUnitVarianceAlongAxisKwargs(
    +627                axis=AxisId(str(self.mean.dims[0])),
    +628                mean=list(self.mean),
    +629                std=list(self.std),
    +630            )
    +631
    +632        return v0_5.FixedZeroMeanUnitVarianceDescr(kwargs=kwargs)
    +633
    +634    def _apply(self, input: Tensor, stat: Stat) -> Tensor:
    +635        return (input - self.mean) / (self.std + self.eps)
    +636
    +637
    +638ProcDescr = Union[
    +639    v0_4.PreprocessingDescr,
    +640    v0_4.PostprocessingDescr,
    +641    v0_5.PreprocessingDescr,
    +642    v0_5.PostprocessingDescr,
    +643]
    +644
    +645Processing = Union[
    +646    AddKnownDatasetStats,
    +647    Binarize,
    +648    Clip,
    +649    EnsureDtype,
    +650    FixedZeroMeanUnitVariance,
    +651    ScaleLinear,
    +652    ScaleMeanVariance,
    +653    ScaleRange,
    +654    Sigmoid,
    +655    UpdateStats,
    +656    ZeroMeanUnitVariance,
    +657]
    +658
    +659
    +660def get_proc_class(proc_spec: ProcDescr):
    +661    if isinstance(proc_spec, (v0_4.BinarizeDescr, v0_5.BinarizeDescr)):
    +662        return Binarize
    +663    elif isinstance(proc_spec, (v0_4.ClipDescr, v0_5.ClipDescr)):
    +664        return Clip
    +665    elif isinstance(proc_spec, v0_5.EnsureDtypeDescr):
    +666        return EnsureDtype
    +667    elif isinstance(proc_spec, v0_5.FixedZeroMeanUnitVarianceDescr):
    +668        return FixedZeroMeanUnitVariance
    +669    elif isinstance(proc_spec, (v0_4.ScaleLinearDescr, v0_5.ScaleLinearDescr)):
    +670        return ScaleLinear
    +671    elif isinstance(
    +672        proc_spec, (v0_4.ScaleMeanVarianceDescr, v0_5.ScaleMeanVarianceDescr)
    +673    ):
    +674        return ScaleMeanVariance
    +675    elif isinstance(proc_spec, (v0_4.ScaleRangeDescr, v0_5.ScaleRangeDescr)):
    +676        return ScaleRange
    +677    elif isinstance(proc_spec, (v0_4.SigmoidDescr, v0_5.SigmoidDescr)):
    +678        return Sigmoid
    +679    elif (
    +680        isinstance(proc_spec, v0_4.ZeroMeanUnitVarianceDescr)
    +681        and proc_spec.kwargs.mode == "fixed"
    +682    ):
    +683        return FixedZeroMeanUnitVariance
    +684    elif isinstance(
    +685        proc_spec,
    +686        (v0_4.ZeroMeanUnitVarianceDescr, v0_5.ZeroMeanUnitVarianceDescr),
    +687    ):
    +688        return ZeroMeanUnitVariance
    +689    else:
    +690        assert_never(proc_spec)
    +
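A hedged sketch of how this dispatch is typically wired up: for each preprocessing description of an input tensor, look up the operator class and instantiate it for that tensor. The attribute names `preprocessing` and `id` on the input tensor description come from bioimageio.spec and are assumptions here, not part of this module.

from bioimageio.spec.model import v0_5

def build_preprocessing_ops(ipt: "v0_5.InputTensorDescr"):
    ops = []
    for proc_descr in ipt.preprocessing:
        proc_class = get_proc_class(proc_descr)                     # dispatch on the description type
        ops.append(proc_class.from_proc_descr(proc_descr, ipt.id))  # bind the operator to this tensor
    return ops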
    @dataclass
class AddKnownDatasetStats(bioimageio.core._op_base.BlockedOperator):
    106@dataclass
    +107class AddKnownDatasetStats(BlockedOperator):
    +108    dataset_stats: Mapping[DatasetMeasure, MeasureValue]
    +109
    +110    @property
    +111    def required_measures(self) -> Set[Measure]:
    +112        return set()
    +113
    +114    def __call__(self, sample: Union[Sample, SampleBlock]) -> None:
    +115        sample.stat.update(self.dataset_stats.items())
    +
AddKnownDatasetStats(dataset_stats: Mapping[DatasetMeasure, MeasureValue])

dataset_stats: Mapping[DatasetMeasure, MeasureValue]

required_measures: Set[Measure]
    110    @property
    +111    def required_measures(self) -> Set[Measure]:
    +112        return set()
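A minimal usage sketch (assuming `sample` is an already constructed Sample whose `stat` mapping should receive precomputed dataset statistics; `MemberId` is this module's tensor id alias and the measure constructor follows bioimageio.core.stat_measures):

from bioimageio.core.stat_measures import DatasetMean

precomputed = {DatasetMean(member_id=MemberId("raw"), axes=None): 0.0}
add_stats = AddKnownDatasetStats(dataset_stats=precomputed)
add_stats(sample)  # simply merges the precomputed values into sample.stat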
    +
    +
    @dataclass
class UpdateStats(bioimageio.core._op_base.Operator):
    154@dataclass
    +155class UpdateStats(Operator):
    +156    """Calculates sample and/or dataset measures"""
    +157
    +158    stats_calculator: StatsCalculator
    +159    """`StatsCalculator` to be used by this operator."""
    +160    keep_updating_initial_dataset_stats: bool = False
    +161    """indicates if operator calls should keep updating initial dataset statistics or not;
    +162    if the `stats_calculator` was not provided with any initial dataset statistics,
    +163    these are always updated with every new sample.
    +164    """
    +165    _keep_updating_dataset_stats: bool = field(init=False)
    +166
    +167    @property
    +168    def required_measures(self) -> Set[Measure]:
    +169        return set()
    +170
    +171    def __post_init__(self):
    +172        self._keep_updating_dataset_stats = (
    +173            self.keep_updating_initial_dataset_stats
    +174            or not self.stats_calculator.has_dataset_measures
    +175        )
    +176
    +177    def __call__(self, sample: Union[Sample, SampleBlockWithOrigin]) -> None:
    +178        if isinstance(sample, SampleBlockWithOrigin):
    +179            # update stats with whole sample on first block
    +180            if sample.block_index != 0:
    +181                return
    +182
    +183            origin = sample.origin
    +184        else:
    +185            origin = sample
    +186
    +187        if self._keep_updating_dataset_stats:
    +188            sample.stat.update(self.stats_calculator.update_and_get_all(origin))
    +189        else:
    +190            sample.stat.update(self.stats_calculator.skip_update_and_get_all(origin))
    +

Calculates sample and/or dataset measures

UpdateStats(stats_calculator: StatsCalculator, keep_updating_initial_dataset_stats: bool = False)

stats_calculator: StatsCalculator
    `StatsCalculator` to be used by this operator.

keep_updating_initial_dataset_stats: bool = False
    indicates if operator calls should keep updating initial dataset statistics or not;
    if the `stats_calculator` was not provided with any initial dataset statistics,
    these are always updated with every new sample.

required_measures: Set[Measure]
    167    @property
    +168    def required_measures(self) -> Set[Measure]:
    +169        return set()
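A hedged sketch of typical use; the StatsCalculator constructor call mirrors the commented-out code above and is an assumption, as is the pre-existing `sample`:

from bioimageio.core.stat_calculators import StatsCalculator
from bioimageio.core.stat_measures import SampleMean

required = {SampleMean(member_id=MemberId("raw"), axes=None)}
update_stats = UpdateStats(stats_calculator=StatsCalculator(required))
update_stats(sample)  # computes the required measures and writes them to sample.stat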
    +
    +
    @dataclass
class Binarize(_SimpleOperator):
    193@dataclass
    +194class Binarize(_SimpleOperator):
    +195    """'output = tensor > threshold'."""
    +196
    +197    threshold: Union[float, Sequence[float]]
    +198    axis: Optional[AxisId] = None
    +199
    +200    def _apply(self, input: Tensor, stat: Stat) -> Tensor:
    +201        return input > self.threshold
    +202
    +203    def get_output_shape(
    +204        self, input_shape: Mapping[AxisId, int]
    +205    ) -> Mapping[AxisId, int]:
    +206        return input_shape
    +207
    +208    @classmethod
    +209    def from_proc_descr(
    +210        cls, descr: Union[v0_4.BinarizeDescr, v0_5.BinarizeDescr], member_id: MemberId
    +211    ) -> Self:
    +212        if isinstance(descr.kwargs, (v0_4.BinarizeKwargs, v0_5.BinarizeKwargs)):
    +213            return cls(
    +214                input=member_id, output=member_id, threshold=descr.kwargs.threshold
    +215            )
    +216        elif isinstance(descr.kwargs, v0_5.BinarizeAlongAxisKwargs):
    +217            return cls(
    +218                input=member_id,
    +219                output=member_id,
    +220                threshold=descr.kwargs.threshold,
    +221                axis=descr.kwargs.axis,
    +222            )
    +223        else:
    +224            assert_never(descr.kwargs)
    +

'output = tensor > threshold'.

Binarize(input: TensorId, output: TensorId, threshold: Union[float, Sequence[float]], axis: Optional[AxisId] = None)

threshold: Union[float, Sequence[float]]

axis: Optional[AxisId] = None
    + + def + get_output_shape( self, input_shape: Mapping[bioimageio.spec.model.v0_5.AxisId, int]) -> Mapping[bioimageio.spec.model.v0_5.AxisId, int]: + + + +
    + +
    203    def get_output_shape(
    +204        self, input_shape: Mapping[AxisId, int]
    +205    ) -> Mapping[AxisId, int]:
    +206        return input_shape
    +
    + + + + +
    +
    + +
    +
    @classmethod
    + + def + from_proc_descr( cls, descr: Union[bioimageio.spec.model.v0_4.BinarizeDescr, bioimageio.spec.model.v0_5.BinarizeDescr], member_id: bioimageio.spec.model.v0_5.TensorId) -> Self: + + + +
    + +
    208    @classmethod
    +209    def from_proc_descr(
    +210        cls, descr: Union[v0_4.BinarizeDescr, v0_5.BinarizeDescr], member_id: MemberId
    +211    ) -> Self:
    +212        if isinstance(descr.kwargs, (v0_4.BinarizeKwargs, v0_5.BinarizeKwargs)):
    +213            return cls(
    +214                input=member_id, output=member_id, threshold=descr.kwargs.threshold
    +215            )
    +216        elif isinstance(descr.kwargs, v0_5.BinarizeAlongAxisKwargs):
    +217            return cls(
    +218                input=member_id,
    +219                output=member_id,
    +220                threshold=descr.kwargs.threshold,
    +221                axis=descr.kwargs.axis,
    +222            )
    +223        else:
    +224            assert_never(descr.kwargs)
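The element-wise semantics, sketched on a plain xarray.DataArray (the operator itself works on bioimageio.core.Tensor members of a Sample):

import numpy as np
import xarray as xr

x = xr.DataArray(np.array([[0.1, 0.6], [0.4, 0.9]]), dims=("y", "x"))
mask = x > 0.5  # boolean array of the same shape; this is all _apply does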
    +
    +
    @dataclass
class Clip(_SimpleOperator):
    227@dataclass
    +228class Clip(_SimpleOperator):
    +229    min: Optional[float] = None
    +230    """minimum value for clipping"""
    +231    max: Optional[float] = None
    +232    """maximum value for clipping"""
    +233
    +234    def __post_init__(self):
    +235        assert self.min is not None or self.max is not None, "missing min or max value"
    +236        assert (
    +237            self.min is None or self.max is None or self.min < self.max
    +238        ), f"expected min < max, but {self.min} !< {self.max}"
    +239
    +240    def _apply(self, input: Tensor, stat: Stat) -> Tensor:
    +241        return input.clip(self.min, self.max)
    +242
    +243    def get_output_shape(
    +244        self, input_shape: Mapping[AxisId, int]
    +245    ) -> Mapping[AxisId, int]:
    +246        return input_shape
    +247
    +248    @classmethod
    +249    def from_proc_descr(
    +250        cls, descr: Union[v0_4.ClipDescr, v0_5.ClipDescr], member_id: MemberId
    +251    ) -> Self:
    +252        return cls(
    +253            input=member_id,
    +254            output=member_id,
    +255            min=descr.kwargs.min,
    +256            max=descr.kwargs.max,
    +257        )
    +
Clip(input: TensorId, output: TensorId, min: Optional[float] = None, max: Optional[float] = None)

min: Optional[float] = None
    minimum value for clipping

max: Optional[float] = None
    maximum value for clipping
    + +
    + + def + get_output_shape( self, input_shape: Mapping[bioimageio.spec.model.v0_5.AxisId, int]) -> Mapping[bioimageio.spec.model.v0_5.AxisId, int]: + + + +
    + +
    243    def get_output_shape(
    +244        self, input_shape: Mapping[AxisId, int]
    +245    ) -> Mapping[AxisId, int]:
    +246        return input_shape
    +
    + + + + +
    +
    + +
    +
    @classmethod
    + + def + from_proc_descr( cls, descr: Union[bioimageio.spec.model.v0_4.ClipDescr, bioimageio.spec.model.v0_5.ClipDescr], member_id: bioimageio.spec.model.v0_5.TensorId) -> Self: + + + +
    + +
    248    @classmethod
    +249    def from_proc_descr(
    +250        cls, descr: Union[v0_4.ClipDescr, v0_5.ClipDescr], member_id: MemberId
    +251    ) -> Self:
    +252        return cls(
    +253            input=member_id,
    +254            output=member_id,
    +255            min=descr.kwargs.min,
    +256            max=descr.kwargs.max,
    +257        )
    +
    +
    @dataclass
class EnsureDtype(_SimpleOperator):
    260@dataclass
    +261class EnsureDtype(_SimpleOperator):
    +262    dtype: DTypeStr
    +263
    +264    @classmethod
    +265    def from_proc_descr(cls, descr: v0_5.EnsureDtypeDescr, member_id: MemberId):
    +266        return cls(input=member_id, output=member_id, dtype=descr.kwargs.dtype)
    +267
    +268    def get_descr(self):
    +269        return v0_5.EnsureDtypeDescr(kwargs=v0_5.EnsureDtypeKwargs(dtype=self.dtype))
    +270
    +271    def get_output_shape(
    +272        self, input_shape: Mapping[AxisId, int]
    +273    ) -> Mapping[AxisId, int]:
    +274        return input_shape
    +275
    +276    def _apply(self, input: Tensor, stat: Stat) -> Tensor:
    +277        return input.astype(self.dtype)
    +
EnsureDtype(input: TensorId, output: TensorId, dtype: DTypeStr)

dtype: Literal['bool', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'uint8', 'uint16', 'uint32', 'uint64']
    +
    @classmethod
    + + def + from_proc_descr( cls, descr: bioimageio.spec.model.v0_5.EnsureDtypeDescr, member_id: bioimageio.spec.model.v0_5.TensorId): + + + +
    + +
    264    @classmethod
    +265    def from_proc_descr(cls, descr: v0_5.EnsureDtypeDescr, member_id: MemberId):
    +266        return cls(input=member_id, output=member_id, dtype=descr.kwargs.dtype)
    +
    + + + + +
    +
    + +
    + + def + get_descr(self): + + + +
    + +
    268    def get_descr(self):
    +269        return v0_5.EnsureDtypeDescr(kwargs=v0_5.EnsureDtypeKwargs(dtype=self.dtype))
    +
    + + + + +
    +
    + +
    + + def + get_output_shape( self, input_shape: Mapping[bioimageio.spec.model.v0_5.AxisId, int]) -> Mapping[bioimageio.spec.model.v0_5.AxisId, int]: + + + +
    + +
    271    def get_output_shape(
    +272        self, input_shape: Mapping[AxisId, int]
    +273    ) -> Mapping[AxisId, int]:
    +274        return input_shape
    +
    +
    @dataclass
class ScaleLinear(_SimpleOperator):
    280@dataclass
    +281class ScaleLinear(_SimpleOperator):
    +282    gain: Union[float, xr.DataArray] = 1.0
    +283    """multiplicative factor"""
    +284
    +285    offset: Union[float, xr.DataArray] = 0.0
    +286    """additive term"""
    +287
    +288    def _apply(self, input: Tensor, stat: Stat) -> Tensor:
    +289        return input * self.gain + self.offset
    +290
    +291    def get_output_shape(
    +292        self, input_shape: Mapping[AxisId, int]
    +293    ) -> Mapping[AxisId, int]:
    +294        return input_shape
    +295
    +296    @classmethod
    +297    def from_proc_descr(
    +298        cls,
    +299        descr: Union[v0_4.ScaleLinearDescr, v0_5.ScaleLinearDescr],
    +300        member_id: MemberId,
    +301    ) -> Self:
    +302        kwargs = descr.kwargs
    +303        if isinstance(kwargs, v0_5.ScaleLinearAlongAxisKwargs):
    +304            axis = kwargs.axis
    +305        elif isinstance(kwargs, (v0_4.ScaleLinearKwargs, v0_5.ScaleLinearKwargs)):
    +306            axis = None
    +307        else:
    +308            assert_never(kwargs)
    +309
    +310        if axis:
    +311            gain = xr.DataArray(np.atleast_1d(kwargs.gain), dims=axis)
    +312            offset = xr.DataArray(np.atleast_1d(kwargs.offset), dims=axis)
    +313        else:
    +314            assert (
    +315                isinstance(kwargs.gain, (float, int)) or len(kwargs.gain) == 1
    +316            ), kwargs.gain
    +317            gain = (
    +318                kwargs.gain if isinstance(kwargs.gain, (float, int)) else kwargs.gain[0]
    +319            )
    +320            assert isinstance(kwargs.offset, (float, int)) or len(kwargs.offset) == 1
    +321            offset = (
    +322                kwargs.offset
    +323                if isinstance(kwargs.offset, (float, int))
    +324                else kwargs.offset[0]
    +325            )
    +326
    +327        return cls(input=member_id, output=member_id, gain=gain, offset=offset)
    +
ScaleLinear(input: TensorId, output: TensorId, gain: Union[float, xr.DataArray] = 1.0, offset: Union[float, xr.DataArray] = 0.0)

gain: Union[float, xr.DataArray] = 1.0
    multiplicative factor

offset: Union[float, xr.DataArray] = 0.0
    additive term
    + +
    + + def + get_output_shape( self, input_shape: Mapping[bioimageio.spec.model.v0_5.AxisId, int]) -> Mapping[bioimageio.spec.model.v0_5.AxisId, int]: + + + +
    + +
    291    def get_output_shape(
    +292        self, input_shape: Mapping[AxisId, int]
    +293    ) -> Mapping[AxisId, int]:
    +294        return input_shape
    +
    + + + + +
    +
    + +
    +
    @classmethod
    + + def + from_proc_descr( cls, descr: Union[bioimageio.spec.model.v0_4.ScaleLinearDescr, bioimageio.spec.model.v0_5.ScaleLinearDescr], member_id: bioimageio.spec.model.v0_5.TensorId) -> Self: + + + +
    + +
    296    @classmethod
    +297    def from_proc_descr(
    +298        cls,
    +299        descr: Union[v0_4.ScaleLinearDescr, v0_5.ScaleLinearDescr],
    +300        member_id: MemberId,
    +301    ) -> Self:
    +302        kwargs = descr.kwargs
    +303        if isinstance(kwargs, v0_5.ScaleLinearAlongAxisKwargs):
    +304            axis = kwargs.axis
    +305        elif isinstance(kwargs, (v0_4.ScaleLinearKwargs, v0_5.ScaleLinearKwargs)):
    +306            axis = None
    +307        else:
    +308            assert_never(kwargs)
    +309
    +310        if axis:
    +311            gain = xr.DataArray(np.atleast_1d(kwargs.gain), dims=axis)
    +312            offset = xr.DataArray(np.atleast_1d(kwargs.offset), dims=axis)
    +313        else:
    +314            assert (
    +315                isinstance(kwargs.gain, (float, int)) or len(kwargs.gain) == 1
    +316            ), kwargs.gain
    +317            gain = (
    +318                kwargs.gain if isinstance(kwargs.gain, (float, int)) else kwargs.gain[0]
    +319            )
    +320            assert isinstance(kwargs.offset, (float, int)) or len(kwargs.offset) == 1
    +321            offset = (
    +322                kwargs.offset
    +323                if isinstance(kwargs.offset, (float, int))
    +324                else kwargs.offset[0]
    +325            )
    +326
    +327        return cls(input=member_id, output=member_id, gain=gain, offset=offset)
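What the along-axis variant amounts to, sketched with plain xarray broadcasting (gain and offset are expanded to DataArrays over the given axis, as in from_proc_descr above):

import numpy as np
import xarray as xr

img = xr.DataArray(np.ones((2, 4, 4)), dims=("channel", "y", "x"))
gain = xr.DataArray([1.0, 2.0], dims="channel")     # per-channel multiplicative factor
offset = xr.DataArray([0.0, -1.0], dims="channel")  # per-channel additive term
scaled = img * gain + offset                        # broadcasts along "channel", as in _apply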
    +
    +
    @dataclass
class ScaleMeanVariance(_SimpleOperator):
    330@dataclass
    +331class ScaleMeanVariance(_SimpleOperator):
    +332    axes: Optional[Sequence[AxisId]] = None
    +333    reference_tensor: Optional[MemberId] = None
    +334    eps: float = 1e-6
    +335    mean: Union[SampleMean, DatasetMean] = field(init=False)
    +336    std: Union[SampleStd, DatasetStd] = field(init=False)
    +337    ref_mean: Union[SampleMean, DatasetMean] = field(init=False)
    +338    ref_std: Union[SampleStd, DatasetStd] = field(init=False)
    +339
    +340    @property
    +341    def required_measures(self):
    +342        return {self.mean, self.std, self.ref_mean, self.ref_std}
    +343
    +344    def __post_init__(self):
    +345        axes = None if self.axes is None else tuple(self.axes)
    +346        ref_tensor = self.reference_tensor or self.input
    +347        if axes is None or AxisId("batch") not in axes:
    +348            Mean = SampleMean
    +349            Std = SampleStd
    +350        else:
    +351            Mean = DatasetMean
    +352            Std = DatasetStd
    +353
    +354        self.mean = Mean(member_id=self.input, axes=axes)
    +355        self.std = Std(member_id=self.input, axes=axes)
    +356        self.ref_mean = Mean(member_id=ref_tensor, axes=axes)
    +357        self.ref_std = Std(member_id=ref_tensor, axes=axes)
    +358
    +359    def _apply(self, input: Tensor, stat: Stat) -> Tensor:
    +360        mean = stat[self.mean]
    +361        std = stat[self.std] + self.eps
    +362        ref_mean = stat[self.ref_mean]
    +363        ref_std = stat[self.ref_std] + self.eps
    +364        return (input - mean) / std * ref_std + ref_mean
    +365
    +366    def get_output_shape(
    +367        self, input_shape: Mapping[AxisId, int]
    +368    ) -> Mapping[AxisId, int]:
    +369        return input_shape
    +370
    +371    @classmethod
    +372    def from_proc_descr(
    +373        cls,
    +374        descr: Union[v0_4.ScaleMeanVarianceDescr, v0_5.ScaleMeanVarianceDescr],
    +375        member_id: MemberId,
    +376    ) -> Self:
    +377        kwargs = descr.kwargs
    +378        _, axes = _get_axes(descr.kwargs)
    +379
    +380        return cls(
    +381            input=member_id,
    +382            output=member_id,
    +383            reference_tensor=MemberId(str(kwargs.reference_tensor)),
    +384            axes=axes,
    +385            eps=kwargs.eps,
    +386        )
    +
ScaleMeanVariance(input: TensorId, output: TensorId, axes: Optional[Sequence[AxisId]] = None, reference_tensor: Optional[TensorId] = None, eps: float = 1e-06)

axes: Optional[Sequence[AxisId]] = None

reference_tensor: Optional[TensorId] = None

eps: float = 1e-06
    + required_measures + + + +
    + +
    340    @property
    +341    def required_measures(self):
    +342        return {self.mean, self.std, self.ref_mean, self.ref_std}
    +
    + + + + +
    +
    + +
    + + def + get_output_shape( self, input_shape: Mapping[bioimageio.spec.model.v0_5.AxisId, int]) -> Mapping[bioimageio.spec.model.v0_5.AxisId, int]: + + + +
    + +
    366    def get_output_shape(
    +367        self, input_shape: Mapping[AxisId, int]
    +368    ) -> Mapping[AxisId, int]:
    +369        return input_shape
    +
    + + + + +
    +
    + +
    +
    @classmethod
    + + def + from_proc_descr( cls, descr: Union[bioimageio.spec.model.v0_4.ScaleMeanVarianceDescr, bioimageio.spec.model.v0_5.ScaleMeanVarianceDescr], member_id: bioimageio.spec.model.v0_5.TensorId) -> Self: + + + +
    + +
    371    @classmethod
    +372    def from_proc_descr(
    +373        cls,
    +374        descr: Union[v0_4.ScaleMeanVarianceDescr, v0_5.ScaleMeanVarianceDescr],
    +375        member_id: MemberId,
    +376    ) -> Self:
    +377        kwargs = descr.kwargs
    +378        _, axes = _get_axes(descr.kwargs)
    +379
    +380        return cls(
    +381            input=member_id,
    +382            output=member_id,
    +383            reference_tensor=MemberId(str(kwargs.reference_tensor)),
    +384            axes=axes,
    +385            eps=kwargs.eps,
    +386        )
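The underlying arithmetic, sketched with numpy (sample-mode statistics; in dataset mode the means and standard deviations come from the dataset statistics instead):

import numpy as np

eps = 1e-6
x = np.random.rand(64, 64).astype("float32")                  # tensor to transform
ref = 3.0 + 10.0 * np.random.rand(64, 64).astype("float32")   # reference tensor
out = (x - x.mean()) / (x.std() + eps) * (ref.std() + eps) + ref.mean()
# out now approximately matches the mean and standard deviation of ref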
    +
    +
    @dataclass
class ScaleRange(_SimpleOperator):
    411@dataclass
    +412class ScaleRange(_SimpleOperator):
    +413    lower_percentile: InitVar[Optional[Union[SampleQuantile, DatasetPercentile]]] = None
    +414    upper_percentile: InitVar[Optional[Union[SampleQuantile, DatasetPercentile]]] = None
    +415    lower: Union[SampleQuantile, DatasetPercentile] = field(init=False)
    +416    upper: Union[SampleQuantile, DatasetPercentile] = field(init=False)
    +417
    +418    eps: float = 1e-6
    +419
    +420    def __post_init__(
    +421        self,
    +422        lower_percentile: Optional[Union[SampleQuantile, DatasetPercentile]],
    +423        upper_percentile: Optional[Union[SampleQuantile, DatasetPercentile]],
    +424    ):
    +425        if lower_percentile is None:
    +426            tid = self.input if upper_percentile is None else upper_percentile.member_id
    +427            self.lower = DatasetPercentile(q=0.0, member_id=tid)
    +428        else:
    +429            self.lower = lower_percentile
    +430
    +431        if upper_percentile is None:
    +432            self.upper = DatasetPercentile(q=1.0, member_id=self.lower.member_id)
    +433        else:
    +434            self.upper = upper_percentile
    +435
    +436        assert self.lower.member_id == self.upper.member_id
    +437        assert self.lower.q < self.upper.q
    +438        assert self.lower.axes == self.upper.axes
    +439
    +440    @property
    +441    def required_measures(self):
    +442        return {self.lower, self.upper}
    +443
    +444    def get_output_shape(
    +445        self, input_shape: Mapping[AxisId, int]
    +446    ) -> Mapping[AxisId, int]:
    +447        return input_shape
    +448
    +449    @classmethod
    +450    def from_proc_descr(
    +451        cls,
    +452        descr: Union[v0_4.ScaleRangeDescr, v0_5.ScaleRangeDescr],
    +453        member_id: MemberId,
    +454    ):
    +455        kwargs = descr.kwargs
    +456        ref_tensor = (
    +457            member_id
    +458            if kwargs.reference_tensor is None
    +459            else MemberId(str(kwargs.reference_tensor))
    +460        )
    +461        dataset_mode, axes = _get_axes(descr.kwargs)
    +462        if dataset_mode:
    +463            Percentile = DatasetPercentile
    +464        else:
    +465            Percentile = SampleQuantile
    +466
    +467        return cls(
    +468            input=member_id,
    +469            output=member_id,
    +470            lower_percentile=Percentile(
    +471                q=kwargs.min_percentile / 100, axes=axes, member_id=ref_tensor
    +472            ),
    +473            upper_percentile=Percentile(
    +474                q=kwargs.max_percentile / 100, axes=axes, member_id=ref_tensor
    +475            ),
    +476        )
    +477
    +478    def _apply(self, input: Tensor, stat: Stat) -> Tensor:
    +479        lower = stat[self.lower]
    +480        upper = stat[self.upper]
    +481        return (input - lower) / (upper - lower + self.eps)
    +482
    +483    def get_descr(self):
    +484        assert self.lower.axes == self.upper.axes
    +485        assert self.lower.member_id == self.upper.member_id
    +486
    +487        return v0_5.ScaleRangeDescr(
    +488            kwargs=v0_5.ScaleRangeKwargs(
    +489                axes=self.lower.axes,
    +490                min_percentile=self.lower.q * 100,
    +491                max_percentile=self.upper.q * 100,
    +492                eps=self.eps,
    +493                reference_tensor=self.lower.member_id,
    +494            )
    +495        )
    +
ScaleRange(input: TensorId, output: TensorId, lower_percentile: Optional[Union[SampleQuantile, DatasetPercentile]] = None, upper_percentile: Optional[Union[SampleQuantile, DatasetPercentile]] = None, eps: float = 1e-06)

lower_percentile: InitVar[Optional[Union[SampleQuantile, DatasetPercentile]]] = None

upper_percentile: InitVar[Optional[Union[SampleQuantile, DatasetPercentile]]] = None

eps: float = 1e-06
    + required_measures + + + +
    + +
    440    @property
    +441    def required_measures(self):
    +442        return {self.lower, self.upper}
    +
    + + + + +
    +
    + +
    + + def + get_output_shape( self, input_shape: Mapping[bioimageio.spec.model.v0_5.AxisId, int]) -> Mapping[bioimageio.spec.model.v0_5.AxisId, int]: + + + +
    + +
    444    def get_output_shape(
    +445        self, input_shape: Mapping[AxisId, int]
    +446    ) -> Mapping[AxisId, int]:
    +447        return input_shape
    +
    + + + + +
    +
    + +
    +
    @classmethod
    + + def + from_proc_descr( cls, descr: Union[bioimageio.spec.model.v0_4.ScaleRangeDescr, bioimageio.spec.model.v0_5.ScaleRangeDescr], member_id: bioimageio.spec.model.v0_5.TensorId): + + + +
    + +
    449    @classmethod
    +450    def from_proc_descr(
    +451        cls,
    +452        descr: Union[v0_4.ScaleRangeDescr, v0_5.ScaleRangeDescr],
    +453        member_id: MemberId,
    +454    ):
    +455        kwargs = descr.kwargs
    +456        ref_tensor = (
    +457            member_id
    +458            if kwargs.reference_tensor is None
    +459            else MemberId(str(kwargs.reference_tensor))
    +460        )
    +461        dataset_mode, axes = _get_axes(descr.kwargs)
    +462        if dataset_mode:
    +463            Percentile = DatasetPercentile
    +464        else:
    +465            Percentile = SampleQuantile
    +466
    +467        return cls(
    +468            input=member_id,
    +469            output=member_id,
    +470            lower_percentile=Percentile(
    +471                q=kwargs.min_percentile / 100, axes=axes, member_id=ref_tensor
    +472            ),
    +473            upper_percentile=Percentile(
    +474                q=kwargs.max_percentile / 100, axes=axes, member_id=ref_tensor
    +475            ),
    +476        )
    +
    + + + + +
    +
    + +
    + + def + get_descr(self): + + + +
    + +
    483    def get_descr(self):
    +484        assert self.lower.axes == self.upper.axes
    +485        assert self.lower.member_id == self.upper.member_id
    +486
    +487        return v0_5.ScaleRangeDescr(
    +488            kwargs=v0_5.ScaleRangeKwargs(
    +489                axes=self.lower.axes,
    +490                min_percentile=self.lower.q * 100,
    +491                max_percentile=self.upper.q * 100,
    +492                eps=self.eps,
    +493                reference_tensor=self.lower.member_id,
    +494            )
    +495        )
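The effect of the percentile normalization, sketched with numpy (min_percentile=1 and max_percentile=99 arrive here as q=0.01 and q=0.99):

import numpy as np

eps = 1e-6
x = np.random.rand(256, 256)
lower, upper = np.quantile(x, [0.01, 0.99])
out = (x - lower) / (upper - lower + eps)  # bulk of the values now lies in [0, 1]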
    +
    +
    @dataclass
class Sigmoid(_SimpleOperator):
    498@dataclass
    +499class Sigmoid(_SimpleOperator):
    +500    """1 / (1 + e^(-input))."""
    +501
    +502    def _apply(self, input: Tensor, stat: Stat) -> Tensor:
    +503        return Tensor(1.0 / (1.0 + np.exp(-input)), dims=input.dims)
    +504
    +505    @property
    +506    def required_measures(self) -> Collection[Measure]:
    +507        return {}
    +508
    +509    def get_output_shape(
    +510        self, input_shape: Mapping[AxisId, int]
    +511    ) -> Mapping[AxisId, int]:
    +512        return input_shape
    +513
    +514    @classmethod
    +515    def from_proc_descr(
    +516        cls, descr: Union[v0_4.SigmoidDescr, v0_5.SigmoidDescr], member_id: MemberId
    +517    ) -> Self:
    +518        assert isinstance(descr, (v0_4.SigmoidDescr, v0_5.SigmoidDescr))
    +519        return cls(input=member_id, output=member_id)
    +520
    +521    def get_descr(self):
    +522        return v0_5.SigmoidDescr()
    +
    + + +

    1 / (1 + e^(-input)).

    +
    + + +
required_measures: Collection[Measure]
    + +
    505    @property
    +506    def required_measures(self) -> Collection[Measure]:
    +507        return {}
    +
    + + + + +
    +
    + +
    + + def + get_output_shape( self, input_shape: Mapping[bioimageio.spec.model.v0_5.AxisId, int]) -> Mapping[bioimageio.spec.model.v0_5.AxisId, int]: + + + +
    + +
    509    def get_output_shape(
    +510        self, input_shape: Mapping[AxisId, int]
    +511    ) -> Mapping[AxisId, int]:
    +512        return input_shape
    +
    + + + + +
    +
    + +
    +
    @classmethod
    + + def + from_proc_descr( cls, descr: Union[bioimageio.spec.model.v0_4.SigmoidDescr, bioimageio.spec.model.v0_5.SigmoidDescr], member_id: bioimageio.spec.model.v0_5.TensorId) -> Self: + + + +
    + +
    514    @classmethod
    +515    def from_proc_descr(
    +516        cls, descr: Union[v0_4.SigmoidDescr, v0_5.SigmoidDescr], member_id: MemberId
    +517    ) -> Self:
    +518        assert isinstance(descr, (v0_4.SigmoidDescr, v0_5.SigmoidDescr))
    +519        return cls(input=member_id, output=member_id)
    +
    + + + + +
    +
    + +
    + + def + get_descr(self): + + + +
    + +
    521    def get_descr(self):
    +522        return v0_5.SigmoidDescr()
    +
    +
    @dataclass
class ZeroMeanUnitVariance(_SimpleOperator):
    525@dataclass
    +526class ZeroMeanUnitVariance(_SimpleOperator):
    +527    """normalize to zero mean, unit variance."""
    +528
    +529    mean: MeanMeasure
    +530    std: StdMeasure
    +531
    +532    eps: float = 1e-6
    +533
    +534    def __post_init__(self):
    +535        assert self.mean.axes == self.std.axes
    +536
    +537    @property
    +538    def required_measures(self) -> Set[Union[MeanMeasure, StdMeasure]]:
    +539        return {self.mean, self.std}
    +540
    +541    def get_output_shape(
    +542        self, input_shape: Mapping[AxisId, int]
    +543    ) -> Mapping[AxisId, int]:
    +544        return input_shape
    +545
    +546    @classmethod
    +547    def from_proc_descr(
    +548        cls,
    +549        descr: Union[v0_4.ZeroMeanUnitVarianceDescr, v0_5.ZeroMeanUnitVarianceDescr],
    +550        member_id: MemberId,
    +551    ):
    +552        dataset_mode, axes = _get_axes(descr.kwargs)
    +553
    +554        if dataset_mode:
    +555            Mean = DatasetMean
    +556            Std = DatasetStd
    +557        else:
    +558            Mean = SampleMean
    +559            Std = SampleStd
    +560
    +561        return cls(
    +562            input=member_id,
    +563            output=member_id,
    +564            mean=Mean(axes=axes, member_id=member_id),
    +565            std=Std(axes=axes, member_id=member_id),
    +566        )
    +567
    +568    def _apply(self, input: Tensor, stat: Stat) -> Tensor:
    +569        mean = stat[self.mean]
    +570        std = stat[self.std]
    +571        return (input - mean) / (std + self.eps)
    +572
    +573    def get_descr(self):
    +574        return v0_5.ZeroMeanUnitVarianceDescr(
    +575            kwargs=v0_5.ZeroMeanUnitVarianceKwargs(axes=self.mean.axes, eps=self.eps)
    +576        )
    +
    + + +

    normalize to zero mean, unit variance.

    +
eps: float = 1e-06
    + + + +
    537    @property
    +538    def required_measures(self) -> Set[Union[MeanMeasure, StdMeasure]]:
    +539        return {self.mean, self.std}
    +
    + + + + +
    +
    + +
    + + def + get_output_shape( self, input_shape: Mapping[bioimageio.spec.model.v0_5.AxisId, int]) -> Mapping[bioimageio.spec.model.v0_5.AxisId, int]: + + + +
    + +
    541    def get_output_shape(
    +542        self, input_shape: Mapping[AxisId, int]
    +543    ) -> Mapping[AxisId, int]:
    +544        return input_shape
    +
    + + + + +
    +
    + +
    +
    @classmethod
    + + def + from_proc_descr( cls, descr: Union[bioimageio.spec.model.v0_4.ZeroMeanUnitVarianceDescr, bioimageio.spec.model.v0_5.ZeroMeanUnitVarianceDescr], member_id: bioimageio.spec.model.v0_5.TensorId): + + + +
    + +
    546    @classmethod
    +547    def from_proc_descr(
    +548        cls,
    +549        descr: Union[v0_4.ZeroMeanUnitVarianceDescr, v0_5.ZeroMeanUnitVarianceDescr],
    +550        member_id: MemberId,
    +551    ):
    +552        dataset_mode, axes = _get_axes(descr.kwargs)
    +553
    +554        if dataset_mode:
    +555            Mean = DatasetMean
    +556            Std = DatasetStd
    +557        else:
    +558            Mean = SampleMean
    +559            Std = SampleStd
    +560
    +561        return cls(
    +562            input=member_id,
    +563            output=member_id,
    +564            mean=Mean(axes=axes, member_id=member_id),
    +565            std=Std(axes=axes, member_id=member_id),
    +566        )
    +
    + + + + +
    +
    + +
    + + def + get_descr(self): + + + +
    + +
    573    def get_descr(self):
    +574        return v0_5.ZeroMeanUnitVarianceDescr(
    +575            kwargs=v0_5.ZeroMeanUnitVarianceKwargs(axes=self.mean.axes, eps=self.eps)
    +576        )
    +
    + + + + +
    +
    +
    Inherited Members
    +
    + +
    +
    +
    +
    + +
    +
    @dataclass
    + + class + FixedZeroMeanUnitVariance(_SimpleOperator): + + + +
    + +
    579@dataclass
    +580class FixedZeroMeanUnitVariance(_SimpleOperator):
    +581    """normalize to zero mean, unit variance with precomputed values."""
    +582
    +583    mean: Union[float, xr.DataArray]
    +584    std: Union[float, xr.DataArray]
    +585
    +586    eps: float = 1e-6
    +587
    +588    def __post_init__(self):
    +589        assert (
    +590            isinstance(self.mean, (int, float))
    +591            or isinstance(self.std, (int, float))
    +592            or self.mean.dims == self.std.dims
    +593        )
    +594
    +595    def get_output_shape(
    +596        self, input_shape: Mapping[AxisId, int]
    +597    ) -> Mapping[AxisId, int]:
    +598        return input_shape
    +599
    +600    @classmethod
    +601    def from_proc_descr(
    +602        cls,
    +603        descr: v0_5.FixedZeroMeanUnitVarianceDescr,
    +604        member_id: MemberId,
    +605    ) -> Self:
    +606        if isinstance(descr.kwargs, v0_5.FixedZeroMeanUnitVarianceKwargs):
    +607            dims = None
    +608        elif isinstance(descr.kwargs, v0_5.FixedZeroMeanUnitVarianceAlongAxisKwargs):
    +609            dims = (descr.kwargs.axis,)
    +610        else:
    +611            assert_never(descr.kwargs)
    +612
    +613        return cls(
    +614            input=member_id,
    +615            output=member_id,
    +616            mean=xr.DataArray(descr.kwargs.mean, dims=dims),
    +617            std=xr.DataArray(descr.kwargs.std, dims=dims),
    +618        )
    +619
    +620    def get_descr(self):
    +621        if isinstance(self.mean, (int, float)):
    +622            assert isinstance(self.std, (int, float))
    +623            kwargs = v0_5.FixedZeroMeanUnitVarianceKwargs(mean=self.mean, std=self.std)
    +624        else:
    +625            assert isinstance(self.std, xr.DataArray)
    +626            assert len(self.mean.dims) == 1
    +627            kwargs = v0_5.FixedZeroMeanUnitVarianceAlongAxisKwargs(
    +628                axis=AxisId(str(self.mean.dims[0])),
    +629                mean=list(self.mean),
    +630                std=list(self.std),
    +631            )
    +632
    +633        return v0_5.FixedZeroMeanUnitVarianceDescr(kwargs=kwargs)
    +634
    +635    def _apply(self, input: Tensor, stat: Stat) -> Tensor:
    +636        return (input - self.mean) / (self.std + self.eps)
    +
    + + +

    normalize to zero mean, unit variance with precomputed values.

    +
    + + +
    +
    + + FixedZeroMeanUnitVariance( input: bioimageio.spec.model.v0_5.TensorId, output: bioimageio.spec.model.v0_5.TensorId, mean: Union[float, xarray.core.dataarray.DataArray], std: Union[float, xarray.core.dataarray.DataArray], eps: float = 1e-06) + + +
    + + + + +
    +
    +
    + mean: Union[float, xarray.core.dataarray.DataArray] + + +
    + + + + +
    +
    +
    + std: Union[float, xarray.core.dataarray.DataArray] + + +
    + + + + +
    +
    +
    + eps: float = +1e-06 + + +
    + + + + +
    +
    + +
    + + def + get_output_shape( self, input_shape: Mapping[bioimageio.spec.model.v0_5.AxisId, int]) -> Mapping[bioimageio.spec.model.v0_5.AxisId, int]: + + + +
    + +
    595    def get_output_shape(
    +596        self, input_shape: Mapping[AxisId, int]
    +597    ) -> Mapping[AxisId, int]:
    +598        return input_shape
    +
    + + + + +
    +
    + +
    +
    @classmethod
    + + def + from_proc_descr( cls, descr: bioimageio.spec.model.v0_5.FixedZeroMeanUnitVarianceDescr, member_id: bioimageio.spec.model.v0_5.TensorId) -> Self: + + + +
    + +
    600    @classmethod
    +601    def from_proc_descr(
    +602        cls,
    +603        descr: v0_5.FixedZeroMeanUnitVarianceDescr,
    +604        member_id: MemberId,
    +605    ) -> Self:
    +606        if isinstance(descr.kwargs, v0_5.FixedZeroMeanUnitVarianceKwargs):
    +607            dims = None
    +608        elif isinstance(descr.kwargs, v0_5.FixedZeroMeanUnitVarianceAlongAxisKwargs):
    +609            dims = (descr.kwargs.axis,)
    +610        else:
    +611            assert_never(descr.kwargs)
    +612
    +613        return cls(
    +614            input=member_id,
    +615            output=member_id,
    +616            mean=xr.DataArray(descr.kwargs.mean, dims=dims),
    +617            std=xr.DataArray(descr.kwargs.std, dims=dims),
    +618        )
    +
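A minimal sketch of building this operator from a v0.5 processing description; the descriptor classes and the `from_proc_descr` signature are taken from the listing above, while the tensor name `"raw"` and the mean/std values are assumptions for illustration:

```python
from bioimageio.spec.model import v0_5

from bioimageio.core.proc_ops import FixedZeroMeanUnitVariance

# assumed tensor name "raw"; mean/std are placeholder values
descr = v0_5.FixedZeroMeanUnitVarianceDescr(
    kwargs=v0_5.FixedZeroMeanUnitVarianceKwargs(mean=12.3, std=4.5)
)
op = FixedZeroMeanUnitVariance.from_proc_descr(descr, member_id=v0_5.TensorId("raw"))
```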
    + + + + +
    +
    + +
    + + def + get_descr(self): + + + +
    + +
    620    def get_descr(self):
    +621        if isinstance(self.mean, (int, float)):
    +622            assert isinstance(self.std, (int, float))
    +623            kwargs = v0_5.FixedZeroMeanUnitVarianceKwargs(mean=self.mean, std=self.std)
    +624        else:
    +625            assert isinstance(self.std, xr.DataArray)
    +626            assert len(self.mean.dims) == 1
    +627            kwargs = v0_5.FixedZeroMeanUnitVarianceAlongAxisKwargs(
    +628                axis=AxisId(str(self.mean.dims[0])),
    +629                mean=list(self.mean),
    +630                std=list(self.std),
    +631            )
    +632
    +633        return v0_5.FixedZeroMeanUnitVarianceDescr(kwargs=kwargs)
    +
    + + + + +
    +
    +
    Inherited Members
    +
    + +
    +
    +
    +
    +
    + ProcDescr = + + typing.Union[typing.Annotated[typing.Union[bioimageio.spec.model.v0_4.BinarizeDescr, bioimageio.spec.model.v0_4.ClipDescr, bioimageio.spec.model.v0_4.ScaleLinearDescr, bioimageio.spec.model.v0_4.SigmoidDescr, bioimageio.spec.model.v0_4.ZeroMeanUnitVarianceDescr, bioimageio.spec.model.v0_4.ScaleRangeDescr], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)], typing.Annotated[typing.Union[bioimageio.spec.model.v0_4.BinarizeDescr, bioimageio.spec.model.v0_4.ClipDescr, bioimageio.spec.model.v0_4.ScaleLinearDescr, bioimageio.spec.model.v0_4.SigmoidDescr, bioimageio.spec.model.v0_4.ZeroMeanUnitVarianceDescr, bioimageio.spec.model.v0_4.ScaleRangeDescr, bioimageio.spec.model.v0_4.ScaleMeanVarianceDescr], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)], typing.Annotated[typing.Union[bioimageio.spec.model.v0_5.BinarizeDescr, bioimageio.spec.model.v0_5.ClipDescr, bioimageio.spec.model.v0_5.EnsureDtypeDescr, bioimageio.spec.model.v0_5.ScaleLinearDescr, bioimageio.spec.model.v0_5.SigmoidDescr, bioimageio.spec.model.v0_5.FixedZeroMeanUnitVarianceDescr, bioimageio.spec.model.v0_5.ZeroMeanUnitVarianceDescr, bioimageio.spec.model.v0_5.ScaleRangeDescr], Discriminator(discriminator='id', custom_error_type=None, custom_error_message=None, custom_error_context=None)], typing.Annotated[typing.Union[bioimageio.spec.model.v0_5.BinarizeDescr, bioimageio.spec.model.v0_5.ClipDescr, bioimageio.spec.model.v0_5.EnsureDtypeDescr, bioimageio.spec.model.v0_5.ScaleLinearDescr, bioimageio.spec.model.v0_5.SigmoidDescr, bioimageio.spec.model.v0_5.FixedZeroMeanUnitVarianceDescr, bioimageio.spec.model.v0_5.ZeroMeanUnitVarianceDescr, bioimageio.spec.model.v0_5.ScaleRangeDescr, bioimageio.spec.model.v0_5.ScaleMeanVarianceDescr], Discriminator(discriminator='id', custom_error_type=None, custom_error_message=None, custom_error_context=None)]] + + +
    + + + + +
    +
    + + + + + +
    +
    + +
    + + def + get_proc_class( proc_spec: Union[Annotated[Union[bioimageio.spec.model.v0_4.BinarizeDescr, bioimageio.spec.model.v0_4.ClipDescr, bioimageio.spec.model.v0_4.ScaleLinearDescr, bioimageio.spec.model.v0_4.SigmoidDescr, bioimageio.spec.model.v0_4.ZeroMeanUnitVarianceDescr, bioimageio.spec.model.v0_4.ScaleRangeDescr], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.model.v0_4.BinarizeDescr, bioimageio.spec.model.v0_4.ClipDescr, bioimageio.spec.model.v0_4.ScaleLinearDescr, bioimageio.spec.model.v0_4.SigmoidDescr, bioimageio.spec.model.v0_4.ZeroMeanUnitVarianceDescr, bioimageio.spec.model.v0_4.ScaleRangeDescr, bioimageio.spec.model.v0_4.ScaleMeanVarianceDescr], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.model.v0_5.BinarizeDescr, bioimageio.spec.model.v0_5.ClipDescr, bioimageio.spec.model.v0_5.EnsureDtypeDescr, bioimageio.spec.model.v0_5.ScaleLinearDescr, bioimageio.spec.model.v0_5.SigmoidDescr, bioimageio.spec.model.v0_5.FixedZeroMeanUnitVarianceDescr, bioimageio.spec.model.v0_5.ZeroMeanUnitVarianceDescr, bioimageio.spec.model.v0_5.ScaleRangeDescr], Discriminator(discriminator='id', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.model.v0_5.BinarizeDescr, bioimageio.spec.model.v0_5.ClipDescr, bioimageio.spec.model.v0_5.EnsureDtypeDescr, bioimageio.spec.model.v0_5.ScaleLinearDescr, bioimageio.spec.model.v0_5.SigmoidDescr, bioimageio.spec.model.v0_5.FixedZeroMeanUnitVarianceDescr, bioimageio.spec.model.v0_5.ZeroMeanUnitVarianceDescr, bioimageio.spec.model.v0_5.ScaleRangeDescr, bioimageio.spec.model.v0_5.ScaleMeanVarianceDescr], Discriminator(discriminator='id', custom_error_type=None, custom_error_message=None, custom_error_context=None)]]): + + + +
    + +
    661def get_proc_class(proc_spec: ProcDescr):
    +662    if isinstance(proc_spec, (v0_4.BinarizeDescr, v0_5.BinarizeDescr)):
    +663        return Binarize
    +664    elif isinstance(proc_spec, (v0_4.ClipDescr, v0_5.ClipDescr)):
    +665        return Clip
    +666    elif isinstance(proc_spec, v0_5.EnsureDtypeDescr):
    +667        return EnsureDtype
    +668    elif isinstance(proc_spec, v0_5.FixedZeroMeanUnitVarianceDescr):
    +669        return FixedZeroMeanUnitVariance
    +670    elif isinstance(proc_spec, (v0_4.ScaleLinearDescr, v0_5.ScaleLinearDescr)):
    +671        return ScaleLinear
    +672    elif isinstance(
    +673        proc_spec, (v0_4.ScaleMeanVarianceDescr, v0_5.ScaleMeanVarianceDescr)
    +674    ):
    +675        return ScaleMeanVariance
    +676    elif isinstance(proc_spec, (v0_4.ScaleRangeDescr, v0_5.ScaleRangeDescr)):
    +677        return ScaleRange
    +678    elif isinstance(proc_spec, (v0_4.SigmoidDescr, v0_5.SigmoidDescr)):
    +679        return Sigmoid
    +680    elif (
    +681        isinstance(proc_spec, v0_4.ZeroMeanUnitVarianceDescr)
    +682        and proc_spec.kwargs.mode == "fixed"
    +683    ):
    +684        return FixedZeroMeanUnitVariance
    +685    elif isinstance(
    +686        proc_spec,
    +687        (v0_4.ZeroMeanUnitVarianceDescr, v0_5.ZeroMeanUnitVarianceDescr),
    +688    ):
    +689        return ZeroMeanUnitVariance
    +690    else:
    +691        assert_never(proc_spec)
    +
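For example, the dispatch above maps a v0.5 sigmoid description (constructed without kwargs, as in `Sigmoid.get_descr`) to the `Sigmoid` operator class:

```python
from bioimageio.spec.model import v0_5

from bioimageio.core.proc_ops import Sigmoid, get_proc_class

proc_cls = get_proc_class(v0_5.SigmoidDescr())
assert proc_cls is Sigmoid
```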
    + + + + +
    +
    + + \ No newline at end of file diff --git a/bioimageio/core/proc_setup.html b/bioimageio/core/proc_setup.html new file mode 100644 index 00000000..3b041af8 --- /dev/null +++ b/bioimageio/core/proc_setup.html @@ -0,0 +1,912 @@ + + + + + + + bioimageio.core.proc_setup API documentation + + + + + + + + + + +
    +
    +

    +bioimageio.core.proc_setup

    + + + + + + +
      1from typing import (
    +  2    Iterable,
    +  3    List,
    +  4    Mapping,
    +  5    NamedTuple,
    +  6    Optional,
    +  7    Sequence,
    +  8    Set,
    +  9    Union,
    + 10)
    + 11
    + 12from typing_extensions import assert_never
    + 13
    + 14from bioimageio.spec.model import AnyModelDescr, v0_4, v0_5
    + 15from bioimageio.spec.model.v0_5 import TensorId
    + 16
    + 17from .digest_spec import get_member_ids
    + 18from .proc_ops import (
    + 19    AddKnownDatasetStats,
    + 20    Processing,
    + 21    UpdateStats,
    + 22    get_proc_class,
    + 23)
    + 24from .sample import Sample
    + 25from .stat_calculators import StatsCalculator
    + 26from .stat_measures import (
    + 27    DatasetMeasure,
    + 28    DatasetMeasureBase,
    + 29    Measure,
    + 30    MeasureValue,
    + 31    SampleMeasure,
    + 32    SampleMeasureBase,
    + 33)
    + 34
    + 35TensorDescr = Union[
    + 36    v0_4.InputTensorDescr,
    + 37    v0_4.OutputTensorDescr,
    + 38    v0_5.InputTensorDescr,
    + 39    v0_5.OutputTensorDescr,
    + 40]
    + 41
    + 42
    + 43class PreAndPostprocessing(NamedTuple):
    + 44    pre: List[Processing]
    + 45    post: List[Processing]
    + 46
    + 47
    + 48class _SetupProcessing(NamedTuple):
    + 49    pre: List[Processing]
    + 50    post: List[Processing]
    + 51    pre_measures: Set[Measure]
    + 52    post_measures: Set[Measure]
    + 53
    + 54
    + 55def setup_pre_and_postprocessing(
    + 56    model: AnyModelDescr,
    + 57    dataset_for_initial_statistics: Iterable[Sample],
    + 58    keep_updating_initial_dataset_stats: bool = False,
    + 59    fixed_dataset_stats: Optional[Mapping[DatasetMeasure, MeasureValue]] = None,
    + 60) -> PreAndPostprocessing:
    + 61    """
    + 62    Get pre- and postprocessing operators for a `model` description.
+ 63    used in `bioimageio.core.create_prediction_pipeline`"""
    + 64    prep, post, prep_meas, post_meas = _prepare_setup_pre_and_postprocessing(model)
    + 65
    + 66    missing_dataset_stats = {
    + 67        m
    + 68        for m in prep_meas | post_meas
    + 69        if fixed_dataset_stats is None or m not in fixed_dataset_stats
    + 70    }
    + 71    if missing_dataset_stats:
    + 72        initial_stats_calc = StatsCalculator(missing_dataset_stats)
    + 73        for sample in dataset_for_initial_statistics:
    + 74            initial_stats_calc.update(sample)
    + 75
    + 76        initial_stats = initial_stats_calc.finalize()
    + 77    else:
    + 78        initial_stats = {}
    + 79
    + 80    prep.insert(
    + 81        0,
    + 82        UpdateStats(
    + 83            StatsCalculator(prep_meas, initial_stats),
    + 84            keep_updating_initial_dataset_stats=keep_updating_initial_dataset_stats,
    + 85        ),
    + 86    )
    + 87    if post_meas:
    + 88        post.insert(
    + 89            0,
    + 90            UpdateStats(
    + 91                StatsCalculator(post_meas, initial_stats),
    + 92                keep_updating_initial_dataset_stats=keep_updating_initial_dataset_stats,
    + 93            ),
    + 94        )
    + 95
    + 96    if fixed_dataset_stats:
    + 97        prep.insert(0, AddKnownDatasetStats(fixed_dataset_stats))
    + 98        post.insert(0, AddKnownDatasetStats(fixed_dataset_stats))
    + 99
    +100    return PreAndPostprocessing(prep, post)
    +101
    +102
    +103class RequiredMeasures(NamedTuple):
    +104    pre: Set[Measure]
    +105    post: Set[Measure]
    +106
    +107
    +108class RequiredDatasetMeasures(NamedTuple):
    +109    pre: Set[DatasetMeasure]
    +110    post: Set[DatasetMeasure]
    +111
    +112
    +113class RequiredSampleMeasures(NamedTuple):
    +114    pre: Set[SampleMeasure]
    +115    post: Set[SampleMeasure]
    +116
    +117
    +118def get_requried_measures(model: AnyModelDescr) -> RequiredMeasures:
    +119    s = _prepare_setup_pre_and_postprocessing(model)
    +120    return RequiredMeasures(s.pre_measures, s.post_measures)
    +121
    +122
    +123def get_required_dataset_measures(model: AnyModelDescr) -> RequiredDatasetMeasures:
    +124    s = _prepare_setup_pre_and_postprocessing(model)
    +125    return RequiredDatasetMeasures(
    +126        {m for m in s.pre_measures if isinstance(m, DatasetMeasureBase)},
    +127        {m for m in s.post_measures if isinstance(m, DatasetMeasureBase)},
    +128    )
    +129
    +130
    +131def get_requried_sample_measures(model: AnyModelDescr) -> RequiredSampleMeasures:
    +132    s = _prepare_setup_pre_and_postprocessing(model)
    +133    return RequiredSampleMeasures(
    +134        {m for m in s.pre_measures if isinstance(m, SampleMeasureBase)},
    +135        {m for m in s.post_measures if isinstance(m, SampleMeasureBase)},
    +136    )
    +137
    +138
    +139def _prepare_setup_pre_and_postprocessing(model: AnyModelDescr) -> _SetupProcessing:
    +140    pre_measures: Set[Measure] = set()
    +141    post_measures: Set[Measure] = set()
    +142
    +143    input_ids = set(get_member_ids(model.inputs))
    +144    output_ids = set(get_member_ids(model.outputs))
    +145
    +146    def prepare_procs(tensor_descrs: Sequence[TensorDescr]):
    +147        procs: List[Processing] = []
    +148        for t_descr in tensor_descrs:
    +149            if isinstance(t_descr, (v0_4.InputTensorDescr, v0_5.InputTensorDescr)):
    +150                proc_descrs: List[
    +151                    Union[
    +152                        v0_4.PreprocessingDescr,
    +153                        v0_5.PreprocessingDescr,
    +154                        v0_4.PostprocessingDescr,
    +155                        v0_5.PostprocessingDescr,
    +156                    ]
    +157                ] = list(t_descr.preprocessing)
    +158            elif isinstance(
    +159                t_descr,
    +160                (v0_4.OutputTensorDescr, v0_5.OutputTensorDescr),
    +161            ):
    +162                proc_descrs = list(t_descr.postprocessing)
    +163            else:
    +164                assert_never(t_descr)
    +165
    +166            if isinstance(t_descr, (v0_4.InputTensorDescr, v0_4.OutputTensorDescr)):
    +167                ensure_dtype = v0_5.EnsureDtypeDescr(
    +168                    kwargs=v0_5.EnsureDtypeKwargs(dtype=t_descr.data_type)
    +169                )
    +170                if isinstance(t_descr, v0_4.InputTensorDescr) and proc_descrs:
    +171                    proc_descrs.insert(0, ensure_dtype)
    +172
    +173                proc_descrs.append(ensure_dtype)
    +174
    +175            for proc_d in proc_descrs:
    +176                proc_class = get_proc_class(proc_d)
    +177                member_id = (
    +178                    TensorId(str(t_descr.name))
    +179                    if isinstance(t_descr, v0_4.TensorDescrBase)
    +180                    else t_descr.id
    +181                )
    +182                req = proc_class.from_proc_descr(
    +183                    proc_d, member_id  # pyright: ignore[reportArgumentType]
    +184                )
    +185                for m in req.required_measures:
    +186                    if m.member_id in input_ids:
    +187                        pre_measures.add(m)
    +188                    elif m.member_id in output_ids:
    +189                        post_measures.add(m)
    +190                    else:
    +191                        raise ValueError("When to raise ")
    +192                procs.append(req)
    +193        return procs
    +194
    +195    return _SetupProcessing(
    +196        pre=prepare_procs(model.inputs),
    +197        post=prepare_procs(model.outputs),
    +198        pre_measures=pre_measures,
    +199        post_measures=post_measures,
    +200    )
    +
    + + +
    +
    + + + + + +
    +
    + +
    + + class + PreAndPostprocessing(typing.NamedTuple): + + + +
    + +
    44class PreAndPostprocessing(NamedTuple):
    +45    pre: List[Processing]
    +46    post: List[Processing]
    +
    + + +

    PreAndPostprocessing(pre, post)

    +
    + + + + + +
    +
    + +
    + + def + setup_pre_and_postprocessing( model: Annotated[Union[bioimageio.spec.model.v0_4.ModelDescr, bioimageio.spec.ModelDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], dataset_for_initial_statistics: Iterable[bioimageio.core.Sample], keep_updating_initial_dataset_stats: bool = False, fixed_dataset_stats: Optional[Mapping[Annotated[Union[bioimageio.core.stat_measures.DatasetMean, bioimageio.core.stat_measures.DatasetStd, bioimageio.core.stat_measures.DatasetVar, bioimageio.core.stat_measures.DatasetPercentile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Union[float, Annotated[bioimageio.core.Tensor, BeforeValidator(func=<function tensor_custom_before_validator>, json_schema_input_type=PydanticUndefined), PlainSerializer(func=<function tensor_custom_serializer>, return_type=PydanticUndefined, when_used='always')]]]] = None) -> PreAndPostprocessing: + + + +
    + +
     56def setup_pre_and_postprocessing(
    + 57    model: AnyModelDescr,
    + 58    dataset_for_initial_statistics: Iterable[Sample],
    + 59    keep_updating_initial_dataset_stats: bool = False,
    + 60    fixed_dataset_stats: Optional[Mapping[DatasetMeasure, MeasureValue]] = None,
    + 61) -> PreAndPostprocessing:
    + 62    """
    + 63    Get pre- and postprocessing operators for a `model` description.
+ 64    used in `bioimageio.core.create_prediction_pipeline`"""
    + 65    prep, post, prep_meas, post_meas = _prepare_setup_pre_and_postprocessing(model)
    + 66
    + 67    missing_dataset_stats = {
    + 68        m
    + 69        for m in prep_meas | post_meas
    + 70        if fixed_dataset_stats is None or m not in fixed_dataset_stats
    + 71    }
    + 72    if missing_dataset_stats:
    + 73        initial_stats_calc = StatsCalculator(missing_dataset_stats)
    + 74        for sample in dataset_for_initial_statistics:
    + 75            initial_stats_calc.update(sample)
    + 76
    + 77        initial_stats = initial_stats_calc.finalize()
    + 78    else:
    + 79        initial_stats = {}
    + 80
    + 81    prep.insert(
    + 82        0,
    + 83        UpdateStats(
    + 84            StatsCalculator(prep_meas, initial_stats),
    + 85            keep_updating_initial_dataset_stats=keep_updating_initial_dataset_stats,
    + 86        ),
    + 87    )
    + 88    if post_meas:
    + 89        post.insert(
    + 90            0,
    + 91            UpdateStats(
    + 92                StatsCalculator(post_meas, initial_stats),
    + 93                keep_updating_initial_dataset_stats=keep_updating_initial_dataset_stats,
    + 94            ),
    + 95        )
    + 96
    + 97    if fixed_dataset_stats:
    + 98        prep.insert(0, AddKnownDatasetStats(fixed_dataset_stats))
    + 99        post.insert(0, AddKnownDatasetStats(fixed_dataset_stats))
    +100
    +101    return PreAndPostprocessing(prep, post)
    +
    + + +

Get pre- and postprocessing operators for a model description. +used in `bioimageio.core.create_prediction_pipeline`.

    +
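A hedged usage sketch: `model` and `samples` below are placeholders (a loaded `AnyModelDescr` and an iterable of `bioimageio.core.Sample` objects used to seed the required dataset statistics); they are not defined by this module.

```python
from bioimageio.core.proc_setup import setup_pre_and_postprocessing

# placeholders: `model` (AnyModelDescr), `samples` (Iterable[Sample])
procs = setup_pre_and_postprocessing(
    model,
    dataset_for_initial_statistics=samples,
    keep_updating_initial_dataset_stats=False,
)
# PreAndPostprocessing is a NamedTuple of operator lists
print(f"{len(procs.pre)} preprocessing / {len(procs.post)} postprocessing operators")
```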
    + + +
    +
    + +
    + + class + RequiredMeasures(typing.NamedTuple): + + + +
    + +
    104class RequiredMeasures(NamedTuple):
    +105    pre: Set[Measure]
    +106    post: Set[Measure]
    +
    + + +

    RequiredMeasures(pre, post)

    +
    + + +
    +
    + + RequiredMeasures( pre: Set[Annotated[Union[Annotated[Union[bioimageio.core.stat_measures.SampleMean, bioimageio.core.stat_measures.SampleStd, bioimageio.core.stat_measures.SampleVar, bioimageio.core.stat_measures.SampleQuantile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.core.stat_measures.DatasetMean, bioimageio.core.stat_measures.DatasetStd, bioimageio.core.stat_measures.DatasetVar, bioimageio.core.stat_measures.DatasetPercentile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='scope', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], post: Set[Annotated[Union[Annotated[Union[bioimageio.core.stat_measures.SampleMean, bioimageio.core.stat_measures.SampleStd, bioimageio.core.stat_measures.SampleVar, bioimageio.core.stat_measures.SampleQuantile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.core.stat_measures.DatasetMean, bioimageio.core.stat_measures.DatasetStd, bioimageio.core.stat_measures.DatasetVar, bioimageio.core.stat_measures.DatasetPercentile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='scope', custom_error_type=None, custom_error_message=None, custom_error_context=None)]]) + + +
    + + +

    Create new instance of RequiredMeasures(pre, post)

    +
    + + +
    +
    +
    + pre: Set[Annotated[Union[Annotated[Union[bioimageio.core.stat_measures.SampleMean, bioimageio.core.stat_measures.SampleStd, bioimageio.core.stat_measures.SampleVar, bioimageio.core.stat_measures.SampleQuantile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.core.stat_measures.DatasetMean, bioimageio.core.stat_measures.DatasetStd, bioimageio.core.stat_measures.DatasetVar, bioimageio.core.stat_measures.DatasetPercentile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='scope', custom_error_type=None, custom_error_message=None, custom_error_context=None)]] + + +
    + + +

    Alias for field number 0

    +
    + + +
    +
    +
    + post: Set[Annotated[Union[Annotated[Union[bioimageio.core.stat_measures.SampleMean, bioimageio.core.stat_measures.SampleStd, bioimageio.core.stat_measures.SampleVar, bioimageio.core.stat_measures.SampleQuantile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.core.stat_measures.DatasetMean, bioimageio.core.stat_measures.DatasetStd, bioimageio.core.stat_measures.DatasetVar, bioimageio.core.stat_measures.DatasetPercentile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='scope', custom_error_type=None, custom_error_message=None, custom_error_context=None)]] + + +
    + + +

    Alias for field number 1

    +
    + + +
    +
    +
    + +
    + + class + RequiredDatasetMeasures(typing.NamedTuple): + + + +
    + +
    109class RequiredDatasetMeasures(NamedTuple):
    +110    pre: Set[DatasetMeasure]
    +111    post: Set[DatasetMeasure]
    +
    + + +

    RequiredDatasetMeasures(pre, post)

    +
    + + +
    +
    + + RequiredDatasetMeasures( pre: Set[Annotated[Union[bioimageio.core.stat_measures.DatasetMean, bioimageio.core.stat_measures.DatasetStd, bioimageio.core.stat_measures.DatasetVar, bioimageio.core.stat_measures.DatasetPercentile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], post: Set[Annotated[Union[bioimageio.core.stat_measures.DatasetMean, bioimageio.core.stat_measures.DatasetStd, bioimageio.core.stat_measures.DatasetVar, bioimageio.core.stat_measures.DatasetPercentile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)]]) + + +
    + + +

    Create new instance of RequiredDatasetMeasures(pre, post)

    +
    + + +
    +
    +
    + pre: Set[Annotated[Union[bioimageio.core.stat_measures.DatasetMean, bioimageio.core.stat_measures.DatasetStd, bioimageio.core.stat_measures.DatasetVar, bioimageio.core.stat_measures.DatasetPercentile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)]] + + +
    + + +

    Alias for field number 0

    +
    + + +
    +
    +
    + post: Set[Annotated[Union[bioimageio.core.stat_measures.DatasetMean, bioimageio.core.stat_measures.DatasetStd, bioimageio.core.stat_measures.DatasetVar, bioimageio.core.stat_measures.DatasetPercentile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)]] + + +
    + + +

    Alias for field number 1

    +
    + + +
    +
    +
    + +
    + + class + RequiredSampleMeasures(typing.NamedTuple): + + + +
    + +
    114class RequiredSampleMeasures(NamedTuple):
    +115    pre: Set[SampleMeasure]
    +116    post: Set[SampleMeasure]
    +
    + + +

    RequiredSampleMeasures(pre, post)

    +
    + + +
    +
    + + RequiredSampleMeasures( pre: Set[Annotated[Union[bioimageio.core.stat_measures.SampleMean, bioimageio.core.stat_measures.SampleStd, bioimageio.core.stat_measures.SampleVar, bioimageio.core.stat_measures.SampleQuantile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], post: Set[Annotated[Union[bioimageio.core.stat_measures.SampleMean, bioimageio.core.stat_measures.SampleStd, bioimageio.core.stat_measures.SampleVar, bioimageio.core.stat_measures.SampleQuantile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)]]) + + +
    + + +

    Create new instance of RequiredSampleMeasures(pre, post)

    +
    + + +
    +
    +
    + pre: Set[Annotated[Union[bioimageio.core.stat_measures.SampleMean, bioimageio.core.stat_measures.SampleStd, bioimageio.core.stat_measures.SampleVar, bioimageio.core.stat_measures.SampleQuantile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)]] + + +
    + + +

    Alias for field number 0

    +
    + + +
    +
    +
    + post: Set[Annotated[Union[bioimageio.core.stat_measures.SampleMean, bioimageio.core.stat_measures.SampleStd, bioimageio.core.stat_measures.SampleVar, bioimageio.core.stat_measures.SampleQuantile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)]] + + +
    + + +

    Alias for field number 1

    +
    + + +
    +
    +
    + +
    + + def + get_requried_measures( model: Annotated[Union[bioimageio.spec.model.v0_4.ModelDescr, bioimageio.spec.ModelDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)]) -> RequiredMeasures: + + + +
    + +
    119def get_requried_measures(model: AnyModelDescr) -> RequiredMeasures:
    +120    s = _prepare_setup_pre_and_postprocessing(model)
    +121    return RequiredMeasures(s.pre_measures, s.post_measures)
    +
    + + + + +
    +
    + +
    + + def + get_required_dataset_measures( model: Annotated[Union[bioimageio.spec.model.v0_4.ModelDescr, bioimageio.spec.ModelDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)]) -> RequiredDatasetMeasures: + + + +
    + +
    124def get_required_dataset_measures(model: AnyModelDescr) -> RequiredDatasetMeasures:
    +125    s = _prepare_setup_pre_and_postprocessing(model)
    +126    return RequiredDatasetMeasures(
    +127        {m for m in s.pre_measures if isinstance(m, DatasetMeasureBase)},
    +128        {m for m in s.post_measures if isinstance(m, DatasetMeasureBase)},
    +129    )
    +
    + + + + +
    +
    + +
    + + def + get_requried_sample_measures( model: Annotated[Union[bioimageio.spec.model.v0_4.ModelDescr, bioimageio.spec.ModelDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)]) -> RequiredSampleMeasures: + + + +
    + +
    132def get_requried_sample_measures(model: AnyModelDescr) -> RequiredSampleMeasures:
    +133    s = _prepare_setup_pre_and_postprocessing(model)
    +134    return RequiredSampleMeasures(
    +135        {m for m in s.pre_measures if isinstance(m, SampleMeasureBase)},
    +136        {m for m in s.post_measures if isinstance(m, SampleMeasureBase)},
    +137    )
    +
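A brief sketch of querying which statistics a model needs before prediction; `model` is again a placeholder for a loaded model description, and the functions keep the upstream spelling `get_requried_*`:

```python
from bioimageio.core.proc_setup import (
    get_required_dataset_measures,
    get_requried_sample_measures,  # upstream spelling
)

ds_measures = get_required_dataset_measures(model)  # dataset-wide means/stds/percentiles
sp_measures = get_requried_sample_measures(model)   # per-sample measures
print("dataset measures (pre):", ds_measures.pre)
print("sample measures (pre):", sp_measures.pre)
```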
    + + + + +
    +
    + + \ No newline at end of file diff --git a/bioimageio/core/sample.html b/bioimageio/core/sample.html new file mode 100644 index 00000000..57b2364b --- /dev/null +++ b/bioimageio/core/sample.html @@ -0,0 +1,1784 @@ + + + + + + + bioimageio.core.sample API documentation + + + + + + + + + + +
    +
    +

    +bioimageio.core.sample

    + + + + + + +
      1from __future__ import annotations
    +  2
    +  3from dataclasses import dataclass
    +  4from math import ceil, floor
    +  5from typing import (
    +  6    Callable,
    +  7    Dict,
    +  8    Generic,
    +  9    Iterable,
    + 10    Optional,
    + 11    Tuple,
    + 12    TypeVar,
    + 13    Union,
    + 14)
    + 15
    + 16import numpy as np
    + 17from typing_extensions import Self
    + 18
    + 19from .axis import AxisId, PerAxis
    + 20from .block import Block
    + 21from .block_meta import (
    + 22    BlockMeta,
    + 23    LinearAxisTransform,
    + 24    split_multiple_shapes_into_blocks,
    + 25)
    + 26from .common import (
    + 27    BlockIndex,
    + 28    Halo,
    + 29    HaloLike,
    + 30    MemberId,
    + 31    PadMode,
    + 32    PerMember,
    + 33    SampleId,
    + 34    SliceInfo,
    + 35    TotalNumberOfBlocks,
    + 36)
    + 37from .stat_measures import Stat
    + 38from .tensor import Tensor
    + 39
    + 40# TODO: allow for lazy samples to read/write to disk
    + 41
    + 42
    + 43@dataclass
    + 44class Sample:
    + 45    """A dataset sample"""
    + 46
    + 47    members: Dict[MemberId, Tensor]
    + 48    """the sample's tensors"""
    + 49
    + 50    stat: Stat
    + 51    """sample and dataset statistics"""
    + 52
    + 53    id: SampleId
    + 54    """identifier within the sample's dataset"""
    + 55
    + 56    @property
    + 57    def shape(self) -> PerMember[PerAxis[int]]:
    + 58        return {tid: t.sizes for tid, t in self.members.items()}
    + 59
    + 60    def split_into_blocks(
    + 61        self,
    + 62        block_shapes: PerMember[PerAxis[int]],
    + 63        halo: PerMember[PerAxis[HaloLike]],
    + 64        pad_mode: PadMode,
    + 65        broadcast: bool = False,
    + 66    ) -> Tuple[TotalNumberOfBlocks, Iterable[SampleBlockWithOrigin]]:
    + 67        assert not (
    + 68            missing := [m for m in block_shapes if m not in self.members]
    + 69        ), f"`block_shapes` specified for unknown members: {missing}"
    + 70        assert not (
    + 71            missing := [m for m in halo if m not in block_shapes]
    + 72        ), f"`halo` specified for members without `block_shape`: {missing}"
    + 73
    + 74        n_blocks, blocks = split_multiple_shapes_into_blocks(
    + 75            shapes=self.shape,
    + 76            block_shapes=block_shapes,
    + 77            halo=halo,
    + 78            broadcast=broadcast,
    + 79        )
    + 80        return n_blocks, sample_block_generator(blocks, origin=self, pad_mode=pad_mode)
    + 81
    + 82    def as_single_block(self, halo: Optional[PerMember[PerAxis[Halo]]] = None):
    + 83        if halo is None:
    + 84            halo = {}
    + 85        return SampleBlockWithOrigin(
    + 86            sample_shape=self.shape,
    + 87            sample_id=self.id,
    + 88            blocks={
    + 89                m: Block(
    + 90                    sample_shape=self.shape[m],
    + 91                    data=data,
    + 92                    inner_slice={
    + 93                        a: SliceInfo(0, s) for a, s in data.tagged_shape.items()
    + 94                    },
    + 95                    halo=halo.get(m, {}),
    + 96                    block_index=0,
    + 97                    blocks_in_sample=1,
    + 98                )
    + 99                for m, data in self.members.items()
    +100            },
    +101            stat=self.stat,
    +102            origin=self,
    +103            block_index=0,
    +104            blocks_in_sample=1,
    +105        )
    +106
    +107    @classmethod
    +108    def from_blocks(
    +109        cls,
    +110        sample_blocks: Iterable[SampleBlock],
    +111        *,
    +112        fill_value: float = float("nan"),
    +113    ) -> Self:
    +114        members: PerMember[Tensor] = {}
    +115        stat: Stat = {}
    +116        sample_id = None
    +117        for sample_block in sample_blocks:
    +118            assert sample_id is None or sample_id == sample_block.sample_id
    +119            sample_id = sample_block.sample_id
    +120            stat = sample_block.stat
    +121            for m, block in sample_block.blocks.items():
    +122                if m not in members:
    +123                    if -1 in block.sample_shape.values():
    +124                        raise NotImplementedError(
    +125                            "merging blocks with data dependent axis not yet implemented"
    +126                        )
    +127
    +128                    members[m] = Tensor(
    +129                        np.full(
    +130                            tuple(block.sample_shape[a] for a in block.data.dims),
    +131                            fill_value,
    +132                            dtype=block.data.dtype,
    +133                        ),
    +134                        dims=block.data.dims,
    +135                    )
    +136
    +137                members[m][block.inner_slice] = block.inner_data
    +138
    +139        return cls(members=members, stat=stat, id=sample_id)
    +140
    +141
    +142BlockT = TypeVar("BlockT", Block, BlockMeta)
    +143
    +144
    +145@dataclass
    +146class SampleBlockBase(Generic[BlockT]):
    +147    """base class for `SampleBlockMeta` and `SampleBlock`"""
    +148
    +149    sample_shape: PerMember[PerAxis[int]]
    +150    """the sample shape this block represents a part of"""
    +151
    +152    sample_id: SampleId
    +153    """identifier for the sample within its dataset"""
    +154
    +155    blocks: Dict[MemberId, BlockT]
    +156    """Individual tensor blocks comprising this sample block"""
    +157
    +158    block_index: BlockIndex
    +159    """the n-th block of the sample"""
    +160
    +161    blocks_in_sample: TotalNumberOfBlocks
    +162    """total number of blocks in the sample"""
    +163
    +164    @property
    +165    def shape(self) -> PerMember[PerAxis[int]]:
    +166        return {mid: b.shape for mid, b in self.blocks.items()}
    +167
    +168    @property
    +169    def inner_shape(self) -> PerMember[PerAxis[int]]:
    +170        return {mid: b.inner_shape for mid, b in self.blocks.items()}
    +171
    +172
    +173@dataclass
    +174class LinearSampleAxisTransform(LinearAxisTransform):
    +175    member: MemberId
    +176
    +177
    +178@dataclass
    +179class SampleBlockMeta(SampleBlockBase[BlockMeta]):
    +180    """Meta data of a dataset sample block"""
    +181
    +182    def get_transformed(
    +183        self, new_axes: PerMember[PerAxis[Union[LinearSampleAxisTransform, int]]]
    +184    ) -> Self:
    +185        sample_shape = {
    +186            m: {
    +187                a: (
    +188                    trf
    +189                    if isinstance(trf, int)
    +190                    else trf.compute(self.sample_shape[trf.member][trf.axis])
    +191                )
    +192                for a, trf in new_axes[m].items()
    +193            }
    +194            for m in new_axes
    +195        }
    +196
    +197        def get_member_halo(m: MemberId, round: Callable[[float], int]):
    +198            return {
    +199                a: (
    +200                    Halo(0, 0)
    +201                    if isinstance(trf, int)
    +202                    or trf.axis not in self.blocks[trf.member].halo
    +203                    else Halo(
    +204                        round(self.blocks[trf.member].halo[trf.axis].left * trf.scale),
    +205                        round(self.blocks[trf.member].halo[trf.axis].right * trf.scale),
    +206                    )
    +207                )
    +208                for a, trf in new_axes[m].items()
    +209            }
    +210
    +211        halo: Dict[MemberId, Dict[AxisId, Halo]] = {}
    +212        for m in new_axes:
    +213            halo[m] = get_member_halo(m, floor)
    +214            if halo[m] != get_member_halo(m, ceil):
    +215                raise ValueError(
    +216                    f"failed to unambiguously scale halo {halo[m]} with {new_axes[m]}"
    +217                    + f" for {m}."
    +218                )
    +219
    +220        inner_slice = {
    +221            m: {
    +222                a: (
    +223                    SliceInfo(0, trf)
    +224                    if isinstance(trf, int)
    +225                    else SliceInfo(
    +226                        trf.compute(
    +227                            self.blocks[trf.member].inner_slice[trf.axis].start
    +228                        ),
    +229                        trf.compute(self.blocks[trf.member].inner_slice[trf.axis].stop),
    +230                    )
    +231                )
    +232                for a, trf in new_axes[m].items()
    +233            }
    +234            for m in new_axes
    +235        }
    +236        return self.__class__(
    +237            blocks={
    +238                m: BlockMeta(
    +239                    sample_shape=sample_shape[m],
    +240                    inner_slice=inner_slice[m],
    +241                    halo=halo[m],
    +242                    block_index=self.block_index,
    +243                    blocks_in_sample=self.blocks_in_sample,
    +244                )
    +245                for m in new_axes
    +246            },
    +247            sample_shape=sample_shape,
    +248            sample_id=self.sample_id,
    +249            block_index=self.block_index,
    +250            blocks_in_sample=self.blocks_in_sample,
    +251        )
    +252
    +253    def with_data(self, data: PerMember[Tensor], *, stat: Stat) -> SampleBlock:
    +254        return SampleBlock(
    +255            sample_shape={
    +256                m: {
    +257                    a: data[m].tagged_shape[a] if s == -1 else s
    +258                    for a, s in member_shape.items()
    +259                }
    +260                for m, member_shape in self.sample_shape.items()
    +261            },
    +262            sample_id=self.sample_id,
    +263            blocks={
    +264                m: Block.from_meta(b, data=data[m]) for m, b in self.blocks.items()
    +265            },
    +266            stat=stat,
    +267            block_index=self.block_index,
    +268            blocks_in_sample=self.blocks_in_sample,
    +269        )
    +270
    +271
    +272@dataclass
    +273class SampleBlock(SampleBlockBase[Block]):
    +274    """A block of a dataset sample"""
    +275
    +276    stat: Stat
    +277    """computed statistics"""
    +278
    +279    @property
    +280    def members(self) -> PerMember[Tensor]:
    +281        """the sample block's tensors"""
    +282        return {m: b.data for m, b in self.blocks.items()}
    +283
    +284    def get_transformed_meta(
    +285        self, new_axes: PerMember[PerAxis[Union[LinearSampleAxisTransform, int]]]
    +286    ) -> SampleBlockMeta:
    +287        return SampleBlockMeta(
    +288            sample_id=self.sample_id,
    +289            blocks=dict(self.blocks),
    +290            sample_shape=self.sample_shape,
    +291            block_index=self.block_index,
    +292            blocks_in_sample=self.blocks_in_sample,
    +293        ).get_transformed(new_axes)
    +294
    +295
    +296@dataclass
    +297class SampleBlockWithOrigin(SampleBlock):
    +298    """A `SampleBlock` with a reference (`origin`) to the whole `Sample`"""
    +299
    +300    origin: Sample
    +301    """the sample this sample block was taken from"""
    +302
    +303
    +304class _ConsolidatedMemberBlocks:
    +305    def __init__(self, blocks: PerMember[BlockMeta]):
    +306        super().__init__()
    +307        block_indices = {b.block_index for b in blocks.values()}
    +308        assert len(block_indices) == 1
    +309        self.block_index = block_indices.pop()
    +310        blocks_in_samples = {b.blocks_in_sample for b in blocks.values()}
    +311        assert len(blocks_in_samples) == 1
    +312        self.blocks_in_sample = blocks_in_samples.pop()
    +313
    +314
    +315def sample_block_meta_generator(
    +316    blocks: Iterable[PerMember[BlockMeta]],
    +317    *,
    +318    sample_shape: PerMember[PerAxis[int]],
    +319    sample_id: SampleId,
    +320):
    +321    for member_blocks in blocks:
    +322        cons = _ConsolidatedMemberBlocks(member_blocks)
    +323        yield SampleBlockMeta(
    +324            blocks=dict(member_blocks),
    +325            sample_shape=sample_shape,
    +326            sample_id=sample_id,
    +327            block_index=cons.block_index,
    +328            blocks_in_sample=cons.blocks_in_sample,
    +329        )
    +330
    +331
    +332def sample_block_generator(
    +333    blocks: Iterable[PerMember[BlockMeta]],
    +334    *,
    +335    origin: Sample,
    +336    pad_mode: PadMode,
    +337) -> Iterable[SampleBlockWithOrigin]:
    +338    for member_blocks in blocks:
    +339        cons = _ConsolidatedMemberBlocks(member_blocks)
    +340        yield SampleBlockWithOrigin(
    +341            blocks={
    +342                m: Block.from_sample_member(
    +343                    origin.members[m], block=member_blocks[m], pad_mode=pad_mode
    +344                )
    +345                for m in origin.members
    +346            },
    +347            sample_shape=origin.shape,
    +348            origin=origin,
    +349            stat=origin.stat,
    +350            sample_id=origin.id,
    +351            block_index=cons.block_index,
    +352            blocks_in_sample=cons.blocks_in_sample,
    +353        )
    +
    + + +
    +
    + +
    +
    @dataclass
    + + class + Sample: + + + +
    + +
     44@dataclass
    + 45class Sample:
    + 46    """A dataset sample"""
    + 47
    + 48    members: Dict[MemberId, Tensor]
    + 49    """the sample's tensors"""
    + 50
    + 51    stat: Stat
    + 52    """sample and dataset statistics"""
    + 53
    + 54    id: SampleId
    + 55    """identifier within the sample's dataset"""
    + 56
    + 57    @property
    + 58    def shape(self) -> PerMember[PerAxis[int]]:
    + 59        return {tid: t.sizes for tid, t in self.members.items()}
    + 60
    + 61    def split_into_blocks(
    + 62        self,
    + 63        block_shapes: PerMember[PerAxis[int]],
    + 64        halo: PerMember[PerAxis[HaloLike]],
    + 65        pad_mode: PadMode,
    + 66        broadcast: bool = False,
    + 67    ) -> Tuple[TotalNumberOfBlocks, Iterable[SampleBlockWithOrigin]]:
    + 68        assert not (
    + 69            missing := [m for m in block_shapes if m not in self.members]
    + 70        ), f"`block_shapes` specified for unknown members: {missing}"
    + 71        assert not (
    + 72            missing := [m for m in halo if m not in block_shapes]
    + 73        ), f"`halo` specified for members without `block_shape`: {missing}"
    + 74
    + 75        n_blocks, blocks = split_multiple_shapes_into_blocks(
    + 76            shapes=self.shape,
    + 77            block_shapes=block_shapes,
    + 78            halo=halo,
    + 79            broadcast=broadcast,
    + 80        )
    + 81        return n_blocks, sample_block_generator(blocks, origin=self, pad_mode=pad_mode)
    + 82
    + 83    def as_single_block(self, halo: Optional[PerMember[PerAxis[Halo]]] = None):
    + 84        if halo is None:
    + 85            halo = {}
    + 86        return SampleBlockWithOrigin(
    + 87            sample_shape=self.shape,
    + 88            sample_id=self.id,
    + 89            blocks={
    + 90                m: Block(
    + 91                    sample_shape=self.shape[m],
    + 92                    data=data,
    + 93                    inner_slice={
    + 94                        a: SliceInfo(0, s) for a, s in data.tagged_shape.items()
    + 95                    },
    + 96                    halo=halo.get(m, {}),
    + 97                    block_index=0,
    + 98                    blocks_in_sample=1,
    + 99                )
    +100                for m, data in self.members.items()
    +101            },
    +102            stat=self.stat,
    +103            origin=self,
    +104            block_index=0,
    +105            blocks_in_sample=1,
    +106        )
    +107
    +108    @classmethod
    +109    def from_blocks(
    +110        cls,
    +111        sample_blocks: Iterable[SampleBlock],
    +112        *,
    +113        fill_value: float = float("nan"),
    +114    ) -> Self:
    +115        members: PerMember[Tensor] = {}
    +116        stat: Stat = {}
    +117        sample_id = None
    +118        for sample_block in sample_blocks:
    +119            assert sample_id is None or sample_id == sample_block.sample_id
    +120            sample_id = sample_block.sample_id
    +121            stat = sample_block.stat
    +122            for m, block in sample_block.blocks.items():
    +123                if m not in members:
    +124                    if -1 in block.sample_shape.values():
    +125                        raise NotImplementedError(
    +126                            "merging blocks with data dependent axis not yet implemented"
    +127                        )
    +128
    +129                    members[m] = Tensor(
    +130                        np.full(
    +131                            tuple(block.sample_shape[a] for a in block.data.dims),
    +132                            fill_value,
    +133                            dtype=block.data.dtype,
    +134                        ),
    +135                        dims=block.data.dims,
    +136                    )
    +137
    +138                members[m][block.inner_slice] = block.inner_data
    +139
    +140        return cls(members=members, stat=stat, id=sample_id)
    +
    + + +

    A dataset sample

    +
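A minimal sketch of assembling a `Sample` by hand, assuming a single input tensor named "raw" with batch/channel/spatial axes and assuming `Tensor(array, dims=...)` accepts plain string axis names as it is used in `from_blocks` below:

```python
import numpy as np

from bioimageio.core import Sample, Tensor
from bioimageio.spec.model.v0_5 import TensorId

sample = Sample(
    members={
        TensorId("raw"): Tensor(
            np.zeros((1, 1, 64, 64), dtype="float32"),
            dims=("batch", "channel", "y", "x"),
        )
    },
    stat={},         # statistics may be filled in later, e.g. by UpdateStats
    id="example-0",  # any hashable identifier within the dataset
)
print(sample.shape)  # per-member, per-axis sizes
```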
    + + +
    +
    + + Sample( members: Dict[bioimageio.spec.model.v0_5.TensorId, bioimageio.core.Tensor], stat: Dict[Annotated[Union[Annotated[Union[bioimageio.core.stat_measures.SampleMean, bioimageio.core.stat_measures.SampleStd, bioimageio.core.stat_measures.SampleVar, bioimageio.core.stat_measures.SampleQuantile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.core.stat_measures.DatasetMean, bioimageio.core.stat_measures.DatasetStd, bioimageio.core.stat_measures.DatasetVar, bioimageio.core.stat_measures.DatasetPercentile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='scope', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Union[float, Annotated[bioimageio.core.Tensor, BeforeValidator(func=<function tensor_custom_before_validator>, json_schema_input_type=PydanticUndefined), PlainSerializer(func=<function tensor_custom_serializer>, return_type=PydanticUndefined, when_used='always')]]], id: Hashable) + + +
    + + + + +
    +
    + + + +

    the sample's tensors

    +
    + + +
    +
    +
    + stat: Dict[Annotated[Union[Annotated[Union[bioimageio.core.stat_measures.SampleMean, bioimageio.core.stat_measures.SampleStd, bioimageio.core.stat_measures.SampleVar, bioimageio.core.stat_measures.SampleQuantile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.core.stat_measures.DatasetMean, bioimageio.core.stat_measures.DatasetStd, bioimageio.core.stat_measures.DatasetVar, bioimageio.core.stat_measures.DatasetPercentile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='scope', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Union[float, Annotated[bioimageio.core.Tensor, BeforeValidator(func=<function tensor_custom_before_validator at 0x7f5372ef6840>, json_schema_input_type=PydanticUndefined), PlainSerializer(func=<function tensor_custom_serializer at 0x7f5372ef6a20>, return_type=PydanticUndefined, when_used='always')]]] + + +
    + + +

    sample and dataset statistics

    +
    + + +
    +
    +
    + id: Hashable + + +
    + + +

    identifier within the sample's dataset

    +
    + + +
    +
    + +
    + shape: Mapping[bioimageio.spec.model.v0_5.TensorId, Mapping[bioimageio.spec.model.v0_5.AxisId, int]] + + + +
    + +
    57    @property
    +58    def shape(self) -> PerMember[PerAxis[int]]:
    +59        return {tid: t.sizes for tid, t in self.members.items()}
    +
    + + + + +
    +
    + +
    + + def + split_into_blocks( self, block_shapes: Mapping[bioimageio.spec.model.v0_5.TensorId, Mapping[bioimageio.spec.model.v0_5.AxisId, int]], halo: Mapping[bioimageio.spec.model.v0_5.TensorId, Mapping[bioimageio.spec.model.v0_5.AxisId, Union[int, Tuple[int, int], bioimageio.core.common.Halo]]], pad_mode: Literal['edge', 'reflect', 'symmetric'], broadcast: bool = False) -> Tuple[int, Iterable[SampleBlockWithOrigin]]: + + + +
    + +
    61    def split_into_blocks(
    +62        self,
    +63        block_shapes: PerMember[PerAxis[int]],
    +64        halo: PerMember[PerAxis[HaloLike]],
    +65        pad_mode: PadMode,
    +66        broadcast: bool = False,
    +67    ) -> Tuple[TotalNumberOfBlocks, Iterable[SampleBlockWithOrigin]]:
    +68        assert not (
    +69            missing := [m for m in block_shapes if m not in self.members]
    +70        ), f"`block_shapes` specified for unknown members: {missing}"
    +71        assert not (
    +72            missing := [m for m in halo if m not in block_shapes]
    +73        ), f"`halo` specified for members without `block_shape`: {missing}"
    +74
    +75        n_blocks, blocks = split_multiple_shapes_into_blocks(
    +76            shapes=self.shape,
    +77            block_shapes=block_shapes,
    +78            halo=halo,
    +79            broadcast=broadcast,
    +80        )
    +81        return n_blocks, sample_block_generator(blocks, origin=self, pad_mode=pad_mode)
    +
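Continuing the sketch above, a hedged example of splitting that sample into spatial tiles with a halo; the tile and halo sizes are illustrative, and whether every axis must be listed in `block_shapes` is determined by `split_multiple_shapes_into_blocks`:

```python
from bioimageio.spec.model.v0_5 import AxisId, TensorId

n_blocks, blocks = sample.split_into_blocks(
    block_shapes={TensorId("raw"): {AxisId("y"): 32, AxisId("x"): 32}},
    halo={TensorId("raw"): {AxisId("y"): 8, AxisId("x"): 8}},
    pad_mode="reflect",
)
for block in blocks:  # SampleBlockWithOrigin instances
    ...
```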
    + + + + +
    +
    + +
    + + def + as_single_block( self, halo: Optional[Mapping[bioimageio.spec.model.v0_5.TensorId, Mapping[bioimageio.spec.model.v0_5.AxisId, bioimageio.core.common.Halo]]] = None): + + + +
    + +
     83    def as_single_block(self, halo: Optional[PerMember[PerAxis[Halo]]] = None):
    + 84        if halo is None:
    + 85            halo = {}
    + 86        return SampleBlockWithOrigin(
    + 87            sample_shape=self.shape,
    + 88            sample_id=self.id,
    + 89            blocks={
    + 90                m: Block(
    + 91                    sample_shape=self.shape[m],
    + 92                    data=data,
    + 93                    inner_slice={
    + 94                        a: SliceInfo(0, s) for a, s in data.tagged_shape.items()
    + 95                    },
    + 96                    halo=halo.get(m, {}),
    + 97                    block_index=0,
    + 98                    blocks_in_sample=1,
    + 99                )
    +100                for m, data in self.members.items()
    +101            },
    +102            stat=self.stat,
    +103            origin=self,
    +104            block_index=0,
    +105            blocks_in_sample=1,
    +106        )
    +
    + + + + +
    +
    + +
    +
    @classmethod
    + + def + from_blocks( cls, sample_blocks: Iterable[SampleBlock], *, fill_value: float = nan) -> Self: + + + +
    + +
    108    @classmethod
    +109    def from_blocks(
    +110        cls,
    +111        sample_blocks: Iterable[SampleBlock],
    +112        *,
    +113        fill_value: float = float("nan"),
    +114    ) -> Self:
    +115        members: PerMember[Tensor] = {}
    +116        stat: Stat = {}
    +117        sample_id = None
    +118        for sample_block in sample_blocks:
    +119            assert sample_id is None or sample_id == sample_block.sample_id
    +120            sample_id = sample_block.sample_id
    +121            stat = sample_block.stat
    +122            for m, block in sample_block.blocks.items():
    +123                if m not in members:
    +124                    if -1 in block.sample_shape.values():
    +125                        raise NotImplementedError(
    +126                            "merging blocks with data dependent axis not yet implemented"
    +127                        )
    +128
    +129                    members[m] = Tensor(
    +130                        np.full(
    +131                            tuple(block.sample_shape[a] for a in block.data.dims),
    +132                            fill_value,
    +133                            dtype=block.data.dtype,
    +134                        ),
    +135                        dims=block.data.dims,
    +136                    )
    +137
    +138                members[m][block.inner_slice] = block.inner_data
    +139
    +140        return cls(members=members, stat=stat, id=sample_id)
    +
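Conversely, from_blocks re-assembles a Sample from sample blocks, e.g. after per-block processing (continuing the sketch above, where `blocks` is the generator returned by split_into_blocks):

merged = Sample.from_blocks(blocks, fill_value=0.0)  # the blocks could also be modified copies
assert merged.shape == sample.shape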
    + + + + +
    +
    +
    + +
    +
    @dataclass
    + + class + SampleBlockBase(typing.Generic[~BlockT]): + + + +
    + +
    146@dataclass
    +147class SampleBlockBase(Generic[BlockT]):
    +148    """base class for `SampleBlockMeta` and `SampleBlock`"""
    +149
    +150    sample_shape: PerMember[PerAxis[int]]
    +151    """the sample shape this block represents a part of"""
    +152
    +153    sample_id: SampleId
    +154    """identifier for the sample within its dataset"""
    +155
    +156    blocks: Dict[MemberId, BlockT]
    +157    """Individual tensor blocks comprising this sample block"""
    +158
    +159    block_index: BlockIndex
    +160    """the n-th block of the sample"""
    +161
    +162    blocks_in_sample: TotalNumberOfBlocks
    +163    """total number of blocks in the sample"""
    +164
    +165    @property
    +166    def shape(self) -> PerMember[PerAxis[int]]:
    +167        return {mid: b.shape for mid, b in self.blocks.items()}
    +168
    +169    @property
    +170    def inner_shape(self) -> PerMember[PerAxis[int]]:
    +171        return {mid: b.inner_shape for mid, b in self.blocks.items()}
    +
    + + +

    base class for SampleBlockMeta and SampleBlock

    +
    + + +
    +
    + + SampleBlockBase( sample_shape: Mapping[bioimageio.spec.model.v0_5.TensorId, Mapping[bioimageio.spec.model.v0_5.AxisId, int]], sample_id: Hashable, blocks: Dict[bioimageio.spec.model.v0_5.TensorId, ~BlockT], block_index: int, blocks_in_sample: int) + + +
    + + + + +
    +
    +
    + sample_shape: Mapping[bioimageio.spec.model.v0_5.TensorId, Mapping[bioimageio.spec.model.v0_5.AxisId, int]] + + +
    + + +

    the sample shape this block represents a part of

    +
    + + +
    +
    +
    + sample_id: Hashable + + +
    + + +

    identifier for the sample within its dataset

    +
    + + +
    +
    +
    + blocks: Dict[bioimageio.spec.model.v0_5.TensorId, ~BlockT] + + +
    + + +

    Individual tensor blocks comprising this sample block

    +
    + + +
    +
    +
    + block_index: int + + +
    + + +

    the n-th block of the sample

    +
    + + +
    +
    +
    + blocks_in_sample: int + + +
    + + +

    total number of blocks in the sample

    +
    + + +
    +
    + +
    + shape: Mapping[bioimageio.spec.model.v0_5.TensorId, Mapping[bioimageio.spec.model.v0_5.AxisId, int]] + + + +
    + +
    165    @property
    +166    def shape(self) -> PerMember[PerAxis[int]]:
    +167        return {mid: b.shape for mid, b in self.blocks.items()}
    +
    + + + + +
    +
    + +
    + inner_shape: Mapping[bioimageio.spec.model.v0_5.TensorId, Mapping[bioimageio.spec.model.v0_5.AxisId, int]] + + + +
    + +
    169    @property
    +170    def inner_shape(self) -> PerMember[PerAxis[int]]:
    +171        return {mid: b.inner_shape for mid, b in self.blocks.items()}
    +
    + + + + +
    +
    +
    + +
    +
    @dataclass
    + + class + LinearSampleAxisTransform(bioimageio.core.block_meta.LinearAxisTransform): + + + +
    + +
    174@dataclass
    +175class LinearSampleAxisTransform(LinearAxisTransform):
    +176    member: MemberId
    +
    + + + + +
    +
    + + LinearSampleAxisTransform( axis: bioimageio.spec.model.v0_5.AxisId, scale: float, offset: int, member: bioimageio.spec.model.v0_5.TensorId) + + +
    + + + + +
    +
    + + + + + +
    +
    +
    Inherited Members
    +
    + +
    +
    +
    +
    + +
    +
    @dataclass
    + + class + SampleBlockMeta(bioimageio.core.sample.SampleBlockBase[bioimageio.core.block_meta.BlockMeta]): + + + +
    + +
    179@dataclass
    +180class SampleBlockMeta(SampleBlockBase[BlockMeta]):
    +181    """Meta data of a dataset sample block"""
    +182
    +183    def get_transformed(
    +184        self, new_axes: PerMember[PerAxis[Union[LinearSampleAxisTransform, int]]]
    +185    ) -> Self:
    +186        sample_shape = {
    +187            m: {
    +188                a: (
    +189                    trf
    +190                    if isinstance(trf, int)
    +191                    else trf.compute(self.sample_shape[trf.member][trf.axis])
    +192                )
    +193                for a, trf in new_axes[m].items()
    +194            }
    +195            for m in new_axes
    +196        }
    +197
    +198        def get_member_halo(m: MemberId, round: Callable[[float], int]):
    +199            return {
    +200                a: (
    +201                    Halo(0, 0)
    +202                    if isinstance(trf, int)
    +203                    or trf.axis not in self.blocks[trf.member].halo
    +204                    else Halo(
    +205                        round(self.blocks[trf.member].halo[trf.axis].left * trf.scale),
    +206                        round(self.blocks[trf.member].halo[trf.axis].right * trf.scale),
    +207                    )
    +208                )
    +209                for a, trf in new_axes[m].items()
    +210            }
    +211
    +212        halo: Dict[MemberId, Dict[AxisId, Halo]] = {}
    +213        for m in new_axes:
    +214            halo[m] = get_member_halo(m, floor)
    +215            if halo[m] != get_member_halo(m, ceil):
    +216                raise ValueError(
    +217                    f"failed to unambiguously scale halo {halo[m]} with {new_axes[m]}"
    +218                    + f" for {m}."
    +219                )
    +220
    +221        inner_slice = {
    +222            m: {
    +223                a: (
    +224                    SliceInfo(0, trf)
    +225                    if isinstance(trf, int)
    +226                    else SliceInfo(
    +227                        trf.compute(
    +228                            self.blocks[trf.member].inner_slice[trf.axis].start
    +229                        ),
    +230                        trf.compute(self.blocks[trf.member].inner_slice[trf.axis].stop),
    +231                    )
    +232                )
    +233                for a, trf in new_axes[m].items()
    +234            }
    +235            for m in new_axes
    +236        }
    +237        return self.__class__(
    +238            blocks={
    +239                m: BlockMeta(
    +240                    sample_shape=sample_shape[m],
    +241                    inner_slice=inner_slice[m],
    +242                    halo=halo[m],
    +243                    block_index=self.block_index,
    +244                    blocks_in_sample=self.blocks_in_sample,
    +245                )
    +246                for m in new_axes
    +247            },
    +248            sample_shape=sample_shape,
    +249            sample_id=self.sample_id,
    +250            block_index=self.block_index,
    +251            blocks_in_sample=self.blocks_in_sample,
    +252        )
    +253
    +254    def with_data(self, data: PerMember[Tensor], *, stat: Stat) -> SampleBlock:
    +255        return SampleBlock(
    +256            sample_shape={
    +257                m: {
    +258                    a: data[m].tagged_shape[a] if s == -1 else s
    +259                    for a, s in member_shape.items()
    +260                }
    +261                for m, member_shape in self.sample_shape.items()
    +262            },
    +263            sample_id=self.sample_id,
    +264            blocks={
    +265                m: Block.from_meta(b, data=data[m]) for m, b in self.blocks.items()
    +266            },
    +267            stat=stat,
    +268            block_index=self.block_index,
    +269            blocks_in_sample=self.blocks_in_sample,
    +270        )
    +
    + + +

    Meta data of a dataset sample block

    +
    + + +
    +
    + + SampleBlockMeta( sample_shape: Mapping[bioimageio.spec.model.v0_5.TensorId, Mapping[bioimageio.spec.model.v0_5.AxisId, int]], sample_id: Hashable, blocks: Dict[bioimageio.spec.model.v0_5.TensorId, ~BlockT], block_index: int, blocks_in_sample: int) + + +
    + + + + +
    +
    + +
    + + def + get_transformed( self, new_axes: Mapping[bioimageio.spec.model.v0_5.TensorId, Mapping[bioimageio.spec.model.v0_5.AxisId, Union[LinearSampleAxisTransform, int]]]) -> Self: + + + +
    + +
    183    def get_transformed(
    +184        self, new_axes: PerMember[PerAxis[Union[LinearSampleAxisTransform, int]]]
    +185    ) -> Self:
    +186        sample_shape = {
    +187            m: {
    +188                a: (
    +189                    trf
    +190                    if isinstance(trf, int)
    +191                    else trf.compute(self.sample_shape[trf.member][trf.axis])
    +192                )
    +193                for a, trf in new_axes[m].items()
    +194            }
    +195            for m in new_axes
    +196        }
    +197
    +198        def get_member_halo(m: MemberId, round: Callable[[float], int]):
    +199            return {
    +200                a: (
    +201                    Halo(0, 0)
    +202                    if isinstance(trf, int)
    +203                    or trf.axis not in self.blocks[trf.member].halo
    +204                    else Halo(
    +205                        round(self.blocks[trf.member].halo[trf.axis].left * trf.scale),
    +206                        round(self.blocks[trf.member].halo[trf.axis].right * trf.scale),
    +207                    )
    +208                )
    +209                for a, trf in new_axes[m].items()
    +210            }
    +211
    +212        halo: Dict[MemberId, Dict[AxisId, Halo]] = {}
    +213        for m in new_axes:
    +214            halo[m] = get_member_halo(m, floor)
    +215            if halo[m] != get_member_halo(m, ceil):
    +216                raise ValueError(
    +217                    f"failed to unambiguously scale halo {halo[m]} with {new_axes[m]}"
    +218                    + f" for {m}."
    +219                )
    +220
    +221        inner_slice = {
    +222            m: {
    +223                a: (
    +224                    SliceInfo(0, trf)
    +225                    if isinstance(trf, int)
    +226                    else SliceInfo(
    +227                        trf.compute(
    +228                            self.blocks[trf.member].inner_slice[trf.axis].start
    +229                        ),
    +230                        trf.compute(self.blocks[trf.member].inner_slice[trf.axis].stop),
    +231                    )
    +232                )
    +233                for a, trf in new_axes[m].items()
    +234            }
    +235            for m in new_axes
    +236        }
    +237        return self.__class__(
    +238            blocks={
    +239                m: BlockMeta(
    +240                    sample_shape=sample_shape[m],
    +241                    inner_slice=inner_slice[m],
    +242                    halo=halo[m],
    +243                    block_index=self.block_index,
    +244                    blocks_in_sample=self.blocks_in_sample,
    +245                )
    +246                for m in new_axes
    +247            },
    +248            sample_shape=sample_shape,
    +249            sample_id=self.sample_id,
    +250            block_index=self.block_index,
    +251            blocks_in_sample=self.blocks_in_sample,
    +252        )
    +
    + + + + +
    +
    + +
    + + def + with_data( self, data: Mapping[bioimageio.spec.model.v0_5.TensorId, bioimageio.core.Tensor], *, stat: Dict[Annotated[Union[Annotated[Union[bioimageio.core.stat_measures.SampleMean, bioimageio.core.stat_measures.SampleStd, bioimageio.core.stat_measures.SampleVar, bioimageio.core.stat_measures.SampleQuantile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.core.stat_measures.DatasetMean, bioimageio.core.stat_measures.DatasetStd, bioimageio.core.stat_measures.DatasetVar, bioimageio.core.stat_measures.DatasetPercentile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='scope', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Union[float, Annotated[bioimageio.core.Tensor, BeforeValidator(func=<function tensor_custom_before_validator>, json_schema_input_type=PydanticUndefined), PlainSerializer(func=<function tensor_custom_serializer>, return_type=PydanticUndefined, when_used='always')]]]) -> SampleBlock: + + + +
    + +
    254    def with_data(self, data: PerMember[Tensor], *, stat: Stat) -> SampleBlock:
    +255        return SampleBlock(
    +256            sample_shape={
    +257                m: {
    +258                    a: data[m].tagged_shape[a] if s == -1 else s
    +259                    for a, s in member_shape.items()
    +260                }
    +261                for m, member_shape in self.sample_shape.items()
    +262            },
    +263            sample_id=self.sample_id,
    +264            blocks={
    +265                m: Block.from_meta(b, data=data[m]) for m, b in self.blocks.items()
    +266            },
    +267            stat=stat,
    +268            block_index=self.block_index,
    +269            blocks_in_sample=self.blocks_in_sample,
    +270        )
    +
    + + + + +
    + +
    +
    + +
    +
    @dataclass
    + + class + SampleBlock(bioimageio.core.sample.SampleBlockBase[bioimageio.core.block.Block]): + + + +
    + +
    273@dataclass
    +274class SampleBlock(SampleBlockBase[Block]):
    +275    """A block of a dataset sample"""
    +276
    +277    stat: Stat
    +278    """computed statistics"""
    +279
    +280    @property
    +281    def members(self) -> PerMember[Tensor]:
    +282        """the sample block's tensors"""
    +283        return {m: b.data for m, b in self.blocks.items()}
    +284
    +285    def get_transformed_meta(
    +286        self, new_axes: PerMember[PerAxis[Union[LinearSampleAxisTransform, int]]]
    +287    ) -> SampleBlockMeta:
    +288        return SampleBlockMeta(
    +289            sample_id=self.sample_id,
    +290            blocks=dict(self.blocks),
    +291            sample_shape=self.sample_shape,
    +292            block_index=self.block_index,
    +293            blocks_in_sample=self.blocks_in_sample,
    +294        ).get_transformed(new_axes)
    +
    + + +

    A block of a dataset sample

    +
    + + +
    +
    + + SampleBlock( sample_shape: Mapping[bioimageio.spec.model.v0_5.TensorId, Mapping[bioimageio.spec.model.v0_5.AxisId, int]], sample_id: Hashable, blocks: Dict[bioimageio.spec.model.v0_5.TensorId, ~BlockT], block_index: int, blocks_in_sample: int, stat: Dict[Annotated[Union[Annotated[Union[bioimageio.core.stat_measures.SampleMean, bioimageio.core.stat_measures.SampleStd, bioimageio.core.stat_measures.SampleVar, bioimageio.core.stat_measures.SampleQuantile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.core.stat_measures.DatasetMean, bioimageio.core.stat_measures.DatasetStd, bioimageio.core.stat_measures.DatasetVar, bioimageio.core.stat_measures.DatasetPercentile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='scope', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Union[float, Annotated[bioimageio.core.Tensor, BeforeValidator(func=<function tensor_custom_before_validator>, json_schema_input_type=PydanticUndefined), PlainSerializer(func=<function tensor_custom_serializer>, return_type=PydanticUndefined, when_used='always')]]]) + + +
    + + + + +
    +
    +
+ stat: Dict[Annotated[Union[Annotated[Union[bioimageio.core.stat_measures.SampleMean, bioimageio.core.stat_measures.SampleStd, bioimageio.core.stat_measures.SampleVar, bioimageio.core.stat_measures.SampleQuantile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.core.stat_measures.DatasetMean, bioimageio.core.stat_measures.DatasetStd, bioimageio.core.stat_measures.DatasetVar, bioimageio.core.stat_measures.DatasetPercentile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='scope', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Union[float, Annotated[bioimageio.core.Tensor, BeforeValidator(func=<function tensor_custom_before_validator>, json_schema_input_type=PydanticUndefined), PlainSerializer(func=<function tensor_custom_serializer>, return_type=PydanticUndefined, when_used='always')]]] + + +
    + + +

    computed statistics

    +
    + + +
    +
    + +
    + members: Mapping[bioimageio.spec.model.v0_5.TensorId, bioimageio.core.Tensor] + + + +
    + +
    280    @property
    +281    def members(self) -> PerMember[Tensor]:
    +282        """the sample block's tensors"""
    +283        return {m: b.data for m, b in self.blocks.items()}
    +
    + + +

    the sample block's tensors

    +
    + + +
    +
    + +
    + + def + get_transformed_meta( self, new_axes: Mapping[bioimageio.spec.model.v0_5.TensorId, Mapping[bioimageio.spec.model.v0_5.AxisId, Union[LinearSampleAxisTransform, int]]]) -> SampleBlockMeta: + + + +
    + +
    285    def get_transformed_meta(
    +286        self, new_axes: PerMember[PerAxis[Union[LinearSampleAxisTransform, int]]]
    +287    ) -> SampleBlockMeta:
    +288        return SampleBlockMeta(
    +289            sample_id=self.sample_id,
    +290            blocks=dict(self.blocks),
    +291            sample_shape=self.sample_shape,
    +292            block_index=self.block_index,
    +293            blocks_in_sample=self.blocks_in_sample,
    +294        ).get_transformed(new_axes)
    +
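As a hedged sketch (continuing the split example from the Sample section above), transformed metadata for a hypothetical output tensor with halved spatial axes could be derived as follows; the output member id and the 0.5 scale factor are invented, and the exact rounding follows LinearAxisTransform.compute:

from bioimageio.core.sample import LinearSampleAxisTransform

out = TensorId("output")  # hypothetical output member id
_, fresh_blocks = sample.split_into_blocks(
    block_shapes={member: {x: 128, y: 128}},
    halo={member: {x: 16, y: 16}},
    pad_mode="reflect",
)
for block in fresh_blocks:
    out_meta = block.get_transformed_meta(
        {
            out: {
                x: LinearSampleAxisTransform(axis=x, scale=0.5, offset=0, member=member),
                y: LinearSampleAxisTransform(axis=y, scale=0.5, offset=0, member=member),
            }
        }
    )
    # out_meta.sample_shape[out] is half the input sample shape along x and y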
    + + + + +
    + +
    +
    + +
    +
    @dataclass
+ + class + SampleBlockWithOrigin(bioimageio.core.sample.SampleBlock): + + + +
    + +
    297@dataclass
    +298class SampleBlockWithOrigin(SampleBlock):
    +299    """A `SampleBlock` with a reference (`origin`) to the whole `Sample`"""
    +300
    +301    origin: Sample
    +302    """the sample this sample block was taken from"""
    +
    + + +

    A SampleBlock with a reference (origin) to the whole Sample

    +
    + + +
    +
    + + SampleBlockWithOrigin( sample_shape: Mapping[bioimageio.spec.model.v0_5.TensorId, Mapping[bioimageio.spec.model.v0_5.AxisId, int]], sample_id: Hashable, blocks: Dict[bioimageio.spec.model.v0_5.TensorId, ~BlockT], block_index: int, blocks_in_sample: int, stat: Dict[Annotated[Union[Annotated[Union[bioimageio.core.stat_measures.SampleMean, bioimageio.core.stat_measures.SampleStd, bioimageio.core.stat_measures.SampleVar, bioimageio.core.stat_measures.SampleQuantile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.core.stat_measures.DatasetMean, bioimageio.core.stat_measures.DatasetStd, bioimageio.core.stat_measures.DatasetVar, bioimageio.core.stat_measures.DatasetPercentile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='scope', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Union[float, Annotated[bioimageio.core.Tensor, BeforeValidator(func=<function tensor_custom_before_validator>, json_schema_input_type=PydanticUndefined), PlainSerializer(func=<function tensor_custom_serializer>, return_type=PydanticUndefined, when_used='always')]]], origin: Sample) + + +
    + + + + +
    +
    +
    + origin: Sample + + +
    + + +

    the sample this sample block was taken from

    +
    + + +
    + +
    +
    + +
    + + def + sample_block_meta_generator( blocks: Iterable[Mapping[bioimageio.spec.model.v0_5.TensorId, bioimageio.core.BlockMeta]], *, sample_shape: Mapping[bioimageio.spec.model.v0_5.TensorId, Mapping[bioimageio.spec.model.v0_5.AxisId, int]], sample_id: Hashable): + + + +
    + +
    316def sample_block_meta_generator(
    +317    blocks: Iterable[PerMember[BlockMeta]],
    +318    *,
    +319    sample_shape: PerMember[PerAxis[int]],
    +320    sample_id: SampleId,
    +321):
    +322    for member_blocks in blocks:
    +323        cons = _ConsolidatedMemberBlocks(member_blocks)
    +324        yield SampleBlockMeta(
    +325            blocks=dict(member_blocks),
    +326            sample_shape=sample_shape,
    +327            sample_id=sample_id,
    +328            block_index=cons.block_index,
    +329            blocks_in_sample=cons.blocks_in_sample,
    +330        )
    +
    + + + + +
    +
    + +
    + + def + sample_block_generator( blocks: Iterable[Mapping[bioimageio.spec.model.v0_5.TensorId, bioimageio.core.BlockMeta]], *, origin: Sample, pad_mode: Literal['edge', 'reflect', 'symmetric']) -> Iterable[SampleBlockWithOrigin]: + + + +
    + +
    333def sample_block_generator(
    +334    blocks: Iterable[PerMember[BlockMeta]],
    +335    *,
    +336    origin: Sample,
    +337    pad_mode: PadMode,
    +338) -> Iterable[SampleBlockWithOrigin]:
    +339    for member_blocks in blocks:
    +340        cons = _ConsolidatedMemberBlocks(member_blocks)
    +341        yield SampleBlockWithOrigin(
    +342            blocks={
    +343                m: Block.from_sample_member(
    +344                    origin.members[m], block=member_blocks[m], pad_mode=pad_mode
    +345                )
    +346                for m in origin.members
    +347            },
    +348            sample_shape=origin.shape,
    +349            origin=origin,
    +350            stat=origin.stat,
    +351            sample_id=origin.id,
    +352            block_index=cons.block_index,
    +353            blocks_in_sample=cons.blocks_in_sample,
    +354        )
    +
    + + + + +
    +
    + + \ No newline at end of file diff --git a/bioimageio/core/stat_calculators.html b/bioimageio/core/stat_calculators.html new file mode 100644 index 00000000..6e13db1a --- /dev/null +++ b/bioimageio/core/stat_calculators.html @@ -0,0 +1,2560 @@ + + + + + + + bioimageio.core.stat_calculators API documentation + + + + + + + + + + +
    +
    +

    +bioimageio.core.stat_calculators

    + + + + + + +
      1from __future__ import annotations
    +  2
    +  3import collections.abc
    +  4import warnings
    +  5from itertools import product
    +  6from typing import (
    +  7    Any,
    +  8    Collection,
    +  9    Dict,
    + 10    Iterable,
    + 11    Iterator,
    + 12    List,
    + 13    Mapping,
    + 14    Optional,
    + 15    OrderedDict,
    + 16    Sequence,
    + 17    Set,
    + 18    Tuple,
    + 19    Type,
    + 20    Union,
    + 21)
    + 22
    + 23import numpy as np
    + 24import xarray as xr
    + 25from loguru import logger
    + 26from numpy.typing import NDArray
    + 27from typing_extensions import assert_never
    + 28
    + 29from .axis import AxisId, PerAxis
    + 30from .common import MemberId
    + 31from .sample import Sample
    + 32from .stat_measures import (
    + 33    DatasetMean,
    + 34    DatasetMeasure,
    + 35    DatasetMeasureBase,
    + 36    DatasetPercentile,
    + 37    DatasetStd,
    + 38    DatasetVar,
    + 39    Measure,
    + 40    MeasureValue,
    + 41    SampleMean,
    + 42    SampleMeasure,
    + 43    SampleQuantile,
    + 44    SampleStd,
    + 45    SampleVar,
    + 46)
    + 47from .tensor import Tensor
    + 48
    + 49try:
    + 50    import crick
    + 51
    + 52except Exception:
    + 53    crick = None
    + 54
    + 55    class TDigest:
    + 56        def update(self, obj: Any):
    + 57            pass
    + 58
    + 59        def quantile(self, q: Any) -> Any:
    + 60            pass
    + 61
    + 62else:
    + 63    TDigest = crick.TDigest  # type: ignore
    + 64
    + 65
    + 66class MeanCalculator:
    + 67    """to calculate sample and dataset mean for in-memory samples"""
    + 68
    + 69    def __init__(self, member_id: MemberId, axes: Optional[Sequence[AxisId]]):
    + 70        super().__init__()
    + 71        self._n: int = 0
    + 72        self._mean: Optional[Tensor] = None
    + 73        self._axes = None if axes is None else tuple(axes)
    + 74        self._member_id = member_id
    + 75        self._sample_mean = SampleMean(member_id=self._member_id, axes=self._axes)
    + 76        self._dataset_mean = DatasetMean(member_id=self._member_id, axes=self._axes)
    + 77
    + 78    def compute(self, sample: Sample) -> Dict[SampleMean, MeasureValue]:
    + 79        return {self._sample_mean: self._compute_impl(sample)}
    + 80
    + 81    def _compute_impl(self, sample: Sample) -> Tensor:
    + 82        tensor = sample.members[self._member_id].astype("float64", copy=False)
    + 83        return tensor.mean(dim=self._axes)
    + 84
    + 85    def update(self, sample: Sample) -> None:
    + 86        mean = self._compute_impl(sample)
    + 87        self._update_impl(sample.members[self._member_id], mean)
    + 88
    + 89    def compute_and_update(self, sample: Sample) -> Dict[SampleMean, MeasureValue]:
    + 90        mean = self._compute_impl(sample)
    + 91        self._update_impl(sample.members[self._member_id], mean)
    + 92        return {self._sample_mean: mean}
    + 93
    + 94    def _update_impl(self, tensor: Tensor, tensor_mean: Tensor):
    + 95        assert tensor_mean.dtype == "float64"
    + 96        # reduced voxel count
    + 97        n_b = int(tensor.size / tensor_mean.size)
    + 98
    + 99        if self._mean is None:
    +100            assert self._n == 0
    +101            self._n = n_b
    +102            self._mean = tensor_mean
    +103        else:
    +104            assert self._n != 0
    +105            n_a = self._n
    +106            mean_old = self._mean
    +107            self._n = n_a + n_b
    +108            self._mean = (n_a * mean_old + n_b * tensor_mean) / self._n
    +109            assert self._mean.dtype == "float64"
    +110
    +111    def finalize(self) -> Dict[DatasetMean, MeasureValue]:
    +112        if self._mean is None:
    +113            return {}
    +114        else:
    +115            return {self._dataset_mean: self._mean}
    +116
    +117
    +118class MeanVarStdCalculator:
    +119    """to calculate sample and dataset mean, variance or standard deviation"""
    +120
    +121    def __init__(self, member_id: MemberId, axes: Optional[Sequence[AxisId]]):
    +122        super().__init__()
    +123        self._axes = None if axes is None else tuple(axes)
    +124        self._member_id = member_id
    +125        self._n: int = 0
    +126        self._mean: Optional[Tensor] = None
    +127        self._m2: Optional[Tensor] = None
    +128
    +129    def compute(
    +130        self, sample: Sample
    +131    ) -> Dict[Union[SampleMean, SampleVar, SampleStd], MeasureValue]:
    +132        tensor = sample.members[self._member_id]
    +133        mean = tensor.mean(dim=self._axes)
    +134        c = (tensor - mean).data
    +135        if self._axes is None:
    +136            n = tensor.size
    +137        else:
    +138            n = int(np.prod([tensor.sizes[d] for d in self._axes]))
    +139
    +140        var = xr.dot(c, c, dims=self._axes) / n
    +141        assert isinstance(var, xr.DataArray)
    +142        std = np.sqrt(var)
    +143        assert isinstance(std, xr.DataArray)
    +144        return {
    +145            SampleMean(axes=self._axes, member_id=self._member_id): mean,
    +146            SampleVar(axes=self._axes, member_id=self._member_id): Tensor.from_xarray(
    +147                var
    +148            ),
    +149            SampleStd(axes=self._axes, member_id=self._member_id): Tensor.from_xarray(
    +150                std
    +151            ),
    +152        }
    +153
    +154    def update(self, sample: Sample):
    +155        tensor = sample.members[self._member_id].astype("float64", copy=False)
    +156        mean_b = tensor.mean(dim=self._axes)
    +157        assert mean_b.dtype == "float64"
    +158        # reduced voxel count
    +159        n_b = int(tensor.size / mean_b.size)
    +160        m2_b = ((tensor - mean_b) ** 2).sum(dim=self._axes)
    +161        assert m2_b.dtype == "float64"
    +162        if self._mean is None:
    +163            assert self._m2 is None
    +164            self._n = n_b
    +165            self._mean = mean_b
    +166            self._m2 = m2_b
    +167        else:
    +168            n_a = self._n
    +169            mean_a = self._mean
    +170            m2_a = self._m2
    +171            self._n = n = n_a + n_b
    +172            self._mean = (n_a * mean_a + n_b * mean_b) / n
    +173            assert self._mean.dtype == "float64"
    +174            d = mean_b - mean_a
    +175            self._m2 = m2_a + m2_b + d**2 * n_a * n_b / n
    +176            assert self._m2.dtype == "float64"
    +177
    +178    def finalize(
    +179        self,
    +180    ) -> Dict[Union[DatasetMean, DatasetVar, DatasetStd], MeasureValue]:
    +181        if self._mean is None:
    +182            return {}
    +183        else:
    +184            assert self._m2 is not None
    +185            var = self._m2 / self._n
    +186            sqrt = np.sqrt(var)
    +187            if isinstance(sqrt, (int, float)):
    +188                # var and mean are scalar tensors, let's keep it consistent
    +189                sqrt = Tensor.from_xarray(xr.DataArray(sqrt))
    +190
    +191            assert isinstance(sqrt, Tensor), type(sqrt)
    +192            return {
    +193                DatasetMean(member_id=self._member_id, axes=self._axes): self._mean,
    +194                DatasetVar(member_id=self._member_id, axes=self._axes): var,
    +195                DatasetStd(member_id=self._member_id, axes=self._axes): sqrt,
    +196            }
    +197
    +198
    +199class SamplePercentilesCalculator:
    +200    """to calculate sample percentiles"""
    +201
    +202    def __init__(
    +203        self,
    +204        member_id: MemberId,
    +205        axes: Optional[Sequence[AxisId]],
    +206        qs: Collection[float],
    +207    ):
    +208        super().__init__()
    +209        assert all(0.0 <= q <= 1.0 for q in qs)
    +210        self._qs = sorted(set(qs))
    +211        self._axes = None if axes is None else tuple(axes)
    +212        self._member_id = member_id
    +213
    +214    def compute(self, sample: Sample) -> Dict[SampleQuantile, MeasureValue]:
    +215        tensor = sample.members[self._member_id]
    +216        ps = tensor.quantile(self._qs, dim=self._axes)
    +217        return {
    +218            SampleQuantile(q=q, axes=self._axes, member_id=self._member_id): p
    +219            for q, p in zip(self._qs, ps)
    +220        }
    +221
    +222
    +223class MeanPercentilesCalculator:
    +224    """to calculate dataset percentiles heuristically by averaging across samples
+225    **note**: the returned dataset percentiles are an estimate and **not mathematically correct**
    +226    """
    +227
    +228    def __init__(
    +229        self,
    +230        member_id: MemberId,
    +231        axes: Optional[Sequence[AxisId]],
    +232        qs: Collection[float],
    +233    ):
    +234        super().__init__()
    +235        assert all(0.0 <= q <= 1.0 for q in qs)
    +236        self._qs = sorted(set(qs))
    +237        self._axes = None if axes is None else tuple(axes)
    +238        self._member_id = member_id
    +239        self._n: int = 0
    +240        self._estimates: Optional[Tensor] = None
    +241
    +242    def update(self, sample: Sample):
    +243        tensor = sample.members[self._member_id]
    +244        sample_estimates = tensor.quantile(self._qs, dim=self._axes).astype(
    +245            "float64", copy=False
    +246        )
    +247
    +248        # reduced voxel count
    +249        n = int(tensor.size / np.prod(sample_estimates.shape_tuple[1:]))
    +250
    +251        if self._estimates is None:
    +252            assert self._n == 0
    +253            self._estimates = sample_estimates
    +254        else:
    +255            self._estimates = (self._n * self._estimates + n * sample_estimates) / (
    +256                self._n + n
    +257            )
    +258            assert self._estimates.dtype == "float64"
    +259
    +260        self._n += n
    +261
    +262    def finalize(self) -> Dict[DatasetPercentile, MeasureValue]:
    +263        if self._estimates is None:
    +264            return {}
    +265        else:
    +266            warnings.warn(
    +267                "Computed dataset percentiles naively by averaging percentiles of samples."
    +268            )
    +269            return {
    +270                DatasetPercentile(q=q, axes=self._axes, member_id=self._member_id): e
    +271                for q, e in zip(self._qs, self._estimates)
    +272            }
    +273
    +274
    +275class CrickPercentilesCalculator:
    +276    """to calculate dataset percentiles with the experimental [crick libray](https://github.com/dask/crick)"""
    +277
    +278    def __init__(
    +279        self,
    +280        member_id: MemberId,
    +281        axes: Optional[Sequence[AxisId]],
    +282        qs: Collection[float],
    +283    ):
    +284        warnings.warn(
    +285            "Computing dataset percentiles with experimental 'crick' library."
    +286        )
    +287        super().__init__()
    +288        assert all(0.0 <= q <= 1.0 for q in qs)
    +289        assert axes is None or "_percentiles" not in axes
    +290        self._qs = sorted(set(qs))
    +291        self._axes = None if axes is None else tuple(axes)
    +292        self._member_id = member_id
    +293        self._digest: Optional[List[TDigest]] = None
    +294        self._dims: Optional[Tuple[AxisId, ...]] = None
    +295        self._indices: Optional[Iterator[Tuple[int, ...]]] = None
    +296        self._shape: Optional[Tuple[int, ...]] = None
    +297
    +298    def _initialize(self, tensor_sizes: PerAxis[int]):
    +299        assert crick is not None
    +300        out_sizes: OrderedDict[AxisId, int] = collections.OrderedDict(
    +301            _percentiles=len(self._qs)
    +302        )
    +303        if self._axes is not None:
    +304            for d, s in tensor_sizes.items():
    +305                if d not in self._axes:
    +306                    out_sizes[d] = s
    +307
    +308        self._dims, self._shape = zip(*out_sizes.items())
    +309        d = int(np.prod(self._shape[1:]))  # type: ignore
    +310        self._digest = [TDigest() for _ in range(d)]
    +311        self._indices = product(*map(range, self._shape[1:]))
    +312
    +313    def update(self, part: Sample):
    +314        tensor = (
    +315            part.members[self._member_id]
    +316            if isinstance(part, Sample)
    +317            else part.members[self._member_id].data
    +318        )
    +319        assert "_percentiles" not in tensor.dims
    +320        if self._digest is None:
    +321            self._initialize(tensor.tagged_shape)
    +322
    +323        assert self._digest is not None
    +324        assert self._indices is not None
    +325        assert self._dims is not None
    +326        for i, idx in enumerate(self._indices):
    +327            self._digest[i].update(tensor[dict(zip(self._dims[1:], idx))])
    +328
    +329    def finalize(self) -> Dict[DatasetPercentile, MeasureValue]:
    +330        if self._digest is None:
    +331            return {}
    +332        else:
    +333            assert self._dims is not None
    +334            assert self._shape is not None
    +335
    +336            vs: NDArray[Any] = np.asarray(
    +337                [[d.quantile(q) for d in self._digest] for q in self._qs]
    +338            ).reshape(self._shape)
    +339            return {
    +340                DatasetPercentile(
    +341                    q=q, axes=self._axes, member_id=self._member_id
    +342                ): Tensor(v, dims=self._dims[1:])
    +343                for q, v in zip(self._qs, vs)
    +344            }
    +345
    +346
    +347if crick is None:
    +348    DatasetPercentilesCalculator: Type[
    +349        Union[MeanPercentilesCalculator, CrickPercentilesCalculator]
    +350    ] = MeanPercentilesCalculator
    +351else:
    +352    DatasetPercentilesCalculator = CrickPercentilesCalculator
    +353
    +354
    +355class NaiveSampleMeasureCalculator:
    +356    """wrapper for measures to match interface of other sample measure calculators"""
    +357
    +358    def __init__(self, member_id: MemberId, measure: SampleMeasure):
    +359        super().__init__()
    +360        self.tensor_name = member_id
    +361        self.measure = measure
    +362
    +363    def compute(self, sample: Sample) -> Dict[SampleMeasure, MeasureValue]:
    +364        return {self.measure: self.measure.compute(sample)}
    +365
    +366
    +367SampleMeasureCalculator = Union[
    +368    MeanCalculator,
    +369    MeanVarStdCalculator,
    +370    SamplePercentilesCalculator,
    +371    NaiveSampleMeasureCalculator,
    +372]
    +373DatasetMeasureCalculator = Union[
    +374    MeanCalculator, MeanVarStdCalculator, DatasetPercentilesCalculator
    +375]
    +376
    +377
    +378class StatsCalculator:
    +379    """Estimates dataset statistics and computes sample statistics efficiently"""
    +380
    +381    def __init__(
    +382        self,
    +383        measures: Collection[Measure],
    +384        initial_dataset_measures: Optional[
    +385            Mapping[DatasetMeasure, MeasureValue]
    +386        ] = None,
    +387    ):
    +388        super().__init__()
    +389        self.sample_count = 0
    +390        self.sample_calculators, self.dataset_calculators = get_measure_calculators(
    +391            measures
    +392        )
    +393        if not initial_dataset_measures:
    +394            self._current_dataset_measures: Optional[
    +395                Dict[DatasetMeasure, MeasureValue]
    +396            ] = None
    +397        else:
    +398            missing_dataset_meas = {
    +399                m
    +400                for m in measures
    +401                if isinstance(m, DatasetMeasureBase)
    +402                and m not in initial_dataset_measures
    +403            }
    +404            if missing_dataset_meas:
    +405                logger.debug(
+406                    f"ignoring `initial_dataset_measures` as it is missing {missing_dataset_meas}"
    +407                )
    +408                self._current_dataset_measures = None
    +409            else:
    +410                self._current_dataset_measures = dict(initial_dataset_measures)
    +411
    +412    @property
    +413    def has_dataset_measures(self):
    +414        return self._current_dataset_measures is not None
    +415
    +416    def update(
    +417        self,
    +418        sample: Union[Sample, Iterable[Sample]],
    +419    ) -> None:
    +420        _ = self._update(sample)
    +421
    +422    def finalize(self) -> Dict[DatasetMeasure, MeasureValue]:
    +423        """returns aggregated dataset statistics"""
    +424        if self._current_dataset_measures is None:
    +425            self._current_dataset_measures = {}
    +426            for calc in self.dataset_calculators:
    +427                values = calc.finalize()
    +428                self._current_dataset_measures.update(values.items())
    +429
    +430        return self._current_dataset_measures
    +431
    +432    def update_and_get_all(
    +433        self,
    +434        sample: Union[Sample, Iterable[Sample]],
    +435    ) -> Dict[Measure, MeasureValue]:
    +436        """Returns sample as well as updated dataset statistics"""
    +437        last_sample = self._update(sample)
    +438        if last_sample is None:
    +439            raise ValueError("`sample` was not a `Sample`, nor did it yield any.")
    +440
    +441        return {**self._compute(last_sample), **self.finalize()}
    +442
    +443    def skip_update_and_get_all(self, sample: Sample) -> Dict[Measure, MeasureValue]:
    +444        """Returns sample as well as previously computed dataset statistics"""
    +445        return {**self._compute(sample), **self.finalize()}
    +446
    +447    def _compute(self, sample: Sample) -> Dict[SampleMeasure, MeasureValue]:
    +448        ret: Dict[SampleMeasure, MeasureValue] = {}
    +449        for calc in self.sample_calculators:
    +450            values = calc.compute(sample)
    +451            ret.update(values.items())
    +452
    +453        return ret
    +454
    +455    def _update(self, sample: Union[Sample, Iterable[Sample]]) -> Optional[Sample]:
    +456        self.sample_count += 1
    +457        samples = [sample] if isinstance(sample, Sample) else sample
    +458        last_sample = None
    +459        for el in samples:
    +460            last_sample = el
    +461            for calc in self.dataset_calculators:
    +462                calc.update(el)
    +463
    +464        self._current_dataset_measures = None
    +465        return last_sample
    +466
    +467
    +468def get_measure_calculators(
    +469    required_measures: Iterable[Measure],
    +470) -> Tuple[List[SampleMeasureCalculator], List[DatasetMeasureCalculator]]:
    +471    """determines which calculators are needed to compute the required measures efficiently"""
    +472
    +473    sample_calculators: List[SampleMeasureCalculator] = []
    +474    dataset_calculators: List[DatasetMeasureCalculator] = []
    +475
    +476    # split required measures into groups
    +477    required_sample_means: Set[SampleMean] = set()
    +478    required_dataset_means: Set[DatasetMean] = set()
    +479    required_sample_mean_var_std: Set[Union[SampleMean, SampleVar, SampleStd]] = set()
    +480    required_dataset_mean_var_std: Set[Union[DatasetMean, DatasetVar, DatasetStd]] = (
    +481        set()
    +482    )
    +483    required_sample_percentiles: Dict[
    +484        Tuple[MemberId, Optional[Tuple[AxisId, ...]]], Set[float]
    +485    ] = {}
    +486    required_dataset_percentiles: Dict[
    +487        Tuple[MemberId, Optional[Tuple[AxisId, ...]]], Set[float]
    +488    ] = {}
    +489
    +490    for rm in required_measures:
    +491        if isinstance(rm, SampleMean):
    +492            required_sample_means.add(rm)
    +493        elif isinstance(rm, DatasetMean):
    +494            required_dataset_means.add(rm)
    +495        elif isinstance(rm, (SampleVar, SampleStd)):
    +496            required_sample_mean_var_std.update(
    +497                {
    +498                    msv(axes=rm.axes, member_id=rm.member_id)
    +499                    for msv in (SampleMean, SampleStd, SampleVar)
    +500                }
    +501            )
    +502            assert rm in required_sample_mean_var_std
    +503        elif isinstance(rm, (DatasetVar, DatasetStd)):
    +504            required_dataset_mean_var_std.update(
    +505                {
    +506                    msv(axes=rm.axes, member_id=rm.member_id)
    +507                    for msv in (DatasetMean, DatasetStd, DatasetVar)
    +508                }
    +509            )
    +510            assert rm in required_dataset_mean_var_std
    +511        elif isinstance(rm, SampleQuantile):
    +512            required_sample_percentiles.setdefault((rm.member_id, rm.axes), set()).add(
    +513                rm.q
    +514            )
    +515        elif isinstance(rm, DatasetPercentile):
    +516            required_dataset_percentiles.setdefault((rm.member_id, rm.axes), set()).add(
    +517                rm.q
    +518            )
    +519        else:
    +520            assert_never(rm)
    +521
    +522    for rm in required_sample_means:
    +523        if rm in required_sample_mean_var_std:
+524        # computed together with var and std
    +525            continue
    +526
    +527        sample_calculators.append(MeanCalculator(member_id=rm.member_id, axes=rm.axes))
    +528
    +529    for rm in required_sample_mean_var_std:
    +530        sample_calculators.append(
    +531            MeanVarStdCalculator(member_id=rm.member_id, axes=rm.axes)
    +532        )
    +533
    +534    for rm in required_dataset_means:
    +535        if rm in required_dataset_mean_var_std:
+536        # computed together with var and std
    +537            continue
    +538
    +539        dataset_calculators.append(MeanCalculator(member_id=rm.member_id, axes=rm.axes))
    +540
    +541    for rm in required_dataset_mean_var_std:
    +542        dataset_calculators.append(
    +543            MeanVarStdCalculator(member_id=rm.member_id, axes=rm.axes)
    +544        )
    +545
    +546    for (tid, axes), qs in required_sample_percentiles.items():
    +547        sample_calculators.append(
    +548            SamplePercentilesCalculator(member_id=tid, axes=axes, qs=qs)
    +549        )
    +550
    +551    for (tid, axes), qs in required_dataset_percentiles.items():
    +552        dataset_calculators.append(
    +553            DatasetPercentilesCalculator(member_id=tid, axes=axes, qs=qs)
    +554        )
    +555
    +556    return sample_calculators, dataset_calculators
    +557
    +558
    +559def compute_dataset_measures(
    +560    measures: Iterable[DatasetMeasure], dataset: Iterable[Sample]
    +561) -> Dict[DatasetMeasure, MeasureValue]:
    +562    """compute all dataset `measures` for the given `dataset`"""
    +563    sample_calculators, calculators = get_measure_calculators(measures)
    +564    assert not sample_calculators
    +565
    +566    ret: Dict[DatasetMeasure, MeasureValue] = {}
    +567
    +568    for sample in dataset:
    +569        for calc in calculators:
    +570            calc.update(sample)
    +571
    +572    for calc in calculators:
    +573        ret.update(calc.finalize().items())
    +574
    +575    return ret
    +576
    +577
    +578def compute_sample_measures(
    +579    measures: Iterable[SampleMeasure], sample: Sample
    +580) -> Dict[SampleMeasure, MeasureValue]:
    +581    """compute all sample `measures` for the given `sample`"""
    +582    calculators, dataset_calculators = get_measure_calculators(measures)
    +583    assert not dataset_calculators
    +584    ret: Dict[SampleMeasure, MeasureValue] = {}
    +585
    +586    for calc in calculators:
    +587        ret.update(calc.compute(sample).items())
    +588
    +589    return ret
    +590
    +591
    +592def compute_measures(
    +593    measures: Iterable[Measure], dataset: Iterable[Sample]
    +594) -> Dict[Measure, MeasureValue]:
    +595    """compute all `measures` for the given `dataset`
    +596    sample measures are computed for the last sample in `dataset`"""
    +597    sample_calculators, dataset_calculators = get_measure_calculators(measures)
    +598    ret: Dict[Measure, MeasureValue] = {}
    +599    sample = None
    +600    for sample in dataset:
    +601        for calc in dataset_calculators:
    +602            calc.update(sample)
    +603    if sample is None:
    +604        raise ValueError("empty dataset")
    +605
    +606    for calc in dataset_calculators:
    +607        ret.update(calc.finalize().items())
    +608
    +609    for calc in sample_calculators:
    +610        ret.update(calc.compute(sample).items())
    +611
    +612    return ret
    +
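To make the interplay of these calculators concrete, here is a small, self-contained usage sketch (not taken from the library's documentation); the member id, axis names and random data are invented, and Sample/Tensor are assumed to be importable from bioimageio.core as rendered in the signatures on this page:

import numpy as np
from bioimageio.core import Sample, Tensor
from bioimageio.core.stat_calculators import StatsCalculator
from bioimageio.core.stat_measures import DatasetMean, DatasetStd, SampleQuantile
from bioimageio.spec.model.v0_5 import AxisId, TensorId

x, y = AxisId("x"), AxisId("y")
member = TensorId("raw")  # hypothetical member id

def make_sample(i: int) -> Sample:
    data = np.random.rand(64, 64).astype("float32")
    return Sample(members={member: Tensor(data, dims=(x, y))}, stat={}, id=f"sample-{i}")

measures = [
    DatasetMean(member_id=member, axes=(x, y)),
    DatasetStd(member_id=member, axes=(x, y)),
    SampleQuantile(member_id=member, axes=(x, y), q=0.99),
]
stats = StatsCalculator(measures)
for i in range(4):
    # per-sample measures plus the current dataset estimates for this sample
    values = stats.update_and_get_all(make_sample(i))

dataset_stats = stats.finalize()  # aggregated dataset statistics

compute_dataset_measures, compute_sample_measures and compute_measures, defined in this module, offer one-shot variants of the same computation over an iterable of samples.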
    + + +
    +
    + +
    + + class + MeanCalculator: + + + +
    + +
     67class MeanCalculator:
    + 68    """to calculate sample and dataset mean for in-memory samples"""
    + 69
    + 70    def __init__(self, member_id: MemberId, axes: Optional[Sequence[AxisId]]):
    + 71        super().__init__()
    + 72        self._n: int = 0
    + 73        self._mean: Optional[Tensor] = None
    + 74        self._axes = None if axes is None else tuple(axes)
    + 75        self._member_id = member_id
    + 76        self._sample_mean = SampleMean(member_id=self._member_id, axes=self._axes)
    + 77        self._dataset_mean = DatasetMean(member_id=self._member_id, axes=self._axes)
    + 78
    + 79    def compute(self, sample: Sample) -> Dict[SampleMean, MeasureValue]:
    + 80        return {self._sample_mean: self._compute_impl(sample)}
    + 81
    + 82    def _compute_impl(self, sample: Sample) -> Tensor:
    + 83        tensor = sample.members[self._member_id].astype("float64", copy=False)
    + 84        return tensor.mean(dim=self._axes)
    + 85
    + 86    def update(self, sample: Sample) -> None:
    + 87        mean = self._compute_impl(sample)
    + 88        self._update_impl(sample.members[self._member_id], mean)
    + 89
    + 90    def compute_and_update(self, sample: Sample) -> Dict[SampleMean, MeasureValue]:
    + 91        mean = self._compute_impl(sample)
    + 92        self._update_impl(sample.members[self._member_id], mean)
    + 93        return {self._sample_mean: mean}
    + 94
    + 95    def _update_impl(self, tensor: Tensor, tensor_mean: Tensor):
    + 96        assert tensor_mean.dtype == "float64"
    + 97        # reduced voxel count
    + 98        n_b = int(tensor.size / tensor_mean.size)
    + 99
    +100        if self._mean is None:
    +101            assert self._n == 0
    +102            self._n = n_b
    +103            self._mean = tensor_mean
    +104        else:
    +105            assert self._n != 0
    +106            n_a = self._n
    +107            mean_old = self._mean
    +108            self._n = n_a + n_b
    +109            self._mean = (n_a * mean_old + n_b * tensor_mean) / self._n
    +110            assert self._mean.dtype == "float64"
    +111
    +112    def finalize(self) -> Dict[DatasetMean, MeasureValue]:
    +113        if self._mean is None:
    +114            return {}
    +115        else:
    +116            return {self._dataset_mean: self._mean}
    +
    + + +

    to calculate sample and dataset mean for in-memory samples

    +
    + + +
    + +
    + + MeanCalculator( member_id: bioimageio.spec.model.v0_5.TensorId, axes: Optional[Sequence[bioimageio.spec.model.v0_5.AxisId]]) + + + +
    + +
    70    def __init__(self, member_id: MemberId, axes: Optional[Sequence[AxisId]]):
    +71        super().__init__()
    +72        self._n: int = 0
    +73        self._mean: Optional[Tensor] = None
    +74        self._axes = None if axes is None else tuple(axes)
    +75        self._member_id = member_id
    +76        self._sample_mean = SampleMean(member_id=self._member_id, axes=self._axes)
    +77        self._dataset_mean = DatasetMean(member_id=self._member_id, axes=self._axes)
    +
    + + + + +
    +
    + +
+ + def + compute( self, sample: bioimageio.core.Sample) -> Dict[bioimageio.core.stat_measures.SampleMean, Union[float, Annotated[bioimageio.core.Tensor, BeforeValidator(func=<function tensor_custom_before_validator>, json_schema_input_type=PydanticUndefined), PlainSerializer(func=<function tensor_custom_serializer>, return_type=PydanticUndefined, when_used='always')]]]: + + + +
    + +
    79    def compute(self, sample: Sample) -> Dict[SampleMean, MeasureValue]:
    +80        return {self._sample_mean: self._compute_impl(sample)}
    +
    + + + + +
    +
    + +
    + + def + update(self, sample: bioimageio.core.Sample) -> None: + + + +
    + +
    86    def update(self, sample: Sample) -> None:
    +87        mean = self._compute_impl(sample)
    +88        self._update_impl(sample.members[self._member_id], mean)
    +
    + + + + +
    +
    + +
+ + def + compute_and_update( self, sample: bioimageio.core.Sample) -> Dict[bioimageio.core.stat_measures.SampleMean, Union[float, Annotated[bioimageio.core.Tensor, BeforeValidator(func=<function tensor_custom_before_validator>, json_schema_input_type=PydanticUndefined), PlainSerializer(func=<function tensor_custom_serializer>, return_type=PydanticUndefined, when_used='always')]]]: + + + +
    + +
    90    def compute_and_update(self, sample: Sample) -> Dict[SampleMean, MeasureValue]:
    +91        mean = self._compute_impl(sample)
    +92        self._update_impl(sample.members[self._member_id], mean)
    +93        return {self._sample_mean: mean}
    +
    + + + + +
    +
    + +
+ + def + finalize( self) -> Dict[bioimageio.core.stat_measures.DatasetMean, Union[float, Annotated[bioimageio.core.Tensor, BeforeValidator(func=<function tensor_custom_before_validator>, json_schema_input_type=PydanticUndefined), PlainSerializer(func=<function tensor_custom_serializer>, return_type=PydanticUndefined, when_used='always')]]]: + + + +
    + +
    112    def finalize(self) -> Dict[DatasetMean, MeasureValue]:
    +113        if self._mean is None:
    +114            return {}
    +115        else:
    +116            return {self._dataset_mean: self._mean}
    +
    + + + + +
    +
    +
    + +
    + + class + MeanVarStdCalculator: + + + +
    + +
    119class MeanVarStdCalculator:
    +120    """to calculate sample and dataset mean, variance or standard deviation"""
    +121
    +122    def __init__(self, member_id: MemberId, axes: Optional[Sequence[AxisId]]):
    +123        super().__init__()
    +124        self._axes = None if axes is None else tuple(axes)
    +125        self._member_id = member_id
    +126        self._n: int = 0
    +127        self._mean: Optional[Tensor] = None
    +128        self._m2: Optional[Tensor] = None
    +129
    +130    def compute(
    +131        self, sample: Sample
    +132    ) -> Dict[Union[SampleMean, SampleVar, SampleStd], MeasureValue]:
    +133        tensor = sample.members[self._member_id]
    +134        mean = tensor.mean(dim=self._axes)
    +135        c = (tensor - mean).data
    +136        if self._axes is None:
    +137            n = tensor.size
    +138        else:
    +139            n = int(np.prod([tensor.sizes[d] for d in self._axes]))
    +140
    +141        var = xr.dot(c, c, dims=self._axes) / n
    +142        assert isinstance(var, xr.DataArray)
    +143        std = np.sqrt(var)
    +144        assert isinstance(std, xr.DataArray)
    +145        return {
    +146            SampleMean(axes=self._axes, member_id=self._member_id): mean,
    +147            SampleVar(axes=self._axes, member_id=self._member_id): Tensor.from_xarray(
    +148                var
    +149            ),
    +150            SampleStd(axes=self._axes, member_id=self._member_id): Tensor.from_xarray(
    +151                std
    +152            ),
    +153        }
    +154
    +155    def update(self, sample: Sample):
    +156        tensor = sample.members[self._member_id].astype("float64", copy=False)
    +157        mean_b = tensor.mean(dim=self._axes)
    +158        assert mean_b.dtype == "float64"
    +159        # reduced voxel count
    +160        n_b = int(tensor.size / mean_b.size)
    +161        m2_b = ((tensor - mean_b) ** 2).sum(dim=self._axes)
    +162        assert m2_b.dtype == "float64"
    +163        if self._mean is None:
    +164            assert self._m2 is None
    +165            self._n = n_b
    +166            self._mean = mean_b
    +167            self._m2 = m2_b
    +168        else:
    +169            n_a = self._n
    +170            mean_a = self._mean
    +171            m2_a = self._m2
    +172            self._n = n = n_a + n_b
    +173            self._mean = (n_a * mean_a + n_b * mean_b) / n
    +174            assert self._mean.dtype == "float64"
    +175            d = mean_b - mean_a
    +176            self._m2 = m2_a + m2_b + d**2 * n_a * n_b / n
    +177            assert self._m2.dtype == "float64"
    +178
    +179    def finalize(
    +180        self,
    +181    ) -> Dict[Union[DatasetMean, DatasetVar, DatasetStd], MeasureValue]:
    +182        if self._mean is None:
    +183            return {}
    +184        else:
    +185            assert self._m2 is not None
    +186            var = self._m2 / self._n
    +187            sqrt = np.sqrt(var)
    +188            if isinstance(sqrt, (int, float)):
    +189                # var and mean are scalar tensors, let's keep it consistent
    +190                sqrt = Tensor.from_xarray(xr.DataArray(sqrt))
    +191
    +192            assert isinstance(sqrt, Tensor), type(sqrt)
    +193            return {
    +194                DatasetMean(member_id=self._member_id, axes=self._axes): self._mean,
    +195                DatasetVar(member_id=self._member_id, axes=self._axes): var,
    +196                DatasetStd(member_id=self._member_id, axes=self._axes): sqrt,
    +197            }
    +
    + + +

    to calculate sample and dataset mean, variance or standard deviation
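The dataset-scope path of this calculator only keeps a running element count, mean, and sum of squared deviations (M2) and merges each new sample with a pairwise combination formula in `update`. A minimal numpy sketch of that merge (the arrays `a` and `b` are hypothetical stand-ins for two samples, not part of this API):

```python
import numpy as np

# two hypothetical data chunks standing in for two samples
a = np.random.rand(1000)
b = np.random.rand(500)

# per-chunk aggregates: element count, mean, and sum of squared deviations (M2)
n_a, mean_a, m2_a = a.size, a.mean(), ((a - a.mean()) ** 2).sum()
n_b, mean_b, m2_b = b.size, b.mean(), ((b - b.mean()) ** 2).sum()

# pairwise merge, mirroring MeanVarStdCalculator.update
n = n_a + n_b
mean = (n_a * mean_a + n_b * mean_b) / n
d = mean_b - mean_a
m2 = m2_a + m2_b + d**2 * n_a * n_b / n

# the merged aggregates match a direct computation over the pooled data
pooled = np.concatenate([a, b])
assert np.isclose(mean, pooled.mean())
assert np.isclose(m2 / n, pooled.var())  # population variance, as in finalize()
```

This is why the calculator can stream over a dataset while holding no more than three aggregates per member/axes combination in memory.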

    +
    + + +
    + +
    + + MeanVarStdCalculator( member_id: bioimageio.spec.model.v0_5.TensorId, axes: Optional[Sequence[bioimageio.spec.model.v0_5.AxisId]]) + + + +
    + +
    122    def __init__(self, member_id: MemberId, axes: Optional[Sequence[AxisId]]):
    +123        super().__init__()
    +124        self._axes = None if axes is None else tuple(axes)
    +125        self._member_id = member_id
    +126        self._n: int = 0
    +127        self._mean: Optional[Tensor] = None
    +128        self._m2: Optional[Tensor] = None
    +
    + + + + +
    +
    + +
    + + def + compute( self, sample: bioimageio.core.Sample) -> Dict[Union[bioimageio.core.stat_measures.SampleMean, bioimageio.core.stat_measures.SampleVar, bioimageio.core.stat_measures.SampleStd], Union[float, Annotated[bioimageio.core.Tensor, BeforeValidator(func=<function tensor_custom_before_validator at 0x7f5372ef6840>, json_schema_input_type=PydanticUndefined), PlainSerializer(func=<function tensor_custom_serializer at 0x7f5372ef6a20>, return_type=PydanticUndefined, when_used='always')]]]: + + + +
    + +
    130    def compute(
    +131        self, sample: Sample
    +132    ) -> Dict[Union[SampleMean, SampleVar, SampleStd], MeasureValue]:
    +133        tensor = sample.members[self._member_id]
    +134        mean = tensor.mean(dim=self._axes)
    +135        c = (tensor - mean).data
    +136        if self._axes is None:
    +137            n = tensor.size
    +138        else:
    +139            n = int(np.prod([tensor.sizes[d] for d in self._axes]))
    +140
    +141        var = xr.dot(c, c, dims=self._axes) / n
    +142        assert isinstance(var, xr.DataArray)
    +143        std = np.sqrt(var)
    +144        assert isinstance(std, xr.DataArray)
    +145        return {
    +146            SampleMean(axes=self._axes, member_id=self._member_id): mean,
    +147            SampleVar(axes=self._axes, member_id=self._member_id): Tensor.from_xarray(
    +148                var
    +149            ),
    +150            SampleStd(axes=self._axes, member_id=self._member_id): Tensor.from_xarray(
    +151                std
    +152            ),
    +153        }
    +
    + + + + +
    +
    + +
    + + def + update(self, sample: bioimageio.core.Sample): + + + +
    + +
    155    def update(self, sample: Sample):
    +156        tensor = sample.members[self._member_id].astype("float64", copy=False)
    +157        mean_b = tensor.mean(dim=self._axes)
    +158        assert mean_b.dtype == "float64"
    +159        # reduced voxel count
    +160        n_b = int(tensor.size / mean_b.size)
    +161        m2_b = ((tensor - mean_b) ** 2).sum(dim=self._axes)
    +162        assert m2_b.dtype == "float64"
    +163        if self._mean is None:
    +164            assert self._m2 is None
    +165            self._n = n_b
    +166            self._mean = mean_b
    +167            self._m2 = m2_b
    +168        else:
    +169            n_a = self._n
    +170            mean_a = self._mean
    +171            m2_a = self._m2
    +172            self._n = n = n_a + n_b
    +173            self._mean = (n_a * mean_a + n_b * mean_b) / n
    +174            assert self._mean.dtype == "float64"
    +175            d = mean_b - mean_a
    +176            self._m2 = m2_a + m2_b + d**2 * n_a * n_b / n
    +177            assert self._m2.dtype == "float64"
    +
    + + + + +
    +
    + +
    + + def + finalize( self) -> Dict[Union[bioimageio.core.stat_measures.DatasetMean, bioimageio.core.stat_measures.DatasetVar, bioimageio.core.stat_measures.DatasetStd], Union[float, Annotated[bioimageio.core.Tensor, BeforeValidator(func=<function tensor_custom_before_validator at 0x7f5372ef6840>, json_schema_input_type=PydanticUndefined), PlainSerializer(func=<function tensor_custom_serializer at 0x7f5372ef6a20>, return_type=PydanticUndefined, when_used='always')]]]: + + + +
    + +
    179    def finalize(
    +180        self,
    +181    ) -> Dict[Union[DatasetMean, DatasetVar, DatasetStd], MeasureValue]:
    +182        if self._mean is None:
    +183            return {}
    +184        else:
    +185            assert self._m2 is not None
    +186            var = self._m2 / self._n
    +187            sqrt = np.sqrt(var)
    +188            if isinstance(sqrt, (int, float)):
    +189                # var and mean are scalar tensors, let's keep it consistent
    +190                sqrt = Tensor.from_xarray(xr.DataArray(sqrt))
    +191
    +192            assert isinstance(sqrt, Tensor), type(sqrt)
    +193            return {
    +194                DatasetMean(member_id=self._member_id, axes=self._axes): self._mean,
    +195                DatasetVar(member_id=self._member_id, axes=self._axes): var,
    +196                DatasetStd(member_id=self._member_id, axes=self._axes): sqrt,
    +197            }
    +
    + + + + +
    +
    +
    + +
    + + class + SamplePercentilesCalculator: + + + +
    + +
    200class SamplePercentilesCalculator:
    +201    """to calculate sample percentiles"""
    +202
    +203    def __init__(
    +204        self,
    +205        member_id: MemberId,
    +206        axes: Optional[Sequence[AxisId]],
    +207        qs: Collection[float],
    +208    ):
    +209        super().__init__()
    +210        assert all(0.0 <= q <= 1.0 for q in qs)
    +211        self._qs = sorted(set(qs))
    +212        self._axes = None if axes is None else tuple(axes)
    +213        self._member_id = member_id
    +214
    +215    def compute(self, sample: Sample) -> Dict[SampleQuantile, MeasureValue]:
    +216        tensor = sample.members[self._member_id]
    +217        ps = tensor.quantile(self._qs, dim=self._axes)
    +218        return {
    +219            SampleQuantile(q=q, axes=self._axes, member_id=self._member_id): p
    +220            for q, p in zip(self._qs, ps)
    +221        }
    +
    + + +

    to calculate sample percentiles
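A rough usage sketch; the module path `bioimageio.core.stat_calculators`, the `Sample` constructor fields, and the tensor name `raw` are assumptions made for illustration:

```python
import numpy as np

from bioimageio.core import Sample, Tensor
from bioimageio.core.axis import AxisId
from bioimageio.core.common import MemberId
from bioimageio.core.stat_calculators import SamplePercentilesCalculator  # assumed module path

member = MemberId("raw")  # hypothetical tensor name
tensor = Tensor(
    np.random.rand(1, 128, 128),
    dims=(AxisId("batch"), AxisId("y"), AxisId("x")),
)
sample = Sample(members={member: tensor}, stat={}, id="demo")  # constructor fields assumed

calc = SamplePercentilesCalculator(member_id=member, axes=None, qs=[0.05, 0.95])
measures = calc.compute(sample)
# -> {SampleQuantile(q=0.05, member_id=member): ..., SampleQuantile(q=0.95, member_id=member): ...}
```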

    +
    + + +
    + +
    + + SamplePercentilesCalculator( member_id: bioimageio.spec.model.v0_5.TensorId, axes: Optional[Sequence[bioimageio.spec.model.v0_5.AxisId]], qs: Collection[float]) + + + +
    + +
    203    def __init__(
    +204        self,
    +205        member_id: MemberId,
    +206        axes: Optional[Sequence[AxisId]],
    +207        qs: Collection[float],
    +208    ):
    +209        super().__init__()
    +210        assert all(0.0 <= q <= 1.0 for q in qs)
    +211        self._qs = sorted(set(qs))
    +212        self._axes = None if axes is None else tuple(axes)
    +213        self._member_id = member_id
    +
    + + + + +
    +
    + +
    + + def + compute( self, sample: bioimageio.core.Sample) -> Dict[bioimageio.core.stat_measures.SampleQuantile, Union[float, Annotated[bioimageio.core.Tensor, BeforeValidator(func=<function tensor_custom_before_validator at 0x7f5372ef6840>, json_schema_input_type=PydanticUndefined), PlainSerializer(func=<function tensor_custom_serializer at 0x7f5372ef6a20>, return_type=PydanticUndefined, when_used='always')]]]: + + + +
    + +
    215    def compute(self, sample: Sample) -> Dict[SampleQuantile, MeasureValue]:
    +216        tensor = sample.members[self._member_id]
    +217        ps = tensor.quantile(self._qs, dim=self._axes)
    +218        return {
    +219            SampleQuantile(q=q, axes=self._axes, member_id=self._member_id): p
    +220            for q, p in zip(self._qs, ps)
    +221        }
    +
    + + + + +
    +
    +
    + +
    + + class + MeanPercentilesCalculator: + + + +
    + +
    224class MeanPercentilesCalculator:
    +225    """to calculate dataset percentiles heuristically by averaging across samples
+226    **note**: the returned dataset percentiles are an estimate and **not mathematically correct**
    +227    """
    +228
    +229    def __init__(
    +230        self,
    +231        member_id: MemberId,
    +232        axes: Optional[Sequence[AxisId]],
    +233        qs: Collection[float],
    +234    ):
    +235        super().__init__()
    +236        assert all(0.0 <= q <= 1.0 for q in qs)
    +237        self._qs = sorted(set(qs))
    +238        self._axes = None if axes is None else tuple(axes)
    +239        self._member_id = member_id
    +240        self._n: int = 0
    +241        self._estimates: Optional[Tensor] = None
    +242
    +243    def update(self, sample: Sample):
    +244        tensor = sample.members[self._member_id]
    +245        sample_estimates = tensor.quantile(self._qs, dim=self._axes).astype(
    +246            "float64", copy=False
    +247        )
    +248
    +249        # reduced voxel count
    +250        n = int(tensor.size / np.prod(sample_estimates.shape_tuple[1:]))
    +251
    +252        if self._estimates is None:
    +253            assert self._n == 0
    +254            self._estimates = sample_estimates
    +255        else:
    +256            self._estimates = (self._n * self._estimates + n * sample_estimates) / (
    +257                self._n + n
    +258            )
    +259            assert self._estimates.dtype == "float64"
    +260
    +261        self._n += n
    +262
    +263    def finalize(self) -> Dict[DatasetPercentile, MeasureValue]:
    +264        if self._estimates is None:
    +265            return {}
    +266        else:
    +267            warnings.warn(
    +268                "Computed dataset percentiles naively by averaging percentiles of samples."
    +269            )
    +270            return {
    +271                DatasetPercentile(q=q, axes=self._axes, member_id=self._member_id): e
    +272                for q, e in zip(self._qs, self._estimates)
    +273            }
    +
    + + +

to calculate dataset percentiles heuristically by averaging across samples; note: the returned dataset percentiles are an estimate and not mathematically correct
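A small numpy illustration (with hypothetical data) of why this is only an estimate: the average of per-sample medians can differ substantially from the median of the pooled data.

```python
import numpy as np

sample_a = np.array([0.0, 1.0])    # median 0.5
sample_b = np.array([0.0, 100.0])  # median 50.0

mean_of_medians = (np.median(sample_a) + np.median(sample_b)) / 2  # 25.25
pooled_median = np.median(np.concatenate([sample_a, sample_b]))    # 0.5
```

For streaming quantile estimates with bounded error see the `CrickPercentilesCalculator` below.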

    +
    + + +
    + +
    + + MeanPercentilesCalculator( member_id: bioimageio.spec.model.v0_5.TensorId, axes: Optional[Sequence[bioimageio.spec.model.v0_5.AxisId]], qs: Collection[float]) + + + +
    + +
    229    def __init__(
    +230        self,
    +231        member_id: MemberId,
    +232        axes: Optional[Sequence[AxisId]],
    +233        qs: Collection[float],
    +234    ):
    +235        super().__init__()
    +236        assert all(0.0 <= q <= 1.0 for q in qs)
    +237        self._qs = sorted(set(qs))
    +238        self._axes = None if axes is None else tuple(axes)
    +239        self._member_id = member_id
    +240        self._n: int = 0
    +241        self._estimates: Optional[Tensor] = None
    +
    + + + + +
    +
    + +
    + + def + update(self, sample: bioimageio.core.Sample): + + + +
    + +
    243    def update(self, sample: Sample):
    +244        tensor = sample.members[self._member_id]
    +245        sample_estimates = tensor.quantile(self._qs, dim=self._axes).astype(
    +246            "float64", copy=False
    +247        )
    +248
    +249        # reduced voxel count
    +250        n = int(tensor.size / np.prod(sample_estimates.shape_tuple[1:]))
    +251
    +252        if self._estimates is None:
    +253            assert self._n == 0
    +254            self._estimates = sample_estimates
    +255        else:
    +256            self._estimates = (self._n * self._estimates + n * sample_estimates) / (
    +257                self._n + n
    +258            )
    +259            assert self._estimates.dtype == "float64"
    +260
    +261        self._n += n
    +
    + + + + +
    +
    + +
    + + def + finalize( self) -> Dict[bioimageio.core.stat_measures.DatasetPercentile, Union[float, Annotated[bioimageio.core.Tensor, BeforeValidator(func=<function tensor_custom_before_validator at 0x7f5372ef6840>, json_schema_input_type=PydanticUndefined), PlainSerializer(func=<function tensor_custom_serializer at 0x7f5372ef6a20>, return_type=PydanticUndefined, when_used='always')]]]: + + + +
    + +
    263    def finalize(self) -> Dict[DatasetPercentile, MeasureValue]:
    +264        if self._estimates is None:
    +265            return {}
    +266        else:
    +267            warnings.warn(
    +268                "Computed dataset percentiles naively by averaging percentiles of samples."
    +269            )
    +270            return {
    +271                DatasetPercentile(q=q, axes=self._axes, member_id=self._member_id): e
    +272                for q, e in zip(self._qs, self._estimates)
    +273            }
    +
    + + + + +
    +
    +
    + +
    + + class + CrickPercentilesCalculator: + + + +
    + +
    276class CrickPercentilesCalculator:
+277    """to calculate dataset percentiles with the experimental [crick library](https://github.com/dask/crick)"""
    +278
    +279    def __init__(
    +280        self,
    +281        member_id: MemberId,
    +282        axes: Optional[Sequence[AxisId]],
    +283        qs: Collection[float],
    +284    ):
    +285        warnings.warn(
    +286            "Computing dataset percentiles with experimental 'crick' library."
    +287        )
    +288        super().__init__()
    +289        assert all(0.0 <= q <= 1.0 for q in qs)
    +290        assert axes is None or "_percentiles" not in axes
    +291        self._qs = sorted(set(qs))
    +292        self._axes = None if axes is None else tuple(axes)
    +293        self._member_id = member_id
    +294        self._digest: Optional[List[TDigest]] = None
    +295        self._dims: Optional[Tuple[AxisId, ...]] = None
    +296        self._indices: Optional[Iterator[Tuple[int, ...]]] = None
    +297        self._shape: Optional[Tuple[int, ...]] = None
    +298
    +299    def _initialize(self, tensor_sizes: PerAxis[int]):
    +300        assert crick is not None
    +301        out_sizes: OrderedDict[AxisId, int] = collections.OrderedDict(
    +302            _percentiles=len(self._qs)
    +303        )
    +304        if self._axes is not None:
    +305            for d, s in tensor_sizes.items():
    +306                if d not in self._axes:
    +307                    out_sizes[d] = s
    +308
    +309        self._dims, self._shape = zip(*out_sizes.items())
    +310        d = int(np.prod(self._shape[1:]))  # type: ignore
    +311        self._digest = [TDigest() for _ in range(d)]
    +312        self._indices = product(*map(range, self._shape[1:]))
    +313
    +314    def update(self, part: Sample):
    +315        tensor = (
    +316            part.members[self._member_id]
    +317            if isinstance(part, Sample)
    +318            else part.members[self._member_id].data
    +319        )
    +320        assert "_percentiles" not in tensor.dims
    +321        if self._digest is None:
    +322            self._initialize(tensor.tagged_shape)
    +323
    +324        assert self._digest is not None
    +325        assert self._indices is not None
    +326        assert self._dims is not None
    +327        for i, idx in enumerate(self._indices):
    +328            self._digest[i].update(tensor[dict(zip(self._dims[1:], idx))])
    +329
    +330    def finalize(self) -> Dict[DatasetPercentile, MeasureValue]:
    +331        if self._digest is None:
    +332            return {}
    +333        else:
    +334            assert self._dims is not None
    +335            assert self._shape is not None
    +336
    +337            vs: NDArray[Any] = np.asarray(
    +338                [[d.quantile(q) for d in self._digest] for q in self._qs]
    +339            ).reshape(self._shape)
    +340            return {
    +341                DatasetPercentile(
    +342                    q=q, axes=self._axes, member_id=self._member_id
    +343                ): Tensor(v, dims=self._dims[1:])
    +344                for q, v in zip(self._qs, vs)
    +345            }
    +
    + + +

to calculate dataset percentiles with the experimental crick library
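The calculator keeps one t-digest per tensor position that is not reduced away and streams every sample into it, so quantiles can be approximated over arbitrarily many samples with bounded memory. A minimal sketch of that streaming idea with the `update`/`quantile` interface used above (crick is an optional, experimental dependency; the import path and the data are assumptions):

```python
import numpy as np
from crick import TDigest  # optional dependency; import path assumed

digest = TDigest()
for _ in range(5):  # pretend these are successive dataset samples
    digest.update(np.random.rand(10_000))

approx_median = digest.quantile(0.5)  # approximate quantile over all streamed values
```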

    +
    + + +
    + +
    + + CrickPercentilesCalculator( member_id: bioimageio.spec.model.v0_5.TensorId, axes: Optional[Sequence[bioimageio.spec.model.v0_5.AxisId]], qs: Collection[float]) + + + +
    + +
    279    def __init__(
    +280        self,
    +281        member_id: MemberId,
    +282        axes: Optional[Sequence[AxisId]],
    +283        qs: Collection[float],
    +284    ):
    +285        warnings.warn(
    +286            "Computing dataset percentiles with experimental 'crick' library."
    +287        )
    +288        super().__init__()
    +289        assert all(0.0 <= q <= 1.0 for q in qs)
    +290        assert axes is None or "_percentiles" not in axes
    +291        self._qs = sorted(set(qs))
    +292        self._axes = None if axes is None else tuple(axes)
    +293        self._member_id = member_id
    +294        self._digest: Optional[List[TDigest]] = None
    +295        self._dims: Optional[Tuple[AxisId, ...]] = None
    +296        self._indices: Optional[Iterator[Tuple[int, ...]]] = None
    +297        self._shape: Optional[Tuple[int, ...]] = None
    +
    + + + + +
    +
    + +
    + + def + update(self, part: bioimageio.core.Sample): + + + +
    + +
    314    def update(self, part: Sample):
    +315        tensor = (
    +316            part.members[self._member_id]
    +317            if isinstance(part, Sample)
    +318            else part.members[self._member_id].data
    +319        )
    +320        assert "_percentiles" not in tensor.dims
    +321        if self._digest is None:
    +322            self._initialize(tensor.tagged_shape)
    +323
    +324        assert self._digest is not None
    +325        assert self._indices is not None
    +326        assert self._dims is not None
    +327        for i, idx in enumerate(self._indices):
    +328            self._digest[i].update(tensor[dict(zip(self._dims[1:], idx))])
    +
    + + + + +
    +
    + +
    + + def + finalize( self) -> Dict[bioimageio.core.stat_measures.DatasetPercentile, Union[float, Annotated[bioimageio.core.Tensor, BeforeValidator(func=<function tensor_custom_before_validator at 0x7f5372ef6840>, json_schema_input_type=PydanticUndefined), PlainSerializer(func=<function tensor_custom_serializer at 0x7f5372ef6a20>, return_type=PydanticUndefined, when_used='always')]]]: + + + +
    + +
    330    def finalize(self) -> Dict[DatasetPercentile, MeasureValue]:
    +331        if self._digest is None:
    +332            return {}
    +333        else:
    +334            assert self._dims is not None
    +335            assert self._shape is not None
    +336
    +337            vs: NDArray[Any] = np.asarray(
    +338                [[d.quantile(q) for d in self._digest] for q in self._qs]
    +339            ).reshape(self._shape)
    +340            return {
    +341                DatasetPercentile(
    +342                    q=q, axes=self._axes, member_id=self._member_id
    +343                ): Tensor(v, dims=self._dims[1:])
    +344                for q, v in zip(self._qs, vs)
    +345            }
    +
    + + + + +
    +
    +
    + +
    + + class + NaiveSampleMeasureCalculator: + + + +
    + +
    356class NaiveSampleMeasureCalculator:
    +357    """wrapper for measures to match interface of other sample measure calculators"""
    +358
    +359    def __init__(self, member_id: MemberId, measure: SampleMeasure):
    +360        super().__init__()
    +361        self.tensor_name = member_id
    +362        self.measure = measure
    +363
    +364    def compute(self, sample: Sample) -> Dict[SampleMeasure, MeasureValue]:
    +365        return {self.measure: self.measure.compute(sample)}
    +
    + + +

    wrapper for measures to match interface of other sample measure calculators

    +
    + + +
    + +
    + + NaiveSampleMeasureCalculator( member_id: bioimageio.spec.model.v0_5.TensorId, measure: Annotated[Union[bioimageio.core.stat_measures.SampleMean, bioimageio.core.stat_measures.SampleStd, bioimageio.core.stat_measures.SampleVar, bioimageio.core.stat_measures.SampleQuantile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)]) + + + +
    + +
    359    def __init__(self, member_id: MemberId, measure: SampleMeasure):
    +360        super().__init__()
    +361        self.tensor_name = member_id
    +362        self.measure = measure
    +
    + + + + +
    +
    +
    + tensor_name + + +
    + + + + +
    +
    +
    + measure + + +
    + + + + +
    +
    + +
    + + def + compute( self, sample: bioimageio.core.Sample) -> Dict[Annotated[Union[bioimageio.core.stat_measures.SampleMean, bioimageio.core.stat_measures.SampleStd, bioimageio.core.stat_measures.SampleVar, bioimageio.core.stat_measures.SampleQuantile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Union[float, Annotated[bioimageio.core.Tensor, BeforeValidator(func=<function tensor_custom_before_validator at 0x7f5372ef6840>, json_schema_input_type=PydanticUndefined), PlainSerializer(func=<function tensor_custom_serializer at 0x7f5372ef6a20>, return_type=PydanticUndefined, when_used='always')]]]: + + + +
    + +
    364    def compute(self, sample: Sample) -> Dict[SampleMeasure, MeasureValue]:
    +365        return {self.measure: self.measure.compute(sample)}
    +
    + + + + +
    +
    +
    +
    + SampleMeasureCalculator = + + typing.Union[MeanCalculator, MeanVarStdCalculator, SamplePercentilesCalculator, NaiveSampleMeasureCalculator] + + +
    + + + + +
    +
    +
    + DatasetMeasureCalculator = + + typing.Union[MeanCalculator, MeanVarStdCalculator, MeanPercentilesCalculator] + + +
    + + + + +
    +
    + +
    + + class + StatsCalculator: + + + +
    + +
    379class StatsCalculator:
    +380    """Estimates dataset statistics and computes sample statistics efficiently"""
    +381
    +382    def __init__(
    +383        self,
    +384        measures: Collection[Measure],
    +385        initial_dataset_measures: Optional[
    +386            Mapping[DatasetMeasure, MeasureValue]
    +387        ] = None,
    +388    ):
    +389        super().__init__()
    +390        self.sample_count = 0
    +391        self.sample_calculators, self.dataset_calculators = get_measure_calculators(
    +392            measures
    +393        )
    +394        if not initial_dataset_measures:
    +395            self._current_dataset_measures: Optional[
    +396                Dict[DatasetMeasure, MeasureValue]
    +397            ] = None
    +398        else:
    +399            missing_dataset_meas = {
    +400                m
    +401                for m in measures
    +402                if isinstance(m, DatasetMeasureBase)
    +403                and m not in initial_dataset_measures
    +404            }
    +405            if missing_dataset_meas:
    +406                logger.debug(
+407                    f"ignoring `initial_dataset_measures` as it is missing {missing_dataset_meas}"
    +408                )
    +409                self._current_dataset_measures = None
    +410            else:
    +411                self._current_dataset_measures = dict(initial_dataset_measures)
    +412
    +413    @property
    +414    def has_dataset_measures(self):
    +415        return self._current_dataset_measures is not None
    +416
    +417    def update(
    +418        self,
    +419        sample: Union[Sample, Iterable[Sample]],
    +420    ) -> None:
    +421        _ = self._update(sample)
    +422
    +423    def finalize(self) -> Dict[DatasetMeasure, MeasureValue]:
    +424        """returns aggregated dataset statistics"""
    +425        if self._current_dataset_measures is None:
    +426            self._current_dataset_measures = {}
    +427            for calc in self.dataset_calculators:
    +428                values = calc.finalize()
    +429                self._current_dataset_measures.update(values.items())
    +430
    +431        return self._current_dataset_measures
    +432
    +433    def update_and_get_all(
    +434        self,
    +435        sample: Union[Sample, Iterable[Sample]],
    +436    ) -> Dict[Measure, MeasureValue]:
+437        """Returns sample statistics as well as updated dataset statistics"""
    +438        last_sample = self._update(sample)
    +439        if last_sample is None:
    +440            raise ValueError("`sample` was not a `Sample`, nor did it yield any.")
    +441
    +442        return {**self._compute(last_sample), **self.finalize()}
    +443
    +444    def skip_update_and_get_all(self, sample: Sample) -> Dict[Measure, MeasureValue]:
+445        """Returns sample statistics as well as previously computed dataset statistics"""
    +446        return {**self._compute(sample), **self.finalize()}
    +447
    +448    def _compute(self, sample: Sample) -> Dict[SampleMeasure, MeasureValue]:
    +449        ret: Dict[SampleMeasure, MeasureValue] = {}
    +450        for calc in self.sample_calculators:
    +451            values = calc.compute(sample)
    +452            ret.update(values.items())
    +453
    +454        return ret
    +455
    +456    def _update(self, sample: Union[Sample, Iterable[Sample]]) -> Optional[Sample]:
    +457        self.sample_count += 1
    +458        samples = [sample] if isinstance(sample, Sample) else sample
    +459        last_sample = None
    +460        for el in samples:
    +461            last_sample = el
    +462            for calc in self.dataset_calculators:
    +463                calc.update(el)
    +464
    +465        self._current_dataset_measures = None
    +466        return last_sample
    +
    + + +

    Estimates dataset statistics and computes sample statistics efficiently
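A rough usage sketch; the module path `bioimageio.core.stat_calculators`, the `Sample` constructor fields, and the tensor name `raw` are assumptions made for illustration:

```python
import numpy as np

from bioimageio.core import Sample, Tensor
from bioimageio.core.axis import AxisId
from bioimageio.core.common import MemberId
from bioimageio.core.stat_calculators import StatsCalculator  # assumed module path
from bioimageio.core.stat_measures import DatasetMean, DatasetStd, SampleMean

member = MemberId("raw")  # hypothetical tensor name


def make_sample(seed: int) -> Sample:
    data = np.random.default_rng(seed).normal(size=(1, 64, 64))
    tensor = Tensor(data, dims=(AxisId("batch"), AxisId("y"), AxisId("x")))
    return Sample(members={member: tensor}, stat={}, id=f"sample{seed}")  # fields assumed


calc = StatsCalculator(
    measures=[
        SampleMean(member_id=member),
        DatasetMean(member_id=member),
        DatasetStd(member_id=member),
    ]
)
for sample in (make_sample(i) for i in range(3)):
    stats = calc.update_and_get_all(sample)  # per-sample stats + current dataset estimate

dataset_stats = calc.finalize()  # aggregated dataset measures only
```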

    +
    + + +
    + +
    + + StatsCalculator( measures: Collection[Annotated[Union[Annotated[Union[bioimageio.core.stat_measures.SampleMean, bioimageio.core.stat_measures.SampleStd, bioimageio.core.stat_measures.SampleVar, bioimageio.core.stat_measures.SampleQuantile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.core.stat_measures.DatasetMean, bioimageio.core.stat_measures.DatasetStd, bioimageio.core.stat_measures.DatasetVar, bioimageio.core.stat_measures.DatasetPercentile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='scope', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], initial_dataset_measures: Optional[Mapping[Annotated[Union[bioimageio.core.stat_measures.DatasetMean, bioimageio.core.stat_measures.DatasetStd, bioimageio.core.stat_measures.DatasetVar, bioimageio.core.stat_measures.DatasetPercentile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Union[float, Annotated[bioimageio.core.Tensor, BeforeValidator(func=<function tensor_custom_before_validator>, json_schema_input_type=PydanticUndefined), PlainSerializer(func=<function tensor_custom_serializer>, return_type=PydanticUndefined, when_used='always')]]]] = None) + + + +
    + +
    382    def __init__(
    +383        self,
    +384        measures: Collection[Measure],
    +385        initial_dataset_measures: Optional[
    +386            Mapping[DatasetMeasure, MeasureValue]
    +387        ] = None,
    +388    ):
    +389        super().__init__()
    +390        self.sample_count = 0
    +391        self.sample_calculators, self.dataset_calculators = get_measure_calculators(
    +392            measures
    +393        )
    +394        if not initial_dataset_measures:
    +395            self._current_dataset_measures: Optional[
    +396                Dict[DatasetMeasure, MeasureValue]
    +397            ] = None
    +398        else:
    +399            missing_dataset_meas = {
    +400                m
    +401                for m in measures
    +402                if isinstance(m, DatasetMeasureBase)
    +403                and m not in initial_dataset_measures
    +404            }
    +405            if missing_dataset_meas:
    +406                logger.debug(
+407                    f"ignoring `initial_dataset_measures` as it is missing {missing_dataset_meas}"
    +408                )
    +409                self._current_dataset_measures = None
    +410            else:
    +411                self._current_dataset_measures = dict(initial_dataset_measures)
    +
    + + + + +
    +
    +
    + sample_count + + +
    + + + + +
    +
    + +
    + has_dataset_measures + + + +
    + +
    413    @property
    +414    def has_dataset_measures(self):
    +415        return self._current_dataset_measures is not None
    +
    + + + + +
    +
    + +
    + + def + update( self, sample: Union[bioimageio.core.Sample, Iterable[bioimageio.core.Sample]]) -> None: + + + +
    + +
    417    def update(
    +418        self,
    +419        sample: Union[Sample, Iterable[Sample]],
    +420    ) -> None:
    +421        _ = self._update(sample)
    +
    + + + + +
    +
    + +
    + + def + finalize( self) -> Dict[Annotated[Union[bioimageio.core.stat_measures.DatasetMean, bioimageio.core.stat_measures.DatasetStd, bioimageio.core.stat_measures.DatasetVar, bioimageio.core.stat_measures.DatasetPercentile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Union[float, Annotated[bioimageio.core.Tensor, BeforeValidator(func=<function tensor_custom_before_validator at 0x7f5372ef6840>, json_schema_input_type=PydanticUndefined), PlainSerializer(func=<function tensor_custom_serializer at 0x7f5372ef6a20>, return_type=PydanticUndefined, when_used='always')]]]: + + + +
    + +
    423    def finalize(self) -> Dict[DatasetMeasure, MeasureValue]:
    +424        """returns aggregated dataset statistics"""
    +425        if self._current_dataset_measures is None:
    +426            self._current_dataset_measures = {}
    +427            for calc in self.dataset_calculators:
    +428                values = calc.finalize()
    +429                self._current_dataset_measures.update(values.items())
    +430
    +431        return self._current_dataset_measures
    +
    + + +

    returns aggregated dataset statistics

    +
    + + +
    +
    + +
    + + def + update_and_get_all( self, sample: Union[bioimageio.core.Sample, Iterable[bioimageio.core.Sample]]) -> Dict[Annotated[Union[Annotated[Union[bioimageio.core.stat_measures.SampleMean, bioimageio.core.stat_measures.SampleStd, bioimageio.core.stat_measures.SampleVar, bioimageio.core.stat_measures.SampleQuantile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.core.stat_measures.DatasetMean, bioimageio.core.stat_measures.DatasetStd, bioimageio.core.stat_measures.DatasetVar, bioimageio.core.stat_measures.DatasetPercentile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='scope', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Union[float, Annotated[bioimageio.core.Tensor, BeforeValidator(func=<function tensor_custom_before_validator at 0x7f5372ef6840>, json_schema_input_type=PydanticUndefined), PlainSerializer(func=<function tensor_custom_serializer at 0x7f5372ef6a20>, return_type=PydanticUndefined, when_used='always')]]]: + + + +
    + +
    433    def update_and_get_all(
    +434        self,
    +435        sample: Union[Sample, Iterable[Sample]],
    +436    ) -> Dict[Measure, MeasureValue]:
+437        """Returns sample statistics as well as updated dataset statistics"""
    +438        last_sample = self._update(sample)
    +439        if last_sample is None:
    +440            raise ValueError("`sample` was not a `Sample`, nor did it yield any.")
    +441
    +442        return {**self._compute(last_sample), **self.finalize()}
    +
    + + +

Returns sample statistics as well as updated dataset statistics

    +
    + + +
    +
    + +
    + + def + skip_update_and_get_all( self, sample: bioimageio.core.Sample) -> Dict[Annotated[Union[Annotated[Union[bioimageio.core.stat_measures.SampleMean, bioimageio.core.stat_measures.SampleStd, bioimageio.core.stat_measures.SampleVar, bioimageio.core.stat_measures.SampleQuantile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.core.stat_measures.DatasetMean, bioimageio.core.stat_measures.DatasetStd, bioimageio.core.stat_measures.DatasetVar, bioimageio.core.stat_measures.DatasetPercentile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='scope', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Union[float, Annotated[bioimageio.core.Tensor, BeforeValidator(func=<function tensor_custom_before_validator at 0x7f5372ef6840>, json_schema_input_type=PydanticUndefined), PlainSerializer(func=<function tensor_custom_serializer at 0x7f5372ef6a20>, return_type=PydanticUndefined, when_used='always')]]]: + + + +
    + +
    444    def skip_update_and_get_all(self, sample: Sample) -> Dict[Measure, MeasureValue]:
+445        """Returns sample statistics as well as previously computed dataset statistics"""
    +446        return {**self._compute(sample), **self.finalize()}
    +
    + + +

Returns sample statistics as well as previously computed dataset statistics

    +
    + + +
    +
    +
    + +
    + + def + get_measure_calculators( required_measures: Iterable[Annotated[Union[Annotated[Union[bioimageio.core.stat_measures.SampleMean, bioimageio.core.stat_measures.SampleStd, bioimageio.core.stat_measures.SampleVar, bioimageio.core.stat_measures.SampleQuantile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.core.stat_measures.DatasetMean, bioimageio.core.stat_measures.DatasetStd, bioimageio.core.stat_measures.DatasetVar, bioimageio.core.stat_measures.DatasetPercentile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='scope', custom_error_type=None, custom_error_message=None, custom_error_context=None)]]) -> Tuple[List[Union[MeanCalculator, MeanVarStdCalculator, SamplePercentilesCalculator, NaiveSampleMeasureCalculator]], List[Union[MeanCalculator, MeanVarStdCalculator, MeanPercentilesCalculator]]]: + + + +
    + +
    469def get_measure_calculators(
    +470    required_measures: Iterable[Measure],
    +471) -> Tuple[List[SampleMeasureCalculator], List[DatasetMeasureCalculator]]:
    +472    """determines which calculators are needed to compute the required measures efficiently"""
    +473
    +474    sample_calculators: List[SampleMeasureCalculator] = []
    +475    dataset_calculators: List[DatasetMeasureCalculator] = []
    +476
    +477    # split required measures into groups
    +478    required_sample_means: Set[SampleMean] = set()
    +479    required_dataset_means: Set[DatasetMean] = set()
    +480    required_sample_mean_var_std: Set[Union[SampleMean, SampleVar, SampleStd]] = set()
    +481    required_dataset_mean_var_std: Set[Union[DatasetMean, DatasetVar, DatasetStd]] = (
    +482        set()
    +483    )
    +484    required_sample_percentiles: Dict[
    +485        Tuple[MemberId, Optional[Tuple[AxisId, ...]]], Set[float]
    +486    ] = {}
    +487    required_dataset_percentiles: Dict[
    +488        Tuple[MemberId, Optional[Tuple[AxisId, ...]]], Set[float]
    +489    ] = {}
    +490
    +491    for rm in required_measures:
    +492        if isinstance(rm, SampleMean):
    +493            required_sample_means.add(rm)
    +494        elif isinstance(rm, DatasetMean):
    +495            required_dataset_means.add(rm)
    +496        elif isinstance(rm, (SampleVar, SampleStd)):
    +497            required_sample_mean_var_std.update(
    +498                {
    +499                    msv(axes=rm.axes, member_id=rm.member_id)
    +500                    for msv in (SampleMean, SampleStd, SampleVar)
    +501                }
    +502            )
    +503            assert rm in required_sample_mean_var_std
    +504        elif isinstance(rm, (DatasetVar, DatasetStd)):
    +505            required_dataset_mean_var_std.update(
    +506                {
    +507                    msv(axes=rm.axes, member_id=rm.member_id)
    +508                    for msv in (DatasetMean, DatasetStd, DatasetVar)
    +509                }
    +510            )
    +511            assert rm in required_dataset_mean_var_std
    +512        elif isinstance(rm, SampleQuantile):
    +513            required_sample_percentiles.setdefault((rm.member_id, rm.axes), set()).add(
    +514                rm.q
    +515            )
    +516        elif isinstance(rm, DatasetPercentile):
    +517            required_dataset_percentiles.setdefault((rm.member_id, rm.axes), set()).add(
    +518                rm.q
    +519            )
    +520        else:
    +521            assert_never(rm)
    +522
    +523    for rm in required_sample_means:
    +524        if rm in required_sample_mean_var_std:
+525            # computed together with var and std
    +526            continue
    +527
    +528        sample_calculators.append(MeanCalculator(member_id=rm.member_id, axes=rm.axes))
    +529
    +530    for rm in required_sample_mean_var_std:
    +531        sample_calculators.append(
    +532            MeanVarStdCalculator(member_id=rm.member_id, axes=rm.axes)
    +533        )
    +534
    +535    for rm in required_dataset_means:
    +536        if rm in required_dataset_mean_var_std:
+537            # computed together with var and std
    +538            continue
    +539
    +540        dataset_calculators.append(MeanCalculator(member_id=rm.member_id, axes=rm.axes))
    +541
    +542    for rm in required_dataset_mean_var_std:
    +543        dataset_calculators.append(
    +544            MeanVarStdCalculator(member_id=rm.member_id, axes=rm.axes)
    +545        )
    +546
    +547    for (tid, axes), qs in required_sample_percentiles.items():
    +548        sample_calculators.append(
    +549            SamplePercentilesCalculator(member_id=tid, axes=axes, qs=qs)
    +550        )
    +551
    +552    for (tid, axes), qs in required_dataset_percentiles.items():
    +553        dataset_calculators.append(
    +554            DatasetPercentilesCalculator(member_id=tid, axes=axes, qs=qs)
    +555        )
    +556
    +557    return sample_calculators, dataset_calculators
    +
    + + +

    determines which calculators are needed to compute the required measures efficiently
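A rough sketch of the grouping behaviour (module path and tensor name assumed): a requested sample standard deviation is routed to the combined mean/var/std calculator, while a lone dataset mean only needs a plain mean calculator.

```python
from bioimageio.core.common import MemberId
from bioimageio.core.stat_calculators import get_measure_calculators  # assumed module path
from bioimageio.core.stat_measures import DatasetMean, SampleStd

member = MemberId("raw")  # hypothetical tensor name
sample_calcs, dataset_calcs = get_measure_calculators(
    [SampleStd(member_id=member), DatasetMean(member_id=member)]
)
# sample_calcs:  MeanVarStdCalculator entries (std implies mean and var are computed together)
# dataset_calcs: a MeanCalculator for the dataset mean
```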

    +
    + + +
    +
    + +
    + + def + compute_dataset_measures( measures: Iterable[Annotated[Union[bioimageio.core.stat_measures.DatasetMean, bioimageio.core.stat_measures.DatasetStd, bioimageio.core.stat_measures.DatasetVar, bioimageio.core.stat_measures.DatasetPercentile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], dataset: Iterable[bioimageio.core.Sample]) -> Dict[Annotated[Union[bioimageio.core.stat_measures.DatasetMean, bioimageio.core.stat_measures.DatasetStd, bioimageio.core.stat_measures.DatasetVar, bioimageio.core.stat_measures.DatasetPercentile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Union[float, Annotated[bioimageio.core.Tensor, BeforeValidator(func=<function tensor_custom_before_validator at 0x7f5372ef6840>, json_schema_input_type=PydanticUndefined), PlainSerializer(func=<function tensor_custom_serializer at 0x7f5372ef6a20>, return_type=PydanticUndefined, when_used='always')]]]: + + + +
    + +
    560def compute_dataset_measures(
    +561    measures: Iterable[DatasetMeasure], dataset: Iterable[Sample]
    +562) -> Dict[DatasetMeasure, MeasureValue]:
    +563    """compute all dataset `measures` for the given `dataset`"""
    +564    sample_calculators, calculators = get_measure_calculators(measures)
    +565    assert not sample_calculators
    +566
    +567    ret: Dict[DatasetMeasure, MeasureValue] = {}
    +568
    +569    for sample in dataset:
    +570        for calc in calculators:
    +571            calc.update(sample)
    +572
    +573    for calc in calculators:
    +574        ret.update(calc.finalize().items())
    +575
    +576    return ret
    +
    + + +

    compute all dataset measures for the given dataset

    +
    + + +
    +
    + +
    + + def + compute_sample_measures( measures: Iterable[Annotated[Union[bioimageio.core.stat_measures.SampleMean, bioimageio.core.stat_measures.SampleStd, bioimageio.core.stat_measures.SampleVar, bioimageio.core.stat_measures.SampleQuantile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], sample: bioimageio.core.Sample) -> Dict[Annotated[Union[bioimageio.core.stat_measures.SampleMean, bioimageio.core.stat_measures.SampleStd, bioimageio.core.stat_measures.SampleVar, bioimageio.core.stat_measures.SampleQuantile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Union[float, Annotated[bioimageio.core.Tensor, BeforeValidator(func=<function tensor_custom_before_validator at 0x7f5372ef6840>, json_schema_input_type=PydanticUndefined), PlainSerializer(func=<function tensor_custom_serializer at 0x7f5372ef6a20>, return_type=PydanticUndefined, when_used='always')]]]: + + + +
    + +
    579def compute_sample_measures(
    +580    measures: Iterable[SampleMeasure], sample: Sample
    +581) -> Dict[SampleMeasure, MeasureValue]:
    +582    """compute all sample `measures` for the given `sample`"""
    +583    calculators, dataset_calculators = get_measure_calculators(measures)
    +584    assert not dataset_calculators
    +585    ret: Dict[SampleMeasure, MeasureValue] = {}
    +586
    +587    for calc in calculators:
    +588        ret.update(calc.compute(sample).items())
    +589
    +590    return ret
    +
    + + +

    compute all sample measures for the given sample

    +
    + + +
    +
    + +
    + + def + compute_measures( measures: Iterable[Annotated[Union[Annotated[Union[bioimageio.core.stat_measures.SampleMean, bioimageio.core.stat_measures.SampleStd, bioimageio.core.stat_measures.SampleVar, bioimageio.core.stat_measures.SampleQuantile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.core.stat_measures.DatasetMean, bioimageio.core.stat_measures.DatasetStd, bioimageio.core.stat_measures.DatasetVar, bioimageio.core.stat_measures.DatasetPercentile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='scope', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], dataset: Iterable[bioimageio.core.Sample]) -> Dict[Annotated[Union[Annotated[Union[bioimageio.core.stat_measures.SampleMean, bioimageio.core.stat_measures.SampleStd, bioimageio.core.stat_measures.SampleVar, bioimageio.core.stat_measures.SampleQuantile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.core.stat_measures.DatasetMean, bioimageio.core.stat_measures.DatasetStd, bioimageio.core.stat_measures.DatasetVar, bioimageio.core.stat_measures.DatasetPercentile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='scope', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Union[float, Annotated[bioimageio.core.Tensor, BeforeValidator(func=<function tensor_custom_before_validator at 0x7f5372ef6840>, json_schema_input_type=PydanticUndefined), PlainSerializer(func=<function tensor_custom_serializer at 0x7f5372ef6a20>, return_type=PydanticUndefined, when_used='always')]]]: + + + +
    + +
    593def compute_measures(
    +594    measures: Iterable[Measure], dataset: Iterable[Sample]
    +595) -> Dict[Measure, MeasureValue]:
    +596    """compute all `measures` for the given `dataset`
    +597    sample measures are computed for the last sample in `dataset`"""
    +598    sample_calculators, dataset_calculators = get_measure_calculators(measures)
    +599    ret: Dict[Measure, MeasureValue] = {}
    +600    sample = None
    +601    for sample in dataset:
    +602        for calc in dataset_calculators:
    +603            calc.update(sample)
    +604    if sample is None:
    +605        raise ValueError("empty dataset")
    +606
    +607    for calc in dataset_calculators:
    +608        ret.update(calc.finalize().items())
    +609
    +610    for calc in sample_calculators:
    +611        ret.update(calc.compute(sample).items())
    +612
    +613    return ret
    +
    + + +

compute all measures for the given dataset; sample measures are computed for the last sample in dataset
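A rough end-to-end sketch (module path, `Sample` constructor fields, and tensor name are assumptions): dataset measures are aggregated over all samples, while sample measures end up computed for the final sample only.

```python
import numpy as np

from bioimageio.core import Sample, Tensor
from bioimageio.core.axis import AxisId
from bioimageio.core.common import MemberId
from bioimageio.core.stat_calculators import compute_measures  # assumed module path
from bioimageio.core.stat_measures import DatasetMean, SampleQuantile

member = MemberId("raw")  # hypothetical tensor name
dims = (AxisId("batch"), AxisId("y"), AxisId("x"))
samples = [
    Sample(  # constructor fields assumed
        members={member: Tensor(np.random.rand(1, 32, 32), dims=dims)},
        stat={},
        id=f"sample{i}",
    )
    for i in range(4)
]

stats = compute_measures(
    measures=[
        DatasetMean(member_id=member),             # aggregated over all four samples
        SampleQuantile(q=0.99, member_id=member),  # computed for the last sample only
    ],
    dataset=samples,
)
```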

    +
    + + +
    +
    + +
    + + class + TDigest: + + + +
    + +
    56    class TDigest:
    +57        def update(self, obj: Any):
    +58            pass
    +59
    +60        def quantile(self, q: Any) -> Any:
    +61            pass
    +
    + + + + +
    + +
    + + def + update(self, obj: Any): + + + +
    + +
    57        def update(self, obj: Any):
    +58            pass
    +
    + + + + +
    +
    + +
    + + def + quantile(self, q: Any) -> Any: + + + +
    + +
    60        def quantile(self, q: Any) -> Any:
    +61            pass
    +
    + + + + +
    +
    +
    +
    + DatasetPercentilesCalculator: Type[Union[MeanPercentilesCalculator, CrickPercentilesCalculator]] = +<class 'MeanPercentilesCalculator'> + + +
    + + + + +
    +
    + + \ No newline at end of file diff --git a/bioimageio/core/stat_measures.html b/bioimageio/core/stat_measures.html new file mode 100644 index 00000000..228f9277 --- /dev/null +++ b/bioimageio/core/stat_measures.html @@ -0,0 +1,1712 @@ + + + + + + + bioimageio.core.stat_measures API documentation + + + + + + + + + + +
    +
    +

    +bioimageio.core.stat_measures

    + + + + + + +
      1from __future__ import annotations
    +  2
    +  3from abc import ABC, abstractmethod
    +  4from typing import (
    +  5    Any,
    +  6    Dict,
    +  7    Literal,
    +  8    Mapping,
    +  9    Optional,
    + 10    Protocol,
    + 11    Tuple,
    + 12    TypeVar,
    + 13    Union,
    + 14)
    + 15
    + 16import numpy as np
    + 17from pydantic import (
    + 18    BaseModel,
    + 19    BeforeValidator,
    + 20    Discriminator,
    + 21    PlainSerializer,
    + 22)
    + 23from typing_extensions import Annotated
    + 24
    + 25from .axis import AxisId
    + 26from .common import MemberId, PerMember
    + 27from .tensor import Tensor
    + 28
    + 29
    + 30def tensor_custom_before_validator(data: Union[Tensor, Mapping[str, Any]]):
    + 31    if isinstance(data, Tensor):
    + 32        return data
    + 33
    + 34    # custom before validation logic
    + 35    return Tensor(np.asarray(data["data"]), dims=data["dims"])
    + 36
    + 37
    + 38def tensor_custom_serializer(t: Tensor) -> Dict[str, Any]:
+ 39    # custom serialization logic
    + 40    return {"data": t.data.data.tolist(), "dims": list(map(str, t.dims))}
    + 41
    + 42
    + 43MeasureValue = Union[
    + 44    float,
    + 45    Annotated[
    + 46        Tensor,
    + 47        BeforeValidator(tensor_custom_before_validator),
    + 48        PlainSerializer(tensor_custom_serializer),
    + 49    ],
    + 50]
    + 51
    + 52
    + 53# using Sample Protocol really only to avoid circular imports
    + 54class SampleLike(Protocol):
    + 55    @property
    + 56    def members(self) -> PerMember[Tensor]: ...
    + 57
    + 58
    + 59class MeasureBase(BaseModel, frozen=True):
    + 60    member_id: MemberId
    + 61
    + 62
    + 63class SampleMeasureBase(MeasureBase, ABC, frozen=True):
    + 64    scope: Literal["sample"] = "sample"
    + 65
    + 66    @abstractmethod
    + 67    def compute(self, sample: SampleLike) -> MeasureValue:
    + 68        """compute the measure"""
    + 69        ...
    + 70
    + 71
    + 72class DatasetMeasureBase(MeasureBase, ABC, frozen=True):
    + 73    scope: Literal["dataset"] = "dataset"
    + 74
    + 75
    + 76class _Mean(BaseModel, frozen=True):
    + 77    name: Literal["mean"] = "mean"
    + 78    axes: Optional[Tuple[AxisId, ...]] = None
    + 79    """`axes` to reduce"""
    + 80
    + 81
    + 82class SampleMean(_Mean, SampleMeasureBase, frozen=True):
    + 83    """The mean value of a single tensor"""
    + 84
    + 85    def compute(self, sample: SampleLike) -> MeasureValue:
    + 86        tensor = sample.members[self.member_id]
    + 87        return tensor.mean(dim=self.axes)
    + 88
    + 89    def model_post_init(self, __context: Any):
    + 90        assert self.axes is None or AxisId("batch") not in self.axes
    + 91
    + 92
    + 93class DatasetMean(_Mean, DatasetMeasureBase, frozen=True):
    + 94    """The mean value across multiple samples"""
    + 95
    + 96    def model_post_init(self, __context: Any):
    + 97        assert self.axes is None or AxisId("batch") in self.axes
    + 98
    + 99
    +100class _Std(BaseModel, frozen=True):
    +101    name: Literal["std"] = "std"
    +102    axes: Optional[Tuple[AxisId, ...]] = None
    +103    """`axes` to reduce"""
    +104
    +105
    +106class SampleStd(_Std, SampleMeasureBase, frozen=True):
    +107    """The standard deviation of a single tensor"""
    +108
    +109    def compute(self, sample: SampleLike) -> MeasureValue:
    +110        tensor = sample.members[self.member_id]
    +111        return tensor.std(dim=self.axes)
    +112
    +113    def model_post_init(self, __context: Any):
    +114        assert self.axes is None or AxisId("batch") not in self.axes
    +115
    +116
    +117class DatasetStd(_Std, DatasetMeasureBase, frozen=True):
    +118    """The standard deviation across multiple samples"""
    +119
    +120    def model_post_init(self, __context: Any):
    +121        assert self.axes is None or AxisId("batch") in self.axes
    +122
    +123
    +124class _Var(BaseModel, frozen=True):
    +125    name: Literal["var"] = "var"
    +126    axes: Optional[Tuple[AxisId, ...]] = None
    +127    """`axes` to reduce"""
    +128
    +129
    +130class SampleVar(_Var, SampleMeasureBase, frozen=True):
    +131    """The variance of a single tensor"""
    +132
    +133    def compute(self, sample: SampleLike) -> MeasureValue:
    +134        tensor = sample.members[self.member_id]
    +135        return tensor.var(dim=self.axes)
    +136
    +137    def model_post_init(self, __context: Any):
    +138        assert self.axes is None or AxisId("batch") not in self.axes
    +139
    +140
    +141class DatasetVar(_Var, DatasetMeasureBase, frozen=True):
    +142    """The variance across multiple samples"""
    +143
    +144    def model_post_init(self, __context: Any):  # TODO: turn into @model_validator
    +145        assert self.axes is None or AxisId("batch") in self.axes
    +146
    +147
    +148class _Quantile(BaseModel, frozen=True):
    +149    name: Literal["quantile"] = "quantile"
    +150    q: float
    +151    axes: Optional[Tuple[AxisId, ...]] = None
    +152    """`axes` to reduce"""
    +153
    +154    def model_post_init(self, __context: Any):
    +155        assert self.q >= 0.0
    +156        assert self.q <= 1.0
    +157
    +158
    +159class SampleQuantile(_Quantile, SampleMeasureBase, frozen=True):
    +160    """The `n`th percentile of a single tensor"""
    +161
    +162    def compute(self, sample: SampleLike) -> MeasureValue:
    +163        tensor = sample.members[self.member_id]
    +164        return tensor.quantile(self.q, dim=self.axes)
    +165
    +166    def model_post_init(self, __context: Any):
    +167        super().model_post_init(__context)
    +168        assert self.axes is None or AxisId("batch") not in self.axes
    +169
    +170
    +171class DatasetPercentile(_Quantile, DatasetMeasureBase, frozen=True):
    +172    """The `n`th percentile across multiple samples"""
    +173
    +174    def model_post_init(self, __context: Any):
    +175        super().model_post_init(__context)
    +176        assert self.axes is None or AxisId("batch") in self.axes
    +177
    +178
    +179SampleMeasure = Annotated[
    +180    Union[SampleMean, SampleStd, SampleVar, SampleQuantile], Discriminator("name")
    +181]
    +182DatasetMeasure = Annotated[
    +183    Union[DatasetMean, DatasetStd, DatasetVar, DatasetPercentile], Discriminator("name")
    +184]
    +185Measure = Annotated[Union[SampleMeasure, DatasetMeasure], Discriminator("scope")]
    +186Stat = Dict[Measure, MeasureValue]
    +187
    +188MeanMeasure = Union[SampleMean, DatasetMean]
    +189StdMeasure = Union[SampleStd, DatasetStd]
    +190VarMeasure = Union[SampleVar, DatasetVar]
    +191PercentileMeasure = Union[SampleQuantile, DatasetPercentile]
    +192MeanMeasureT = TypeVar("MeanMeasureT", bound=MeanMeasure)
    +193StdMeasureT = TypeVar("StdMeasureT", bound=StdMeasure)
    +194VarMeasureT = TypeVar("VarMeasureT", bound=VarMeasure)
    +195PercentileMeasureT = TypeVar("PercentileMeasureT", bound=PercentileMeasure)
    +
    + + +
    +
    + +
    + + def + tensor_custom_before_validator(data: Union[bioimageio.core.Tensor, Mapping[str, Any]]): + + + +
    + +
    31def tensor_custom_before_validator(data: Union[Tensor, Mapping[str, Any]]):
    +32    if isinstance(data, Tensor):
    +33        return data
    +34
    +35    # custom before validation logic
    +36    return Tensor(np.asarray(data["data"]), dims=data["dims"])
    +
    + + + + +
    +
    + +
    + + def + tensor_custom_serializer(t: bioimageio.core.Tensor) -> Dict[str, Any]: + + + +
    + +
    39def tensor_custom_serializer(t: Tensor) -> Dict[str, Any]:
+40    # custom serialization logic
    +41    return {"data": t.data.data.tolist(), "dims": list(map(str, t.dims))}
    +
    + + + + +
    +
    +
    + MeasureValue = + + typing.Union[float, typing.Annotated[bioimageio.core.Tensor, BeforeValidator(func=<function tensor_custom_before_validator>, json_schema_input_type=PydanticUndefined), PlainSerializer(func=<function tensor_custom_serializer>, return_type=PydanticUndefined, when_used='always')]] + + +
    + + + + +
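Because all measure classes are frozen pydantic models, they are hashable and compare by value, which is what makes the `Stat` mapping (`Dict[Measure, MeasureValue]`) work. A short sketch (the tensor name is hypothetical):

```python
from bioimageio.core.common import MemberId
from bioimageio.core.stat_measures import DatasetMean, SampleMean, Stat

member = MemberId("raw")  # hypothetical tensor name
stat: Stat = {
    SampleMean(member_id=member): 0.25,
    DatasetMean(member_id=member): 0.31,
}

# equal measures hash equally, so lookups work with freshly constructed keys
assert SampleMean(member_id=member) in stat
```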
    +
    + +
    + + class + SampleLike(typing.Protocol): + + + +
    + +
    55class SampleLike(Protocol):
    +56    @property
    +57    def members(self) -> PerMember[Tensor]: ...
    +
    + + +

    Base class for protocol classes.

    + +

    Protocol classes are defined as::

    + +
    class Proto(Protocol):
    +    def meth(self) -> int:
    +        ...
    +
    + +

Such classes are primarily used with static type checkers that recognize structural subtyping (static duck-typing).

    + +

    For example::

    + +
    class C:
    +    def meth(self) -> int:
    +        return 0
    +
    +def func(x: Proto) -> int:
    +    return x.meth()
    +
    +func(C())  # Passes static type check
    +
    + +

See PEP 544 for details. Protocol classes decorated with @typing.runtime_checkable act as simple-minded runtime protocols that check only the presence of given attributes, ignoring their type signatures. Protocol classes can be generic, they are defined as::

    + +
    class GenProto[T](Protocol):
    +    def meth(self) -> T:
    +        ...
    +
    +
    + + +
    + +
    + + SampleLike(*args, **kwargs) + + + +
    + +
    +
    + + + + +
    +
    + +
    + members: Mapping[bioimageio.spec.model.v0_5.TensorId, bioimageio.core.Tensor] + + + +
    + +
    56    @property
    +57    def members(self) -> PerMember[Tensor]: ...
    +
    + + + + +
    +
    +
    + +
    + + class + MeasureBase(pydantic.main.BaseModel): + + + +
    + +
    60class MeasureBase(BaseModel, frozen=True):
    +61    member_id: MemberId
    +
    + + +

    Usage docs: https://docs.pydantic.dev/2.9/concepts/models/

    + +

    A base class for creating Pydantic models.

    + +
    Attributes:
    + +
      +
    • __class_vars__: The names of the class variables defined on the model.
    • +
    • __private_attributes__: Metadata about the private attributes of the model.
    • +
    • __signature__: The synthesized __init__ [Signature][inspect.Signature] of the model.
    • +
    • __pydantic_complete__: Whether model building is completed, or if there are still undefined fields.
    • +
    • __pydantic_core_schema__: The core schema of the model.
    • +
    • __pydantic_custom_init__: Whether the model has a custom __init__ function.
    • +
    • __pydantic_decorators__: Metadata containing the decorators defined on the model. +This replaces Model.__validators__ and Model.__root_validators__ from Pydantic V1.
    • +
    • __pydantic_generic_metadata__: Metadata for generic models; contains data used for a similar purpose to +__args__, __origin__, __parameters__ in typing-module generics. May eventually be replaced by these.
    • +
    • __pydantic_parent_namespace__: Parent namespace of the model, used for automatic rebuilding of models.
    • +
    • __pydantic_post_init__: The name of the post-init method for the model, if defined.
    • +
    • __pydantic_root_model__: Whether the model is a [RootModel][pydantic.root_model.RootModel].
    • +
    • __pydantic_serializer__: The pydantic-core SchemaSerializer used to dump instances of the model.
    • +
    • __pydantic_validator__: The pydantic-core SchemaValidator used to validate instances of the model.
    • +
    • __pydantic_extra__: A dictionary containing extra values, if [extra][pydantic.config.ConfigDict.extra] +is set to 'allow'.
    • +
    • __pydantic_fields_set__: The names of fields explicitly set during instantiation.
    • +
    • __pydantic_private__: Values of private attributes set on the model instance.
    • +
    +
    + + +
    + + + + + +
    +
    +
    + +
    + + class + SampleMeasureBase(MeasureBase, abc.ABC): + + + +
    + +
    64class SampleMeasureBase(MeasureBase, ABC, frozen=True):
    +65    scope: Literal["sample"] = "sample"
    +66
    +67    @abstractmethod
    +68    def compute(self, sample: SampleLike) -> MeasureValue:
    +69        """compute the measure"""
    +70        ...
    +
    + + +

    +
    + + +
    +
    + scope: Literal['sample'] + + +
    + + + + +
    +
    + +
    +
    @abstractmethod
    + + def + compute( self, sample: SampleLike) -> Union[float, Annotated[bioimageio.core.Tensor, BeforeValidator(func=<function tensor_custom_before_validator at 0x7f5372ef6840>, json_schema_input_type=PydanticUndefined), PlainSerializer(func=<function tensor_custom_serializer at 0x7f5372ef6a20>, return_type=PydanticUndefined, when_used='always')]]: + + + +
    + +
    67    @abstractmethod
    +68    def compute(self, sample: SampleLike) -> MeasureValue:
    +69        """compute the measure"""
    +70        ...
    +
    + + +

    compute the measure

    +
    + + +
    +
    +
    Inherited Members
    +
    + +
    +
    +
    +
    + +
    + + class + DatasetMeasureBase(MeasureBase, abc.ABC): + + + +
    + +
    73class DatasetMeasureBase(MeasureBase, ABC, frozen=True):
    +74    scope: Literal["dataset"] = "dataset"
    +
    + + +

    +
    + + +
    +
    + scope: Literal['dataset'] + + +
    + + + + +
    +
    +
    Inherited Members
    +
    + +
    +
    +
    +
    + +
    + + class + SampleMean(_Mean, SampleMeasureBase): + + + +
    + +
    83class SampleMean(_Mean, SampleMeasureBase, frozen=True):
    +84    """The mean value of a single tensor"""
    +85
    +86    def compute(self, sample: SampleLike) -> MeasureValue:
    +87        tensor = sample.members[self.member_id]
    +88        return tensor.mean(dim=self.axes)
    +89
    +90    def model_post_init(self, __context: Any):
    +91        assert self.axes is None or AxisId("batch") not in self.axes
    +
    + + +

    The mean value of a single tensor

    +
    + + +
    + +
    + + def + compute( self, sample: SampleLike) -> Union[float, Annotated[bioimageio.core.Tensor, BeforeValidator(func=<function tensor_custom_before_validator at 0x7f5372ef6840>, json_schema_input_type=PydanticUndefined), PlainSerializer(func=<function tensor_custom_serializer at 0x7f5372ef6a20>, return_type=PydanticUndefined, when_used='always')]]: + + + +
    + +
    86    def compute(self, sample: SampleLike) -> MeasureValue:
    +87        tensor = sample.members[self.member_id]
    +88        return tensor.mean(dim=self.axes)
    +
    + + +

    compute the measure

    +
    + + +
    +
    + +
    + + def + model_post_init(self, _SampleMean__context: Any): + + + +
    + +
    90    def model_post_init(self, __context: Any):
    +91        assert self.axes is None or AxisId("batch") not in self.axes
    +
    + + +

    Override this method to perform additional initialization after __init__ and model_construct. +This is useful if you want to do some validation that requires the entire model to be initialized.

    +
    + + +
    +
    +
    Inherited Members
    +
    +
    _Mean
    +
    name
    +
    axes
    + +
    + + +
    +
    +
    +
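`SampleLike` is a `Protocol`, so any object exposing a `members` mapping can be passed to `compute`. A hedged sketch; the member id `"raw"`, the random data, and the `bioimageio.core.stat_measures` module path are illustrative assumptions (the `TensorId`-as-`MemberId` import follows the `members` signature shown above):

```python
from dataclasses import dataclass
from typing import Mapping

import numpy as np

from bioimageio.core import Tensor
from bioimageio.core.axis import AxisId
from bioimageio.core.stat_measures import SampleMean  # assumed module path
from bioimageio.spec.model.v0_5 import TensorId


@dataclass
class MiniSample:
    """structurally satisfies SampleLike"""

    members: Mapping[TensorId, Tensor]


sample = MiniSample(
    members={TensorId("raw"): Tensor(np.random.rand(4, 5), dims=(AxisId("y"), AxisId("x")))}
)
mean = SampleMean(member_id=TensorId("raw"), axes=None).compute(sample)  # axes=None -> mean over all axes
```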
    + +
    + + class + DatasetMean(_Mean, DatasetMeasureBase): + + + +
    + +
    94class DatasetMean(_Mean, DatasetMeasureBase, frozen=True):
    +95    """The mean value across multiple samples"""
    +96
    +97    def model_post_init(self, __context: Any):
    +98        assert self.axes is None or AxisId("batch") in self.axes
    +
    + + +

    The mean value across multiple samples

    +
    + + +
    + +
    + + def + model_post_init(self, _DatasetMean__context: Any): + + + +
    + +
    97    def model_post_init(self, __context: Any):
    +98        assert self.axes is None or AxisId("batch") in self.axes
    +
    + + +

    Override this method to perform additional initialization after __init__ and model_construct. +This is useful if you want to do some validation that requires the entire model to be initialized.

    +
    + + +
    +
    +
    Inherited Members
    +
    +
    _Mean
    +
    name
    +
    axes
    + +
    + + +
    +
    +
    +
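The `model_post_init` checks above encode the only difference between the sample- and dataset-scoped variants: dataset measures must keep the batch axis in `axes` (or leave `axes` unset), while sample measures must not include it. A small sketch, assuming `axes` accepts a tuple of `AxisId`s and reusing the assumed module path from the previous example:

```python
from bioimageio.core.axis import AxisId
from bioimageio.core.stat_measures import DatasetMean, SampleMean  # assumed module path
from bioimageio.spec.model.v0_5 import TensorId

# fine: dataset-wide mean over batch and spatial axes
DatasetMean(member_id=TensorId("raw"), axes=(AxisId("batch"), AxisId("y"), AxisId("x")))

# fine: per-sample mean over spatial axes only
SampleMean(member_id=TensorId("raw"), axes=(AxisId("y"), AxisId("x")))

# would trip the assertion in SampleMean.model_post_init:
# SampleMean(member_id=TensorId("raw"), axes=(AxisId("batch"),))
```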
    + +
    + + class + SampleStd(_Std, SampleMeasureBase): + + + +
    + +
    107class SampleStd(_Std, SampleMeasureBase, frozen=True):
    +108    """The standard deviation of a single tensor"""
    +109
    +110    def compute(self, sample: SampleLike) -> MeasureValue:
    +111        tensor = sample.members[self.member_id]
    +112        return tensor.std(dim=self.axes)
    +113
    +114    def model_post_init(self, __context: Any):
    +115        assert self.axes is None or AxisId("batch") not in self.axes
    +
    + + +

    The standard deviation of a single tensor

    +
    + + +
    + +
    + + def + compute( self, sample: SampleLike) -> Union[float, Annotated[bioimageio.core.Tensor, BeforeValidator(func=<function tensor_custom_before_validator at 0x7f5372ef6840>, json_schema_input_type=PydanticUndefined), PlainSerializer(func=<function tensor_custom_serializer at 0x7f5372ef6a20>, return_type=PydanticUndefined, when_used='always')]]: + + + +
    + +
    110    def compute(self, sample: SampleLike) -> MeasureValue:
    +111        tensor = sample.members[self.member_id]
    +112        return tensor.std(dim=self.axes)
    +
    + + +

    compute the measure

    +
    + + +
    +
    + +
    + + def + model_post_init(self, _SampleStd__context: Any): + + + +
    + +
    114    def model_post_init(self, __context: Any):
    +115        assert self.axes is None or AxisId("batch") not in self.axes
    +
    + + +

    Override this method to perform additional initialization after __init__ and model_construct. +This is useful if you want to do some validation that requires the entire model to be initialized.

    +
    + + +
    +
    +
    Inherited Members
    +
    +
    _Std
    +
    name
    +
    axes
    + +
    + + +
    +
    +
    +
    + +
    + + class + DatasetStd(_Std, DatasetMeasureBase): + + + +
    + +
    118class DatasetStd(_Std, DatasetMeasureBase, frozen=True):
    +119    """The standard deviation across multiple samples"""
    +120
    +121    def model_post_init(self, __context: Any):
    +122        assert self.axes is None or AxisId("batch") in self.axes
    +
    + + +

    The standard deviation across multiple samples

    +
    + + +
    + +
    + + def + model_post_init(self, _DatasetStd__context: Any): + + + +
    + +
    121    def model_post_init(self, __context: Any):
    +122        assert self.axes is None or AxisId("batch") in self.axes
    +
    + + +

    Override this method to perform additional initialization after __init__ and model_construct. +This is useful if you want to do some validation that requires the entire model to be initialized.

    +
    + + +
    +
    +
    Inherited Members
    +
    +
    _Std
    +
    name
    +
    axes
    + +
    + + +
    +
    +
    +
    + +
    + + class + SampleVar(_Var, SampleMeasureBase): + + + +
    + +
    131class SampleVar(_Var, SampleMeasureBase, frozen=True):
    +132    """The variance of a single tensor"""
    +133
    +134    def compute(self, sample: SampleLike) -> MeasureValue:
    +135        tensor = sample.members[self.member_id]
    +136        return tensor.var(dim=self.axes)
    +137
    +138    def model_post_init(self, __context: Any):
    +139        assert self.axes is None or AxisId("batch") not in self.axes
    +
    + + +

    The variance of a single tensor

    +
    + + +
    + +
    + + def + compute( self, sample: SampleLike) -> Union[float, Annotated[bioimageio.core.Tensor, BeforeValidator(func=<function tensor_custom_before_validator at 0x7f5372ef6840>, json_schema_input_type=PydanticUndefined), PlainSerializer(func=<function tensor_custom_serializer at 0x7f5372ef6a20>, return_type=PydanticUndefined, when_used='always')]]: + + + +
    + +
    134    def compute(self, sample: SampleLike) -> MeasureValue:
    +135        tensor = sample.members[self.member_id]
    +136        return tensor.var(dim=self.axes)
    +
    + + +

    compute the measure

    +
    + + +
    +
    + +
    + + def + model_post_init(self, _SampleVar__context: Any): + + + +
    + +
    138    def model_post_init(self, __context: Any):
    +139        assert self.axes is None or AxisId("batch") not in self.axes
    +
    + + +

    Override this method to perform additional initialization after __init__ and model_construct. +This is useful if you want to do some validation that requires the entire model to be initialized.

    +
    + + +
    +
    +
    Inherited Members
    +
    +
    _Var
    +
    name
    +
    axes
    + +
    + + +
    +
    +
    +
    + +
    + + class + DatasetVar(_Var, DatasetMeasureBase): + + + +
    + +
    142class DatasetVar(_Var, DatasetMeasureBase, frozen=True):
    +143    """The variance across multiple samples"""
    +144
    +145    def model_post_init(self, __context: Any):  # TODO: turn into @model_validator
    +146        assert self.axes is None or AxisId("batch") in self.axes
    +
    + + +

    The variance across multiple samples

    +
    + + +
    + +
    + + def + model_post_init(self, _DatasetVar__context: Any): + + + +
    + +
    145    def model_post_init(self, __context: Any):  # TODO: turn into @model_validator
    +146        assert self.axes is None or AxisId("batch") in self.axes
    +
    + + +

    Override this method to perform additional initialization after __init__ and model_construct. +This is useful if you want to do some validation that requires the entire model to be initialized.

    +
    + + +
    +
    +
    Inherited Members
    +
    +
    _Var
    +
    name
    +
    axes
    + +
    + + +
    +
    +
    +
    + +
    + + class + SampleQuantile(_Quantile, SampleMeasureBase): + + + +
    + +
    160class SampleQuantile(_Quantile, SampleMeasureBase, frozen=True):
    +161    """The `n`th percentile of a single tensor"""
    +162
    +163    def compute(self, sample: SampleLike) -> MeasureValue:
    +164        tensor = sample.members[self.member_id]
    +165        return tensor.quantile(self.q, dim=self.axes)
    +166
    +167    def model_post_init(self, __context: Any):
    +168        super().model_post_init(__context)
    +169        assert self.axes is None or AxisId("batch") not in self.axes
    +
    + + +

    The nth percentile of a single tensor

    +
    + + +
    + +
    + + def + compute( self, sample: SampleLike) -> Union[float, Annotated[bioimageio.core.Tensor, BeforeValidator(func=<function tensor_custom_before_validator at 0x7f5372ef6840>, json_schema_input_type=PydanticUndefined), PlainSerializer(func=<function tensor_custom_serializer at 0x7f5372ef6a20>, return_type=PydanticUndefined, when_used='always')]]: + + + +
    + +
    163    def compute(self, sample: SampleLike) -> MeasureValue:
    +164        tensor = sample.members[self.member_id]
    +165        return tensor.quantile(self.q, dim=self.axes)
    +
    + + +

    compute the measure

    +
    + + +
    +
    + +
    + + def + model_post_init(self, _SampleQuantile__context: Any): + + + +
    + +
    167    def model_post_init(self, __context: Any):
    +168        super().model_post_init(__context)
    +169        assert self.axes is None or AxisId("batch") not in self.axes
    +
    + + +

    Override this method to perform additional initialization after __init__ and model_construct. +This is useful if you want to do some validation that requires the entire model to be initialized.

    +
    + + +
    +
    +
    Inherited Members
    +
    +
    _Quantile
    +
    name
    +
    q
    +
    axes
    + +
    + + +
    +
    +
    +
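`SampleQuantile` works the same way but additionally carries the quantile `q`. A short sketch, continuing the hypothetical `sample` object and imports from the `SampleMean` example above:

```python
from bioimageio.core.stat_measures import SampleQuantile  # assumed module path

# 99th percentile of the "raw" member, reduced over the spatial axes only
p99 = SampleQuantile(
    member_id=TensorId("raw"), q=0.99, axes=(AxisId("y"), AxisId("x"))
).compute(sample)
```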
    + +
    + + class + DatasetPercentile(_Quantile, DatasetMeasureBase): + + + +
    + +
    172class DatasetPercentile(_Quantile, DatasetMeasureBase, frozen=True):
    +173    """The `n`th percentile across multiple samples"""
    +174
    +175    def model_post_init(self, __context: Any):
    +176        super().model_post_init(__context)
    +177        assert self.axes is None or AxisId("batch") in self.axes
    +
    + + +

    The nth percentile across multiple samples

    +
    + + +
    + +
    + + def + model_post_init(self, _DatasetPercentile__context: Any): + + + +
    + +
    175    def model_post_init(self, __context: Any):
    +176        super().model_post_init(__context)
    +177        assert self.axes is None or AxisId("batch") in self.axes
    +
    + + +

    Override this method to perform additional initialization after __init__ and model_construct. +This is useful if you want to do some validation that requires the entire model to be initialized.

    +
    + + +
    +
    +
    Inherited Members
    +
    +
    _Quantile
    +
    name
    +
    q
    +
    axes
    + +
    + + +
    +
    +
    +
    +
    + SampleMeasure = + + typing.Annotated[typing.Union[SampleMean, SampleStd, SampleVar, SampleQuantile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)] + + +
    + + + + +
    +
    +
    + DatasetMeasure = + + typing.Annotated[typing.Union[DatasetMean, DatasetStd, DatasetVar, DatasetPercentile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)] + + +
    + + + + +
    +
    +
    + Measure = + + typing.Annotated[typing.Union[typing.Annotated[typing.Union[SampleMean, SampleStd, SampleVar, SampleQuantile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)], typing.Annotated[typing.Union[DatasetMean, DatasetStd, DatasetVar, DatasetPercentile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='scope', custom_error_type=None, custom_error_message=None, custom_error_context=None)] + + +
    + + + + +
    +
    +
    + Stat = + + typing.Dict[typing.Annotated[typing.Union[typing.Annotated[typing.Union[SampleMean, SampleStd, SampleVar, SampleQuantile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)], typing.Annotated[typing.Union[DatasetMean, DatasetStd, DatasetVar, DatasetPercentile], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='scope', custom_error_type=None, custom_error_message=None, custom_error_context=None)], typing.Union[float, typing.Annotated[bioimageio.core.Tensor, BeforeValidator(func=<function tensor_custom_before_validator>, json_schema_input_type=PydanticUndefined), PlainSerializer(func=<function tensor_custom_serializer>, return_type=PydanticUndefined, when_used='always')]]] + + +
    + + + + +
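Because the measure classes are frozen pydantic models, they are hashable and can serve as dictionary keys; `Stat` maps them to their computed values. A hedged sketch with placeholder ids and numbers (module path assumed to be `bioimageio.core.stat_measures`):

```python
from bioimageio.core.axis import AxisId
from bioimageio.core.stat_measures import (  # assumed module path
    DatasetPercentile,
    SampleMean,
    Stat,
)
from bioimageio.spec.model.v0_5 import TensorId

stat: Stat = {
    SampleMean(member_id=TensorId("raw"), axes=None): 0.5,
    DatasetPercentile(member_id=TensorId("raw"), q=0.01, axes=(AxisId("batch"),)): -1.2,
}
```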
    +
    +
    + MeanMeasure = +typing.Union[SampleMean, DatasetMean] + + +
    + + + + +
    +
    +
    + StdMeasure = +typing.Union[SampleStd, DatasetStd] + + +
    + + + + +
    +
    +
    + VarMeasure = +typing.Union[SampleVar, DatasetVar] + + +
    + + + + +
    +
    +
    + PercentileMeasure = + + typing.Union[SampleQuantile, DatasetPercentile] + + +
    + + + + +
    +
    + + \ No newline at end of file diff --git a/bioimageio/core/tensor.html b/bioimageio/core/tensor.html new file mode 100644 index 00000000..4d203b85 --- /dev/null +++ b/bioimageio/core/tensor.html @@ -0,0 +1,2018 @@ + + + + + + + bioimageio.core.tensor API documentation + + + + + + + + + + +
    +
    +

    +bioimageio.core.tensor

    + + + + + + +
      1from __future__ import annotations
    +  2
    +  3import collections.abc
    +  4from itertools import permutations
    +  5from typing import (
    +  6    TYPE_CHECKING,
    +  7    Any,
    +  8    Callable,
    +  9    Dict,
    + 10    Iterator,
    + 11    Mapping,
    + 12    Optional,
    + 13    Sequence,
    + 14    Tuple,
    + 15    Union,
    + 16    cast,
    + 17    get_args,
    + 18)
    + 19
    + 20import numpy as np
    + 21import xarray as xr
    + 22from loguru import logger
    + 23from numpy.typing import DTypeLike, NDArray
    + 24from typing_extensions import Self, assert_never
    + 25
    + 26from bioimageio.spec.model import v0_5
    + 27
    + 28from ._magic_tensor_ops import MagicTensorOpsMixin
    + 29from .axis import Axis, AxisId, AxisInfo, AxisLike, PerAxis
    + 30from .common import (
    + 31    CropWhere,
    + 32    DTypeStr,
    + 33    PadMode,
    + 34    PadWhere,
    + 35    PadWidth,
    + 36    PadWidthLike,
    + 37    SliceInfo,
    + 38)
    + 39
    + 40if TYPE_CHECKING:
    + 41    from numpy.typing import ArrayLike, NDArray
    + 42
    + 43
    + 44_ScalarOrArray = Union["ArrayLike", np.generic, "NDArray[Any]"]  # TODO: add "DaskArray"
    + 45
    + 46
    + 47# TODO: complete docstrings
    + 48class Tensor(MagicTensorOpsMixin):
    + 49    """A wrapper around an xr.DataArray for better integration with bioimageio.spec
    + 50    and improved type annotations."""
    + 51
    + 52    _Compatible = Union["Tensor", xr.DataArray, _ScalarOrArray]
    + 53
    + 54    def __init__(
    + 55        self,
    + 56        array: NDArray[Any],
    + 57        dims: Sequence[Union[AxisId, AxisLike]],
    + 58    ) -> None:
    + 59        super().__init__()
    + 60        axes = tuple(
    + 61            a if isinstance(a, AxisId) else AxisInfo.create(a).id for a in dims
    + 62        )
    + 63        self._data = xr.DataArray(array, dims=axes)
    + 64
    + 65    def __array__(self, dtype: DTypeLike = None):
    + 66        return np.asarray(self._data, dtype=dtype)
    + 67
    + 68    def __getitem__(
    + 69        self, key: Union[SliceInfo, slice, int, PerAxis[Union[SliceInfo, slice, int]]]
    + 70    ) -> Self:
    + 71        if isinstance(key, SliceInfo):
    + 72            key = slice(*key)
    + 73        elif isinstance(key, collections.abc.Mapping):
    + 74            key = {
    + 75                a: s if isinstance(s, int) else s if isinstance(s, slice) else slice(*s)
    + 76                for a, s in key.items()
    + 77            }
    + 78        return self.__class__.from_xarray(self._data[key])
    + 79
    + 80    def __setitem__(self, key: PerAxis[Union[SliceInfo, slice]], value: Tensor) -> None:
    + 81        key = {a: s if isinstance(s, slice) else slice(*s) for a, s in key.items()}
    + 82        self._data[key] = value._data
    + 83
    + 84    def __len__(self) -> int:
    + 85        return len(self.data)
    + 86
    + 87    def _iter(self: Any) -> Iterator[Any]:
    + 88        for n in range(len(self)):
    + 89            yield self[n]
    + 90
    + 91    def __iter__(self: Any) -> Iterator[Any]:
    + 92        if self.ndim == 0:
    + 93            raise TypeError("iteration over a 0-d array")
    + 94        return self._iter()
    + 95
    + 96    def _binary_op(
    + 97        self,
    + 98        other: _Compatible,
    + 99        f: Callable[[Any, Any], Any],
    +100        reflexive: bool = False,
    +101    ) -> Self:
    +102        data = self._data._binary_op(  # pyright: ignore[reportPrivateUsage]
    +103            (other._data if isinstance(other, Tensor) else other),
    +104            f,
    +105            reflexive,
    +106        )
    +107        return self.__class__.from_xarray(data)
    +108
    +109    def _inplace_binary_op(
    +110        self,
    +111        other: _Compatible,
    +112        f: Callable[[Any, Any], Any],
    +113    ) -> Self:
    +114        _ = self._data._inplace_binary_op(  # pyright: ignore[reportPrivateUsage]
    +115            (
    +116                other_d
    +117                if (other_d := getattr(other, "data")) is not None
    +118                and isinstance(
    +119                    other_d,
    +120                    xr.DataArray,
    +121                )
    +122                else other
    +123            ),
    +124            f,
    +125        )
    +126        return self
    +127
    +128    def _unary_op(self, f: Callable[[Any], Any], *args: Any, **kwargs: Any) -> Self:
    +129        data = self._data._unary_op(  # pyright: ignore[reportPrivateUsage]
    +130            f, *args, **kwargs
    +131        )
    +132        return self.__class__.from_xarray(data)
    +133
    +134    @classmethod
    +135    def from_xarray(cls, data_array: xr.DataArray) -> Self:
    +136        """create a `Tensor` from an xarray data array
    +137
+138        note for internal use: this factory method is round-trip safe
    +139            for any `Tensor`'s  `data` property (an xarray.DataArray).
    +140        """
    +141        return cls(
    +142            array=data_array.data, dims=tuple(AxisId(d) for d in data_array.dims)
    +143        )
    +144
    +145    @classmethod
    +146    def from_numpy(
    +147        cls,
    +148        array: NDArray[Any],
    +149        *,
    +150        dims: Optional[Union[AxisLike, Sequence[AxisLike]]],
    +151    ) -> Tensor:
    +152        """create a `Tensor` from a numpy array
    +153
    +154        Args:
    +155            array: the nd numpy array
+156            dims: A description of the array's axes,
+157                if None, axes are guessed (which might fail and raise a ValueError).
+158
+159        Raises:
+160            ValueError: if `dims` is None and axes guessing fails.
    +161        """
    +162
    +163        if dims is None:
    +164            return cls._interprete_array_wo_known_axes(array)
    +165        elif isinstance(dims, (str, Axis, v0_5.AxisBase)):
    +166            dims = [dims]
    +167
    +168        axis_infos = [AxisInfo.create(a) for a in dims]
    +169        original_shape = tuple(array.shape)
    +170
    +171        successful_view = _get_array_view(array, axis_infos)
    +172        if successful_view is None:
    +173            raise ValueError(
    +174                f"Array shape {original_shape} does not map to axes {dims}"
    +175            )
    +176
    +177        return Tensor(successful_view, dims=tuple(a.id for a in axis_infos))
    +178
    +179    @property
    +180    def data(self):
    +181        return self._data
    +182
    +183    @property
    +184    def dims(self):  # TODO: rename to `axes`?
    +185        """Tuple of dimension names associated with this tensor."""
    +186        return cast(Tuple[AxisId, ...], self._data.dims)
    +187
    +188    @property
    +189    def tagged_shape(self):
    +190        """(alias for `sizes`) Ordered, immutable mapping from axis ids to lengths."""
    +191        return self.sizes
    +192
    +193    @property
    +194    def shape_tuple(self):
    +195        """Tuple of tensor axes lengths"""
    +196        return self._data.shape
    +197
    +198    @property
    +199    def size(self):
    +200        """Number of elements in the tensor.
    +201
+202        Equal to math.prod(tensor.shape), i.e., the product of the tensor's dimensions.
    +203        """
    +204        return self._data.size
    +205
    +206    def sum(self, dim: Optional[Union[AxisId, Sequence[AxisId]]] = None) -> Self:
    +207        """Reduce this Tensor's data by applying sum along some dimension(s)."""
    +208        return self.__class__.from_xarray(self._data.sum(dim=dim))
    +209
    +210    @property
    +211    def ndim(self):
    +212        """Number of tensor dimensions."""
    +213        return self._data.ndim
    +214
    +215    @property
    +216    def dtype(self) -> DTypeStr:
    +217        dt = str(self.data.dtype)  # pyright: ignore[reportUnknownArgumentType]
    +218        assert dt in get_args(DTypeStr)
    +219        return dt  # pyright: ignore[reportReturnType]
    +220
    +221    @property
    +222    def sizes(self):
    +223        """Ordered, immutable mapping from axis ids to axis lengths."""
    +224        return cast(Mapping[AxisId, int], self.data.sizes)
    +225
    +226    def astype(self, dtype: DTypeStr, *, copy: bool = False):
    +227        """Return tensor cast to `dtype`
    +228
+229        note: if `dtype` is already satisfied, a copy is only made if `copy` is True."""
    +230        return self.__class__.from_xarray(self._data.astype(dtype, copy=copy))
    +231
    +232    def clip(self, min: Optional[float] = None, max: Optional[float] = None):
    +233        """Return a tensor whose values are limited to [min, max].
    +234        At least one of max or min must be given."""
    +235        return self.__class__.from_xarray(self._data.clip(min, max))
    +236
    +237    def crop_to(
    +238        self,
    +239        sizes: PerAxis[int],
    +240        crop_where: Union[
    +241            CropWhere,
    +242            PerAxis[CropWhere],
    +243        ] = "left_and_right",
    +244    ) -> Self:
    +245        """crop to match `sizes`"""
    +246        if isinstance(crop_where, str):
    +247            crop_axis_where: PerAxis[CropWhere] = {a: crop_where for a in self.dims}
    +248        else:
    +249            crop_axis_where = crop_where
    +250
    +251        slices: Dict[AxisId, SliceInfo] = {}
    +252
    +253        for a, s_is in self.sizes.items():
    +254            if a not in sizes or sizes[a] == s_is:
    +255                pass
    +256            elif sizes[a] > s_is:
    +257                logger.warning(
    +258                    "Cannot crop axis {} of size {} to larger size {}",
    +259                    a,
    +260                    s_is,
    +261                    sizes[a],
    +262                )
    +263            elif a not in crop_axis_where:
    +264                raise ValueError(
    +265                    f"Don't know where to crop axis {a}, `crop_where`={crop_where}"
    +266                )
    +267            else:
    +268                crop_this_axis_where = crop_axis_where[a]
    +269                if crop_this_axis_where == "left":
    +270                    slices[a] = SliceInfo(s_is - sizes[a], s_is)
    +271                elif crop_this_axis_where == "right":
    +272                    slices[a] = SliceInfo(0, sizes[a])
    +273                elif crop_this_axis_where == "left_and_right":
    +274                    slices[a] = SliceInfo(
    +275                        start := (s_is - sizes[a]) // 2, sizes[a] + start
    +276                    )
    +277                else:
    +278                    assert_never(crop_this_axis_where)
    +279
    +280        return self[slices]
    +281
    +282    def expand_dims(self, dims: Union[Sequence[AxisId], PerAxis[int]]) -> Self:
    +283        return self.__class__.from_xarray(self._data.expand_dims(dims=dims))
    +284
    +285    def mean(self, dim: Optional[Union[AxisId, Sequence[AxisId]]] = None) -> Self:
    +286        return self.__class__.from_xarray(self._data.mean(dim=dim))
    +287
    +288    def std(self, dim: Optional[Union[AxisId, Sequence[AxisId]]] = None) -> Self:
    +289        return self.__class__.from_xarray(self._data.std(dim=dim))
    +290
    +291    def var(self, dim: Optional[Union[AxisId, Sequence[AxisId]]] = None) -> Self:
    +292        return self.__class__.from_xarray(self._data.var(dim=dim))
    +293
    +294    def pad(
    +295        self,
    +296        pad_width: PerAxis[PadWidthLike],
    +297        mode: PadMode = "symmetric",
    +298    ) -> Self:
    +299        pad_width = {a: PadWidth.create(p) for a, p in pad_width.items()}
    +300        return self.__class__.from_xarray(
    +301            self._data.pad(pad_width=pad_width, mode=mode)
    +302        )
    +303
    +304    def pad_to(
    +305        self,
    +306        sizes: PerAxis[int],
    +307        pad_where: Union[PadWhere, PerAxis[PadWhere]] = "left_and_right",
    +308        mode: PadMode = "symmetric",
    +309    ) -> Self:
    +310        """pad `tensor` to match `sizes`"""
    +311        if isinstance(pad_where, str):
    +312            pad_axis_where: PerAxis[PadWhere] = {a: pad_where for a in self.dims}
    +313        else:
    +314            pad_axis_where = pad_where
    +315
    +316        pad_width: Dict[AxisId, PadWidth] = {}
    +317        for a, s_is in self.sizes.items():
    +318            if a not in sizes or sizes[a] == s_is:
    +319                pad_width[a] = PadWidth(0, 0)
    +320            elif s_is > sizes[a]:
    +321                pad_width[a] = PadWidth(0, 0)
    +322                logger.warning(
    +323                    "Cannot pad axis {} of size {} to smaller size {}",
    +324                    a,
    +325                    s_is,
    +326                    sizes[a],
    +327                )
    +328            elif a not in pad_axis_where:
    +329                raise ValueError(
    +330                    f"Don't know where to pad axis {a}, `pad_where`={pad_where}"
    +331                )
    +332            else:
    +333                pad_this_axis_where = pad_axis_where[a]
    +334                d = sizes[a] - s_is
    +335                if pad_this_axis_where == "left":
    +336                    pad_width[a] = PadWidth(d, 0)
    +337                elif pad_this_axis_where == "right":
    +338                    pad_width[a] = PadWidth(0, d)
    +339                elif pad_this_axis_where == "left_and_right":
    +340                    pad_width[a] = PadWidth(left := d // 2, d - left)
    +341                else:
    +342                    assert_never(pad_this_axis_where)
    +343
    +344        return self.pad(pad_width, mode)
    +345
    +346    def quantile(
    +347        self,
    +348        q: Union[float, Sequence[float]],
    +349        dim: Optional[Union[AxisId, Sequence[AxisId]]] = None,
    +350    ) -> Self:
    +351        assert (
    +352            isinstance(q, (float, int))
    +353            and q >= 0.0
    +354            or not isinstance(q, (float, int))
    +355            and all(qq >= 0.0 for qq in q)
    +356        )
    +357        assert (
    +358            isinstance(q, (float, int))
    +359            and q <= 1.0
    +360            or not isinstance(q, (float, int))
    +361            and all(qq <= 1.0 for qq in q)
    +362        )
    +363        assert dim is None or (
    +364            (quantile_dim := AxisId("quantile")) != dim and quantile_dim not in set(dim)
    +365        )
    +366        return self.__class__.from_xarray(self._data.quantile(q, dim=dim))
    +367
    +368    def resize_to(
    +369        self,
    +370        sizes: PerAxis[int],
    +371        *,
    +372        pad_where: Union[
    +373            PadWhere,
    +374            PerAxis[PadWhere],
    +375        ] = "left_and_right",
    +376        crop_where: Union[
    +377            CropWhere,
    +378            PerAxis[CropWhere],
    +379        ] = "left_and_right",
    +380        pad_mode: PadMode = "symmetric",
    +381    ):
    +382        """return cropped/padded tensor with `sizes`"""
    +383        crop_to_sizes: Dict[AxisId, int] = {}
    +384        pad_to_sizes: Dict[AxisId, int] = {}
    +385        new_axes = dict(sizes)
    +386        for a, s_is in self.sizes.items():
    +387            a = AxisId(str(a))
    +388            _ = new_axes.pop(a, None)
    +389            if a not in sizes or sizes[a] == s_is:
    +390                pass
    +391            elif s_is > sizes[a]:
    +392                crop_to_sizes[a] = sizes[a]
    +393            else:
    +394                pad_to_sizes[a] = sizes[a]
    +395
    +396        tensor = self
    +397        if crop_to_sizes:
    +398            tensor = tensor.crop_to(crop_to_sizes, crop_where=crop_where)
    +399
    +400        if pad_to_sizes:
    +401            tensor = tensor.pad_to(pad_to_sizes, pad_where=pad_where, mode=pad_mode)
    +402
    +403        if new_axes:
    +404            tensor = tensor.expand_dims(new_axes)
    +405
    +406        return tensor
    +407
    +408    def transpose(
    +409        self,
    +410        axes: Sequence[AxisId],
    +411    ) -> Self:
    +412        """return a transposed tensor
    +413
    +414        Args:
    +415            axes: the desired tensor axes
    +416        """
    +417        # expand missing tensor axes
    +418        missing_axes = tuple(a for a in axes if a not in self.dims)
    +419        array = self._data
    +420        if missing_axes:
    +421            array = array.expand_dims(missing_axes)
    +422
    +423        # transpose to the correct axis order
    +424        return self.__class__.from_xarray(array.transpose(*axes))
    +425
    +426    @classmethod
    +427    def _interprete_array_wo_known_axes(cls, array: NDArray[Any]):
    +428        ndim = array.ndim
    +429        if ndim == 2:
    +430            current_axes = (
    +431                v0_5.SpaceInputAxis(id=AxisId("y"), size=array.shape[0]),
    +432                v0_5.SpaceInputAxis(id=AxisId("x"), size=array.shape[1]),
    +433            )
    +434        elif ndim == 3 and any(s <= 3 for s in array.shape):
    +435            current_axes = (
    +436                v0_5.ChannelAxis(
    +437                    channel_names=[
    +438                        v0_5.Identifier(f"channel{i}") for i in range(array.shape[0])
    +439                    ]
    +440                ),
    +441                v0_5.SpaceInputAxis(id=AxisId("y"), size=array.shape[1]),
    +442                v0_5.SpaceInputAxis(id=AxisId("x"), size=array.shape[2]),
    +443            )
    +444        elif ndim == 3:
    +445            current_axes = (
    +446                v0_5.SpaceInputAxis(id=AxisId("z"), size=array.shape[0]),
    +447                v0_5.SpaceInputAxis(id=AxisId("y"), size=array.shape[1]),
    +448                v0_5.SpaceInputAxis(id=AxisId("x"), size=array.shape[2]),
    +449            )
    +450        elif ndim == 4:
    +451            current_axes = (
    +452                v0_5.ChannelAxis(
    +453                    channel_names=[
    +454                        v0_5.Identifier(f"channel{i}") for i in range(array.shape[0])
    +455                    ]
    +456                ),
    +457                v0_5.SpaceInputAxis(id=AxisId("z"), size=array.shape[1]),
    +458                v0_5.SpaceInputAxis(id=AxisId("y"), size=array.shape[2]),
    +459                v0_5.SpaceInputAxis(id=AxisId("x"), size=array.shape[3]),
    +460            )
    +461        elif ndim == 5:
    +462            current_axes = (
    +463                v0_5.BatchAxis(),
    +464                v0_5.ChannelAxis(
    +465                    channel_names=[
    +466                        v0_5.Identifier(f"channel{i}") for i in range(array.shape[1])
    +467                    ]
    +468                ),
    +469                v0_5.SpaceInputAxis(id=AxisId("z"), size=array.shape[2]),
    +470                v0_5.SpaceInputAxis(id=AxisId("y"), size=array.shape[3]),
    +471                v0_5.SpaceInputAxis(id=AxisId("x"), size=array.shape[4]),
    +472            )
    +473        else:
    +474            raise ValueError(f"Could not guess an axis mapping for {array.shape}")
    +475
    +476        return cls(array, dims=tuple(a.id for a in current_axes))
    +477
    +478
    +479def _add_singletons(arr: NDArray[Any], axis_infos: Sequence[AxisInfo]):
    +480    if len(arr.shape) > len(axis_infos):
    +481        # remove singletons
    +482        for i, s in enumerate(arr.shape):
    +483            if s == 1:
    +484                arr = np.take(arr, 0, axis=i)
    +485                if len(arr.shape) == len(axis_infos):
    +486                    break
    +487
+488    # add singletons if necessary
    +489    for i, a in enumerate(axis_infos):
    +490        if len(arr.shape) >= len(axis_infos):
    +491            break
    +492
    +493        if a.maybe_singleton:
    +494            arr = np.expand_dims(arr, i)
    +495
    +496    return arr
    +497
    +498
    +499def _get_array_view(
    +500    original_array: NDArray[Any], axis_infos: Sequence[AxisInfo]
    +501) -> Optional[NDArray[Any]]:
    +502    perms = list(permutations(range(len(original_array.shape))))
    +503    perms.insert(1, perms.pop())  # try A and A.T first
    +504
    +505    for perm in perms:
    +506        view = original_array.transpose(perm)
    +507        view = _add_singletons(view, axis_infos)
    +508        if len(view.shape) != len(axis_infos):
    +509            return None
    +510
    +511        for s, a in zip(view.shape, axis_infos):
    +512            if s == 1 and not a.maybe_singleton:
    +513                break
    +514        else:
    +515            return view
    +516
    +517    return None
    +
    + + +
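A short usage sketch for the module above; the array shapes, axis ids, and target sizes are made up, and the `Tensor`/`AxisId` import locations follow the annotations shown in this documentation:

```python
import numpy as np

from bioimageio.core import Tensor
from bioimageio.core.axis import AxisId

# explicit axis ids via the constructor
t = Tensor(
    np.zeros((2, 3, 256, 256), dtype="float32"),
    dims=(AxisId("batch"), AxisId("channel"), AxisId("y"), AxisId("x")),
)

# dims=None triggers axis guessing (_interprete_array_wo_known_axes);
# a 4d array is interpreted as channel, z, y, x
guessed = Tensor.from_numpy(np.zeros((3, 8, 64, 64), dtype="float32"), dims=None)

# crop y and pad x to reach the requested sizes
resized = t.resize_to({AxisId("y"): 200, AxisId("x"): 300})
print(resized.tagged_shape)  # -> batch: 2, channel: 3, y: 200, x: 300
```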
    +
    + +
    + + class + Tensor(bioimageio.core._magic_tensor_ops.MagicTensorOpsMixin): + + + +
    + +
     49class Tensor(MagicTensorOpsMixin):
    + 50    """A wrapper around an xr.DataArray for better integration with bioimageio.spec
    + 51    and improved type annotations."""
    + 52
    + 53    _Compatible = Union["Tensor", xr.DataArray, _ScalarOrArray]
    + 54
    + 55    def __init__(
    + 56        self,
    + 57        array: NDArray[Any],
    + 58        dims: Sequence[Union[AxisId, AxisLike]],
    + 59    ) -> None:
    + 60        super().__init__()
    + 61        axes = tuple(
    + 62            a if isinstance(a, AxisId) else AxisInfo.create(a).id for a in dims
    + 63        )
    + 64        self._data = xr.DataArray(array, dims=axes)
    + 65
    + 66    def __array__(self, dtype: DTypeLike = None):
    + 67        return np.asarray(self._data, dtype=dtype)
    + 68
    + 69    def __getitem__(
    + 70        self, key: Union[SliceInfo, slice, int, PerAxis[Union[SliceInfo, slice, int]]]
    + 71    ) -> Self:
    + 72        if isinstance(key, SliceInfo):
    + 73            key = slice(*key)
    + 74        elif isinstance(key, collections.abc.Mapping):
    + 75            key = {
    + 76                a: s if isinstance(s, int) else s if isinstance(s, slice) else slice(*s)
    + 77                for a, s in key.items()
    + 78            }
    + 79        return self.__class__.from_xarray(self._data[key])
    + 80
    + 81    def __setitem__(self, key: PerAxis[Union[SliceInfo, slice]], value: Tensor) -> None:
    + 82        key = {a: s if isinstance(s, slice) else slice(*s) for a, s in key.items()}
    + 83        self._data[key] = value._data
    + 84
    + 85    def __len__(self) -> int:
    + 86        return len(self.data)
    + 87
    + 88    def _iter(self: Any) -> Iterator[Any]:
    + 89        for n in range(len(self)):
    + 90            yield self[n]
    + 91
    + 92    def __iter__(self: Any) -> Iterator[Any]:
    + 93        if self.ndim == 0:
    + 94            raise TypeError("iteration over a 0-d array")
    + 95        return self._iter()
    + 96
    + 97    def _binary_op(
    + 98        self,
    + 99        other: _Compatible,
    +100        f: Callable[[Any, Any], Any],
    +101        reflexive: bool = False,
    +102    ) -> Self:
    +103        data = self._data._binary_op(  # pyright: ignore[reportPrivateUsage]
    +104            (other._data if isinstance(other, Tensor) else other),
    +105            f,
    +106            reflexive,
    +107        )
    +108        return self.__class__.from_xarray(data)
    +109
    +110    def _inplace_binary_op(
    +111        self,
    +112        other: _Compatible,
    +113        f: Callable[[Any, Any], Any],
    +114    ) -> Self:
    +115        _ = self._data._inplace_binary_op(  # pyright: ignore[reportPrivateUsage]
    +116            (
    +117                other_d
    +118                if (other_d := getattr(other, "data")) is not None
    +119                and isinstance(
    +120                    other_d,
    +121                    xr.DataArray,
    +122                )
    +123                else other
    +124            ),
    +125            f,
    +126        )
    +127        return self
    +128
    +129    def _unary_op(self, f: Callable[[Any], Any], *args: Any, **kwargs: Any) -> Self:
    +130        data = self._data._unary_op(  # pyright: ignore[reportPrivateUsage]
    +131            f, *args, **kwargs
    +132        )
    +133        return self.__class__.from_xarray(data)
    +134
    +135    @classmethod
    +136    def from_xarray(cls, data_array: xr.DataArray) -> Self:
    +137        """create a `Tensor` from an xarray data array
    +138
+139        note for internal use: this factory method is round-trip safe
    +140            for any `Tensor`'s  `data` property (an xarray.DataArray).
    +141        """
    +142        return cls(
    +143            array=data_array.data, dims=tuple(AxisId(d) for d in data_array.dims)
    +144        )
    +145
    +146    @classmethod
    +147    def from_numpy(
    +148        cls,
    +149        array: NDArray[Any],
    +150        *,
    +151        dims: Optional[Union[AxisLike, Sequence[AxisLike]]],
    +152    ) -> Tensor:
    +153        """create a `Tensor` from a numpy array
    +154
    +155        Args:
    +156            array: the nd numpy array
+157            dims: A description of the array's axes,
+158                if None, axes are guessed (which might fail and raise a ValueError).
+159
+160        Raises:
+161            ValueError: if `dims` is None and axes guessing fails.
    +162        """
    +163
    +164        if dims is None:
    +165            return cls._interprete_array_wo_known_axes(array)
    +166        elif isinstance(dims, (str, Axis, v0_5.AxisBase)):
    +167            dims = [dims]
    +168
    +169        axis_infos = [AxisInfo.create(a) for a in dims]
    +170        original_shape = tuple(array.shape)
    +171
    +172        successful_view = _get_array_view(array, axis_infos)
    +173        if successful_view is None:
    +174            raise ValueError(
    +175                f"Array shape {original_shape} does not map to axes {dims}"
    +176            )
    +177
    +178        return Tensor(successful_view, dims=tuple(a.id for a in axis_infos))
    +179
    +180    @property
    +181    def data(self):
    +182        return self._data
    +183
    +184    @property
    +185    def dims(self):  # TODO: rename to `axes`?
    +186        """Tuple of dimension names associated with this tensor."""
    +187        return cast(Tuple[AxisId, ...], self._data.dims)
    +188
    +189    @property
    +190    def tagged_shape(self):
    +191        """(alias for `sizes`) Ordered, immutable mapping from axis ids to lengths."""
    +192        return self.sizes
    +193
    +194    @property
    +195    def shape_tuple(self):
    +196        """Tuple of tensor axes lengths"""
    +197        return self._data.shape
    +198
    +199    @property
    +200    def size(self):
    +201        """Number of elements in the tensor.
    +202
+203        Equal to math.prod(tensor.shape), i.e., the product of the tensor's dimensions.
    +204        """
    +205        return self._data.size
    +206
    +207    def sum(self, dim: Optional[Union[AxisId, Sequence[AxisId]]] = None) -> Self:
    +208        """Reduce this Tensor's data by applying sum along some dimension(s)."""
    +209        return self.__class__.from_xarray(self._data.sum(dim=dim))
    +210
    +211    @property
    +212    def ndim(self):
    +213        """Number of tensor dimensions."""
    +214        return self._data.ndim
    +215
    +216    @property
    +217    def dtype(self) -> DTypeStr:
    +218        dt = str(self.data.dtype)  # pyright: ignore[reportUnknownArgumentType]
    +219        assert dt in get_args(DTypeStr)
    +220        return dt  # pyright: ignore[reportReturnType]
    +221
    +222    @property
    +223    def sizes(self):
    +224        """Ordered, immutable mapping from axis ids to axis lengths."""
    +225        return cast(Mapping[AxisId, int], self.data.sizes)
    +226
    +227    def astype(self, dtype: DTypeStr, *, copy: bool = False):
    +228        """Return tensor cast to `dtype`
    +229
+230        note: if `dtype` is already satisfied, a copy is only made if `copy` is True."""
    +231        return self.__class__.from_xarray(self._data.astype(dtype, copy=copy))
    +232
    +233    def clip(self, min: Optional[float] = None, max: Optional[float] = None):
    +234        """Return a tensor whose values are limited to [min, max].
    +235        At least one of max or min must be given."""
    +236        return self.__class__.from_xarray(self._data.clip(min, max))
    +237
    +238    def crop_to(
    +239        self,
    +240        sizes: PerAxis[int],
    +241        crop_where: Union[
    +242            CropWhere,
    +243            PerAxis[CropWhere],
    +244        ] = "left_and_right",
    +245    ) -> Self:
    +246        """crop to match `sizes`"""
    +247        if isinstance(crop_where, str):
    +248            crop_axis_where: PerAxis[CropWhere] = {a: crop_where for a in self.dims}
    +249        else:
    +250            crop_axis_where = crop_where
    +251
    +252        slices: Dict[AxisId, SliceInfo] = {}
    +253
    +254        for a, s_is in self.sizes.items():
    +255            if a not in sizes or sizes[a] == s_is:
    +256                pass
    +257            elif sizes[a] > s_is:
    +258                logger.warning(
    +259                    "Cannot crop axis {} of size {} to larger size {}",
    +260                    a,
    +261                    s_is,
    +262                    sizes[a],
    +263                )
    +264            elif a not in crop_axis_where:
    +265                raise ValueError(
    +266                    f"Don't know where to crop axis {a}, `crop_where`={crop_where}"
    +267                )
    +268            else:
    +269                crop_this_axis_where = crop_axis_where[a]
    +270                if crop_this_axis_where == "left":
    +271                    slices[a] = SliceInfo(s_is - sizes[a], s_is)
    +272                elif crop_this_axis_where == "right":
    +273                    slices[a] = SliceInfo(0, sizes[a])
    +274                elif crop_this_axis_where == "left_and_right":
    +275                    slices[a] = SliceInfo(
    +276                        start := (s_is - sizes[a]) // 2, sizes[a] + start
    +277                    )
    +278                else:
    +279                    assert_never(crop_this_axis_where)
    +280
    +281        return self[slices]
    +282
    +283    def expand_dims(self, dims: Union[Sequence[AxisId], PerAxis[int]]) -> Self:
    +284        return self.__class__.from_xarray(self._data.expand_dims(dims=dims))
    +285
    +286    def mean(self, dim: Optional[Union[AxisId, Sequence[AxisId]]] = None) -> Self:
    +287        return self.__class__.from_xarray(self._data.mean(dim=dim))
    +288
    +289    def std(self, dim: Optional[Union[AxisId, Sequence[AxisId]]] = None) -> Self:
    +290        return self.__class__.from_xarray(self._data.std(dim=dim))
    +291
    +292    def var(self, dim: Optional[Union[AxisId, Sequence[AxisId]]] = None) -> Self:
    +293        return self.__class__.from_xarray(self._data.var(dim=dim))
    +294
    +295    def pad(
    +296        self,
    +297        pad_width: PerAxis[PadWidthLike],
    +298        mode: PadMode = "symmetric",
    +299    ) -> Self:
    +300        pad_width = {a: PadWidth.create(p) for a, p in pad_width.items()}
    +301        return self.__class__.from_xarray(
    +302            self._data.pad(pad_width=pad_width, mode=mode)
    +303        )
    +304
    +305    def pad_to(
    +306        self,
    +307        sizes: PerAxis[int],
    +308        pad_where: Union[PadWhere, PerAxis[PadWhere]] = "left_and_right",
    +309        mode: PadMode = "symmetric",
    +310    ) -> Self:
    +311        """pad `tensor` to match `sizes`"""
    +312        if isinstance(pad_where, str):
    +313            pad_axis_where: PerAxis[PadWhere] = {a: pad_where for a in self.dims}
    +314        else:
    +315            pad_axis_where = pad_where
    +316
    +317        pad_width: Dict[AxisId, PadWidth] = {}
    +318        for a, s_is in self.sizes.items():
    +319            if a not in sizes or sizes[a] == s_is:
    +320                pad_width[a] = PadWidth(0, 0)
    +321            elif s_is > sizes[a]:
    +322                pad_width[a] = PadWidth(0, 0)
    +323                logger.warning(
    +324                    "Cannot pad axis {} of size {} to smaller size {}",
    +325                    a,
    +326                    s_is,
    +327                    sizes[a],
    +328                )
    +329            elif a not in pad_axis_where:
    +330                raise ValueError(
    +331                    f"Don't know where to pad axis {a}, `pad_where`={pad_where}"
    +332                )
    +333            else:
    +334                pad_this_axis_where = pad_axis_where[a]
    +335                d = sizes[a] - s_is
    +336                if pad_this_axis_where == "left":
    +337                    pad_width[a] = PadWidth(d, 0)
    +338                elif pad_this_axis_where == "right":
    +339                    pad_width[a] = PadWidth(0, d)
    +340                elif pad_this_axis_where == "left_and_right":
    +341                    pad_width[a] = PadWidth(left := d // 2, d - left)
    +342                else:
    +343                    assert_never(pad_this_axis_where)
    +344
    +345        return self.pad(pad_width, mode)
    +346
    +347    def quantile(
    +348        self,
    +349        q: Union[float, Sequence[float]],
    +350        dim: Optional[Union[AxisId, Sequence[AxisId]]] = None,
    +351    ) -> Self:
    +352        assert (
    +353            isinstance(q, (float, int))
    +354            and q >= 0.0
    +355            or not isinstance(q, (float, int))
    +356            and all(qq >= 0.0 for qq in q)
    +357        )
    +358        assert (
    +359            isinstance(q, (float, int))
    +360            and q <= 1.0
    +361            or not isinstance(q, (float, int))
    +362            and all(qq <= 1.0 for qq in q)
    +363        )
    +364        assert dim is None or (
    +365            (quantile_dim := AxisId("quantile")) != dim and quantile_dim not in set(dim)
    +366        )
    +367        return self.__class__.from_xarray(self._data.quantile(q, dim=dim))
    +368
    +369    def resize_to(
    +370        self,
    +371        sizes: PerAxis[int],
    +372        *,
    +373        pad_where: Union[
    +374            PadWhere,
    +375            PerAxis[PadWhere],
    +376        ] = "left_and_right",
    +377        crop_where: Union[
    +378            CropWhere,
    +379            PerAxis[CropWhere],
    +380        ] = "left_and_right",
    +381        pad_mode: PadMode = "symmetric",
    +382    ):
    +383        """return cropped/padded tensor with `sizes`"""
    +384        crop_to_sizes: Dict[AxisId, int] = {}
    +385        pad_to_sizes: Dict[AxisId, int] = {}
    +386        new_axes = dict(sizes)
    +387        for a, s_is in self.sizes.items():
    +388            a = AxisId(str(a))
    +389            _ = new_axes.pop(a, None)
    +390            if a not in sizes or sizes[a] == s_is:
    +391                pass
    +392            elif s_is > sizes[a]:
    +393                crop_to_sizes[a] = sizes[a]
    +394            else:
    +395                pad_to_sizes[a] = sizes[a]
    +396
    +397        tensor = self
    +398        if crop_to_sizes:
    +399            tensor = tensor.crop_to(crop_to_sizes, crop_where=crop_where)
    +400
    +401        if pad_to_sizes:
    +402            tensor = tensor.pad_to(pad_to_sizes, pad_where=pad_where, mode=pad_mode)
    +403
    +404        if new_axes:
    +405            tensor = tensor.expand_dims(new_axes)
    +406
    +407        return tensor
    +408
    +409    def transpose(
    +410        self,
    +411        axes: Sequence[AxisId],
    +412    ) -> Self:
    +413        """return a transposed tensor
    +414
    +415        Args:
    +416            axes: the desired tensor axes
    +417        """
    +418        # expand missing tensor axes
    +419        missing_axes = tuple(a for a in axes if a not in self.dims)
    +420        array = self._data
    +421        if missing_axes:
    +422            array = array.expand_dims(missing_axes)
    +423
    +424        # transpose to the correct axis order
    +425        return self.__class__.from_xarray(array.transpose(*axes))
    +426
    +427    @classmethod
    +428    def _interprete_array_wo_known_axes(cls, array: NDArray[Any]):
    +429        ndim = array.ndim
    +430        if ndim == 2:
    +431            current_axes = (
    +432                v0_5.SpaceInputAxis(id=AxisId("y"), size=array.shape[0]),
    +433                v0_5.SpaceInputAxis(id=AxisId("x"), size=array.shape[1]),
    +434            )
    +435        elif ndim == 3 and any(s <= 3 for s in array.shape):
    +436            current_axes = (
    +437                v0_5.ChannelAxis(
    +438                    channel_names=[
    +439                        v0_5.Identifier(f"channel{i}") for i in range(array.shape[0])
    +440                    ]
    +441                ),
    +442                v0_5.SpaceInputAxis(id=AxisId("y"), size=array.shape[1]),
    +443                v0_5.SpaceInputAxis(id=AxisId("x"), size=array.shape[2]),
    +444            )
    +445        elif ndim == 3:
    +446            current_axes = (
    +447                v0_5.SpaceInputAxis(id=AxisId("z"), size=array.shape[0]),
    +448                v0_5.SpaceInputAxis(id=AxisId("y"), size=array.shape[1]),
    +449                v0_5.SpaceInputAxis(id=AxisId("x"), size=array.shape[2]),
    +450            )
    +451        elif ndim == 4:
    +452            current_axes = (
    +453                v0_5.ChannelAxis(
    +454                    channel_names=[
    +455                        v0_5.Identifier(f"channel{i}") for i in range(array.shape[0])
    +456                    ]
    +457                ),
    +458                v0_5.SpaceInputAxis(id=AxisId("z"), size=array.shape[1]),
    +459                v0_5.SpaceInputAxis(id=AxisId("y"), size=array.shape[2]),
    +460                v0_5.SpaceInputAxis(id=AxisId("x"), size=array.shape[3]),
    +461            )
    +462        elif ndim == 5:
    +463            current_axes = (
    +464                v0_5.BatchAxis(),
    +465                v0_5.ChannelAxis(
    +466                    channel_names=[
    +467                        v0_5.Identifier(f"channel{i}") for i in range(array.shape[1])
    +468                    ]
    +469                ),
    +470                v0_5.SpaceInputAxis(id=AxisId("z"), size=array.shape[2]),
    +471                v0_5.SpaceInputAxis(id=AxisId("y"), size=array.shape[3]),
    +472                v0_5.SpaceInputAxis(id=AxisId("x"), size=array.shape[4]),
    +473            )
    +474        else:
    +475            raise ValueError(f"Could not guess an axis mapping for {array.shape}")
    +476
    +477        return cls(array, dims=tuple(a.id for a in current_axes))
    +
    + + +

    A wrapper around an xr.DataArray for better integration with bioimageio.spec +and improved type annotations.

    +
    + + +
    + +
    + + Tensor( array: numpy.ndarray[typing.Any, numpy.dtype[typing.Any]], dims: Sequence[Union[bioimageio.spec.model.v0_5.AxisId, Literal['b', 'i', 't', 'c', 'z', 'y', 'x'], Annotated[Union[bioimageio.spec.model.v0_5.BatchAxis, bioimageio.spec.model.v0_5.ChannelAxis, bioimageio.spec.model.v0_5.IndexInputAxis, bioimageio.spec.model.v0_5.TimeInputAxis, bioimageio.spec.model.v0_5.SpaceInputAxis], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.model.v0_5.BatchAxis, bioimageio.spec.model.v0_5.ChannelAxis, bioimageio.spec.model.v0_5.IndexOutputAxis, Annotated[Union[Annotated[bioimageio.spec.model.v0_5.TimeOutputAxis, Tag(tag='wo_halo')], Annotated[bioimageio.spec.model.v0_5.TimeOutputAxisWithHalo, Tag(tag='with_halo')]], Discriminator(discriminator=<function _get_halo_axis_discriminator_value>, custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[Annotated[bioimageio.spec.model.v0_5.SpaceOutputAxis, Tag(tag='wo_halo')], Annotated[bioimageio.spec.model.v0_5.SpaceOutputAxisWithHalo, Tag(tag='with_halo')]], Discriminator(discriminator=<function _get_halo_axis_discriminator_value>, custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], bioimageio.core.Axis]]) + + + +
    + +
    55    def __init__(
    +56        self,
    +57        array: NDArray[Any],
    +58        dims: Sequence[Union[AxisId, AxisLike]],
    +59    ) -> None:
    +60        super().__init__()
    +61        axes = tuple(
    +62            a if isinstance(a, AxisId) else AxisInfo.create(a).id for a in dims
    +63        )
    +64        self._data = xr.DataArray(array, dims=axes)
    +
    + + + + +
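Example (an illustrative sketch of constructing a Tensor directly; assumes `Tensor` is importable from `bioimageio.core` and `AxisId` from `bioimageio.spec.model.v0_5`, as referenced in the signature above):

    import numpy as np
    from bioimageio.core import Tensor
    from bioimageio.spec.model.v0_5 import AxisId

    # wrap a plain numpy array, naming both of its axes explicitly
    data = np.zeros((256, 256), dtype="float32")
    tensor = Tensor(data, dims=(AxisId("y"), AxisId("x")))
    print(tensor.sizes)  # ordered mapping from axis id to length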
    +
    + +
    +
    @classmethod
    + + def + from_xarray(cls, data_array: xarray.core.dataarray.DataArray) -> Self: + + + +
    + +
    135    @classmethod
    +136    def from_xarray(cls, data_array: xr.DataArray) -> Self:
    +137        """create a `Tensor` from an xarray data array
    +138
    +139        note for internal use: this factory method is round-trip safe
    +140            for any `Tensor`'s  `data` property (an xarray.DataArray).
    +141        """
    +142        return cls(
    +143            array=data_array.data, dims=tuple(AxisId(d) for d in data_array.dims)
    +144        )
    +
    + + +

    create a Tensor from an xarray data array

    + +

    note for internal use: this factory method is round-trip safe + for any Tensor's data property (an xarray.DataArray).

    +
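Example (illustrative; the axis names and shape are arbitrary):

    import numpy as np
    import xarray as xr
    from bioimageio.core import Tensor

    # wrap an existing DataArray; its dimension names become the tensor's axis ids
    data_array = xr.DataArray(np.random.rand(3, 64, 64), dims=("c", "y", "x"))
    tensor = Tensor.from_xarray(data_array)
    print(tensor.dims)  # the tensor's axis ids, derived from the DataArray dims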
    + + +
    +
    + +
    +
    @classmethod
    + + def + from_numpy( cls, array: numpy.ndarray[typing.Any, numpy.dtype[typing.Any]], *, dims: Union[bioimageio.spec.model.v0_5.AxisId, Literal['b', 'i', 't', 'c', 'z', 'y', 'x'], Annotated[Union[bioimageio.spec.model.v0_5.BatchAxis, bioimageio.spec.model.v0_5.ChannelAxis, bioimageio.spec.model.v0_5.IndexInputAxis, bioimageio.spec.model.v0_5.TimeInputAxis, bioimageio.spec.model.v0_5.SpaceInputAxis], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.model.v0_5.BatchAxis, bioimageio.spec.model.v0_5.ChannelAxis, bioimageio.spec.model.v0_5.IndexOutputAxis, Annotated[Union[Annotated[bioimageio.spec.model.v0_5.TimeOutputAxis, Tag(tag='wo_halo')], Annotated[bioimageio.spec.model.v0_5.TimeOutputAxisWithHalo, Tag(tag='with_halo')]], Discriminator(discriminator=<function _get_halo_axis_discriminator_value>, custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[Annotated[bioimageio.spec.model.v0_5.SpaceOutputAxis, Tag(tag='wo_halo')], Annotated[bioimageio.spec.model.v0_5.SpaceOutputAxisWithHalo, Tag(tag='with_halo')]], Discriminator(discriminator=<function _get_halo_axis_discriminator_value>, custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], bioimageio.core.Axis, Sequence[Union[bioimageio.spec.model.v0_5.AxisId, Literal['b', 'i', 't', 'c', 'z', 'y', 'x'], Annotated[Union[bioimageio.spec.model.v0_5.BatchAxis, bioimageio.spec.model.v0_5.ChannelAxis, bioimageio.spec.model.v0_5.IndexInputAxis, bioimageio.spec.model.v0_5.TimeInputAxis, bioimageio.spec.model.v0_5.SpaceInputAxis], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.model.v0_5.BatchAxis, bioimageio.spec.model.v0_5.ChannelAxis, bioimageio.spec.model.v0_5.IndexOutputAxis, Annotated[Union[Annotated[bioimageio.spec.model.v0_5.TimeOutputAxis, Tag(tag='wo_halo')], Annotated[bioimageio.spec.model.v0_5.TimeOutputAxisWithHalo, Tag(tag='with_halo')]], Discriminator(discriminator=<function _get_halo_axis_discriminator_value>, custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[Annotated[bioimageio.spec.model.v0_5.SpaceOutputAxis, Tag(tag='wo_halo')], Annotated[bioimageio.spec.model.v0_5.SpaceOutputAxisWithHalo, Tag(tag='with_halo')]], Discriminator(discriminator=<function _get_halo_axis_discriminator_value>, custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], bioimageio.core.Axis]], NoneType]) -> Tensor: + + + +
    + +
    146    @classmethod
    +147    def from_numpy(
    +148        cls,
    +149        array: NDArray[Any],
    +150        *,
    +151        dims: Optional[Union[AxisLike, Sequence[AxisLike]]],
    +152    ) -> Tensor:
    +153        """create a `Tensor` from a numpy array
    +154
    +155        Args:
    +156            array: the nd numpy array
    +157            dims: A description of the array's axes,
    +158                if None axes are guessed (which might fail and raise a ValueError).
    +159
    +160        Raises:
    +161            ValueError: if `dims` is None and axes guessing fails.
    +162        """
    +163
    +164        if dims is None:
    +165            return cls._interprete_array_wo_known_axes(array)
    +166        elif isinstance(dims, (str, Axis, v0_5.AxisBase)):
    +167            dims = [dims]
    +168
    +169        axis_infos = [AxisInfo.create(a) for a in dims]
    +170        original_shape = tuple(array.shape)
    +171
    +172        successful_view = _get_array_view(array, axis_infos)
    +173        if successful_view is None:
    +174            raise ValueError(
    +175                f"Array shape {original_shape} does not map to axes {dims}"
    +176            )
    +177
    +178        return Tensor(successful_view, dims=tuple(a.id for a in axis_infos))
    +
    + + +

    create a Tensor from a numpy array

    + +
    Arguments:
    + +
      +
    • array: the nd numpy array
    • +
    • dims: A description of the array's axes, +if None axes are guessed (which might fail and raise a ValueError).
    • +
    + +
    Raises:
    + +
      +
    • ValueError: if dims is None and axes guessing fails.
    • +
    +
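Example (illustrative; the shape is arbitrary and the single-character axis shorthands are those listed in the signature above):

    import numpy as np
    from bioimageio.core import Tensor

    arr = np.zeros((3, 128, 128), dtype="float32")

    # explicit axis description using the single-character shorthands
    t_explicit = Tensor.from_numpy(arr, dims=("c", "y", "x"))

    # let the axes be guessed from the array shape (may raise ValueError)
    t_guessed = Tensor.from_numpy(arr, dims=None)
    print(t_explicit.tagged_shape, t_guessed.tagged_shape)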
    + + +
    +
    + +
    + data + + + +
    + +
    180    @property
    +181    def data(self):
    +182        return self._data
    +
    + + + + +
    +
    + +
    + dims + + + +
    + +
    184    @property
    +185    def dims(self):  # TODO: rename to `axes`?
    +186        """Tuple of dimension names associated with this tensor."""
    +187        return cast(Tuple[AxisId, ...], self._data.dims)
    +
    + + +

    Tuple of dimension names associated with this tensor.

    +
    + + +
    +
    + +
    + tagged_shape + + + +
    + +
    189    @property
    +190    def tagged_shape(self):
    +191        """(alias for `sizes`) Ordered, immutable mapping from axis ids to lengths."""
    +192        return self.sizes
    +
    + + +

    (alias for sizes) Ordered, immutable mapping from axis ids to lengths.

    +
    + + +
    +
    + +
    + shape_tuple + + + +
    + +
    194    @property
    +195    def shape_tuple(self):
    +196        """Tuple of tensor axes lengths"""
    +197        return self._data.shape
    +
    + + +

    Tuple of tensor axes lengths

    +
    + + +
    +
    + +
    + size + + + +
    + +
    199    @property
    +200    def size(self):
    +201        """Number of elements in the tensor.
    +202
    +203        Equal to math.prod(tensor.shape), i.e., the product of the tensor's dimensions.
    +204        """
    +205        return self._data.size
    +
    + + +

    Number of elements in the tensor.

    + +

    Equal to math.prod(tensor.shape), i.e., the product of the tensor's dimensions.

    +
    + + +
    +
    + +
    + + def + sum( self, dim: Union[bioimageio.spec.model.v0_5.AxisId, Sequence[bioimageio.spec.model.v0_5.AxisId], NoneType] = None) -> Self: + + + +
    + +
    207    def sum(self, dim: Optional[Union[AxisId, Sequence[AxisId]]] = None) -> Self:
    +208        """Reduce this Tensor's data by applying sum along some dimension(s)."""
    +209        return self.__class__.from_xarray(self._data.sum(dim=dim))
    +
    + + +

    Reduce this Tensor's data by applying sum along some dimension(s).

    +
    + + +
    +
    + +
    + ndim + + + +
    + +
    211    @property
    +212    def ndim(self):
    +213        """Number of tensor dimensions."""
    +214        return self._data.ndim
    +
    + + +

    Number of tensor dimensions.

    +
    + + +
    +
    + +
    + dtype: Literal['bool', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'uint8', 'uint16', 'uint32', 'uint64'] + + + +
    + +
    216    @property
    +217    def dtype(self) -> DTypeStr:
    +218        dt = str(self.data.dtype)  # pyright: ignore[reportUnknownArgumentType]
    +219        assert dt in get_args(DTypeStr)
    +220        return dt  # pyright: ignore[reportReturnType]
    +
    + + + + +
    +
    + +
    + sizes + + + +
    + +
    222    @property
    +223    def sizes(self):
    +224        """Ordered, immutable mapping from axis ids to axis lengths."""
    +225        return cast(Mapping[AxisId, int], self.data.sizes)
    +
    + + +

    Ordered, immutable mapping from axis ids to axis lengths.

    +
    + + +
    +
    + +
    + + def + astype( self, dtype: Literal['bool', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'uint8', 'uint16', 'uint32', 'uint64'], *, copy: bool = False): + + + +
    + +
    227    def astype(self, dtype: DTypeStr, *, copy: bool = False):
    +228        """Return tensor cast to `dtype`
    +229
    +230        note: if the dtype is already satisfied, a copy is only made if `copy` is True"""
    +231        return self.__class__.from_xarray(self._data.astype(dtype, copy=copy))
    +
    + + +

    Return tensor cast to dtype

    + +

    note: if the dtype is already satisfied, a copy is only made if copy is True

    +
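Example (illustrative):

    import numpy as np
    from bioimageio.core import Tensor
    from bioimageio.spec.model.v0_5 import AxisId

    t = Tensor(np.zeros((4, 4), dtype="uint8"), dims=(AxisId("y"), AxisId("x")))
    t32 = t.astype("float32")             # cast to float32
    t8 = t32.astype("uint8", copy=True)   # force a copy even if the dtype already matches
    print(t32.dtype, t8.dtype)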
    + + +
    +
    + +
    + + def + clip(self, min: Optional[float] = None, max: Optional[float] = None): + + + +
    + +
    233    def clip(self, min: Optional[float] = None, max: Optional[float] = None):
    +234        """Return a tensor whose values are limited to [min, max].
    +235        At least one of max or min must be given."""
    +236        return self.__class__.from_xarray(self._data.clip(min, max))
    +
    + + +

    Return a tensor whose values are limited to [min, max]. +At least one of max or min must be given.

    +
    + + +
    +
    + +
    + + def + crop_to( self, sizes: Mapping[bioimageio.spec.model.v0_5.AxisId, int], crop_where: Union[Literal['left', 'right', 'left_and_right'], Mapping[bioimageio.spec.model.v0_5.AxisId, Literal['left', 'right', 'left_and_right']]] = 'left_and_right') -> Self: + + + +
    + +
    238    def crop_to(
    +239        self,
    +240        sizes: PerAxis[int],
    +241        crop_where: Union[
    +242            CropWhere,
    +243            PerAxis[CropWhere],
    +244        ] = "left_and_right",
    +245    ) -> Self:
    +246        """crop to match `sizes`"""
    +247        if isinstance(crop_where, str):
    +248            crop_axis_where: PerAxis[CropWhere] = {a: crop_where for a in self.dims}
    +249        else:
    +250            crop_axis_where = crop_where
    +251
    +252        slices: Dict[AxisId, SliceInfo] = {}
    +253
    +254        for a, s_is in self.sizes.items():
    +255            if a not in sizes or sizes[a] == s_is:
    +256                pass
    +257            elif sizes[a] > s_is:
    +258                logger.warning(
    +259                    "Cannot crop axis {} of size {} to larger size {}",
    +260                    a,
    +261                    s_is,
    +262                    sizes[a],
    +263                )
    +264            elif a not in crop_axis_where:
    +265                raise ValueError(
    +266                    f"Don't know where to crop axis {a}, `crop_where`={crop_where}"
    +267                )
    +268            else:
    +269                crop_this_axis_where = crop_axis_where[a]
    +270                if crop_this_axis_where == "left":
    +271                    slices[a] = SliceInfo(s_is - sizes[a], s_is)
    +272                elif crop_this_axis_where == "right":
    +273                    slices[a] = SliceInfo(0, sizes[a])
    +274                elif crop_this_axis_where == "left_and_right":
    +275                    slices[a] = SliceInfo(
    +276                        start := (s_is - sizes[a]) // 2, sizes[a] + start
    +277                    )
    +278                else:
    +279                    assert_never(crop_this_axis_where)
    +280
    +281        return self[slices]
    +
    + + +

    crop to match sizes

    +
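Example (illustrative sizes and axis ids):

    import numpy as np
    from bioimageio.core import Tensor
    from bioimageio.spec.model.v0_5 import AxisId

    t = Tensor(np.random.rand(1, 256, 256), dims=(AxisId("c"), AxisId("y"), AxisId("x")))

    # crop the spatial axes to 128x128, taking the central region from both sides
    cropped = t.crop_to({AxisId("y"): 128, AxisId("x"): 128}, crop_where="left_and_right")
    print(cropped.tagged_shape)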
    + + +
    +
    + +
    + + def + expand_dims( self, dims: Union[Sequence[bioimageio.spec.model.v0_5.AxisId], Mapping[bioimageio.spec.model.v0_5.AxisId, int]]) -> Self: + + + +
    + +
    283    def expand_dims(self, dims: Union[Sequence[AxisId], PerAxis[int]]) -> Self:
    +284        return self.__class__.from_xarray(self._data.expand_dims(dims=dims))
    +
    + + + + +
    +
    + +
    + + def + mean( self, dim: Union[bioimageio.spec.model.v0_5.AxisId, Sequence[bioimageio.spec.model.v0_5.AxisId], NoneType] = None) -> Self: + + + +
    + +
    286    def mean(self, dim: Optional[Union[AxisId, Sequence[AxisId]]] = None) -> Self:
    +287        return self.__class__.from_xarray(self._data.mean(dim=dim))
    +
    + + + + +
    +
    + +
    + + def + std( self, dim: Union[bioimageio.spec.model.v0_5.AxisId, Sequence[bioimageio.spec.model.v0_5.AxisId], NoneType] = None) -> Self: + + + +
    + +
    289    def std(self, dim: Optional[Union[AxisId, Sequence[AxisId]]] = None) -> Self:
    +290        return self.__class__.from_xarray(self._data.std(dim=dim))
    +
    + + + + +
    +
    + +
    + + def + var( self, dim: Union[bioimageio.spec.model.v0_5.AxisId, Sequence[bioimageio.spec.model.v0_5.AxisId], NoneType] = None) -> Self: + + + +
    + +
    292    def var(self, dim: Optional[Union[AxisId, Sequence[AxisId]]] = None) -> Self:
    +293        return self.__class__.from_xarray(self._data.var(dim=dim))
    +
    + + + + +
    +
    + +
    + + def + pad( self, pad_width: Mapping[bioimageio.spec.model.v0_5.AxisId, Union[int, Tuple[int, int], bioimageio.core.common.PadWidth]], mode: Literal['edge', 'reflect', 'symmetric'] = 'symmetric') -> Self: + + + +
    + +
    295    def pad(
    +296        self,
    +297        pad_width: PerAxis[PadWidthLike],
    +298        mode: PadMode = "symmetric",
    +299    ) -> Self:
    +300        pad_width = {a: PadWidth.create(p) for a, p in pad_width.items()}
    +301        return self.__class__.from_xarray(
    +302            self._data.pad(pad_width=pad_width, mode=mode)
    +303        )
    +
    + + + + +
    +
    + +
    + + def + pad_to( self, sizes: Mapping[bioimageio.spec.model.v0_5.AxisId, int], pad_where: Union[Literal['left', 'right', 'left_and_right'], Mapping[bioimageio.spec.model.v0_5.AxisId, Literal['left', 'right', 'left_and_right']]] = 'left_and_right', mode: Literal['edge', 'reflect', 'symmetric'] = 'symmetric') -> Self: + + + +
    + +
    305    def pad_to(
    +306        self,
    +307        sizes: PerAxis[int],
    +308        pad_where: Union[PadWhere, PerAxis[PadWhere]] = "left_and_right",
    +309        mode: PadMode = "symmetric",
    +310    ) -> Self:
    +311        """pad `tensor` to match `sizes`"""
    +312        if isinstance(pad_where, str):
    +313            pad_axis_where: PerAxis[PadWhere] = {a: pad_where for a in self.dims}
    +314        else:
    +315            pad_axis_where = pad_where
    +316
    +317        pad_width: Dict[AxisId, PadWidth] = {}
    +318        for a, s_is in self.sizes.items():
    +319            if a not in sizes or sizes[a] == s_is:
    +320                pad_width[a] = PadWidth(0, 0)
    +321            elif s_is > sizes[a]:
    +322                pad_width[a] = PadWidth(0, 0)
    +323                logger.warning(
    +324                    "Cannot pad axis {} of size {} to smaller size {}",
    +325                    a,
    +326                    s_is,
    +327                    sizes[a],
    +328                )
    +329            elif a not in pad_axis_where:
    +330                raise ValueError(
    +331                    f"Don't know where to pad axis {a}, `pad_where`={pad_where}"
    +332                )
    +333            else:
    +334                pad_this_axis_where = pad_axis_where[a]
    +335                d = sizes[a] - s_is
    +336                if pad_this_axis_where == "left":
    +337                    pad_width[a] = PadWidth(d, 0)
    +338                elif pad_this_axis_where == "right":
    +339                    pad_width[a] = PadWidth(0, d)
    +340                elif pad_this_axis_where == "left_and_right":
    +341                    pad_width[a] = PadWidth(left := d // 2, d - left)
    +342                else:
    +343                    assert_never(pad_this_axis_where)
    +344
    +345        return self.pad(pad_width, mode)
    +
    + + +

    pad tensor to match sizes

    +
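Example (illustrative sizes and axis ids):

    import numpy as np
    from bioimageio.core import Tensor
    from bioimageio.spec.model.v0_5 import AxisId

    t = Tensor(np.random.rand(1, 100, 100), dims=(AxisId("c"), AxisId("y"), AxisId("x")))

    # pad the spatial axes up to 128x128, distributing the padding over both sides
    padded = t.pad_to({AxisId("y"): 128, AxisId("x"): 128}, pad_where="left_and_right", mode="reflect")
    print(padded.tagged_shape)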
    + + +
    +
    + +
    + + def + quantile( self, q: Union[float, Sequence[float]], dim: Union[bioimageio.spec.model.v0_5.AxisId, Sequence[bioimageio.spec.model.v0_5.AxisId], NoneType] = None) -> Self: + + + +
    + +
    347    def quantile(
    +348        self,
    +349        q: Union[float, Sequence[float]],
    +350        dim: Optional[Union[AxisId, Sequence[AxisId]]] = None,
    +351    ) -> Self:
    +352        assert (
    +353            isinstance(q, (float, int))
    +354            and q >= 0.0
    +355            or not isinstance(q, (float, int))
    +356            and all(qq >= 0.0 for qq in q)
    +357        )
    +358        assert (
    +359            isinstance(q, (float, int))
    +360            and q <= 1.0
    +361            or not isinstance(q, (float, int))
    +362            and all(qq <= 1.0 for qq in q)
    +363        )
    +364        assert dim is None or (
    +365            (quantile_dim := AxisId("quantile")) != dim and quantile_dim not in set(dim)
    +366        )
    +367        return self.__class__.from_xarray(self._data.quantile(q, dim=dim))
    +
    + + + + +
    +
    + +
    + + def + resize_to( self, sizes: Mapping[bioimageio.spec.model.v0_5.AxisId, int], *, pad_where: Union[Literal['left', 'right', 'left_and_right'], Mapping[bioimageio.spec.model.v0_5.AxisId, Literal['left', 'right', 'left_and_right']]] = 'left_and_right', crop_where: Union[Literal['left', 'right', 'left_and_right'], Mapping[bioimageio.spec.model.v0_5.AxisId, Literal['left', 'right', 'left_and_right']]] = 'left_and_right', pad_mode: Literal['edge', 'reflect', 'symmetric'] = 'symmetric'): + + + +
    + +
    369    def resize_to(
    +370        self,
    +371        sizes: PerAxis[int],
    +372        *,
    +373        pad_where: Union[
    +374            PadWhere,
    +375            PerAxis[PadWhere],
    +376        ] = "left_and_right",
    +377        crop_where: Union[
    +378            CropWhere,
    +379            PerAxis[CropWhere],
    +380        ] = "left_and_right",
    +381        pad_mode: PadMode = "symmetric",
    +382    ):
    +383        """return cropped/padded tensor with `sizes`"""
    +384        crop_to_sizes: Dict[AxisId, int] = {}
    +385        pad_to_sizes: Dict[AxisId, int] = {}
    +386        new_axes = dict(sizes)
    +387        for a, s_is in self.sizes.items():
    +388            a = AxisId(str(a))
    +389            _ = new_axes.pop(a, None)
    +390            if a not in sizes or sizes[a] == s_is:
    +391                pass
    +392            elif s_is > sizes[a]:
    +393                crop_to_sizes[a] = sizes[a]
    +394            else:
    +395                pad_to_sizes[a] = sizes[a]
    +396
    +397        tensor = self
    +398        if crop_to_sizes:
    +399            tensor = tensor.crop_to(crop_to_sizes, crop_where=crop_where)
    +400
    +401        if pad_to_sizes:
    +402            tensor = tensor.pad_to(pad_to_sizes, pad_where=pad_where, mode=pad_mode)
    +403
    +404        if new_axes:
    +405            tensor = tensor.expand_dims(new_axes)
    +406
    +407        return tensor
    +
    + + +

    return cropped/padded tensor with sizes

    +
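Example (illustrative; y is padded while x is cropped):

    import numpy as np
    from bioimageio.core import Tensor
    from bioimageio.spec.model.v0_5 import AxisId

    t = Tensor(np.random.rand(1, 100, 200), dims=(AxisId("c"), AxisId("y"), AxisId("x")))

    # axes smaller than the target size are padded, larger ones are cropped
    resized = t.resize_to({AxisId("y"): 128, AxisId("x"): 128})
    print(resized.tagged_shape)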
    + + +
    +
    + +
    + + def + transpose(self, axes: Sequence[bioimageio.spec.model.v0_5.AxisId]) -> Self: + + + +
    + +
    409    def transpose(
    +410        self,
    +411        axes: Sequence[AxisId],
    +412    ) -> Self:
    +413        """return a transposed tensor
    +414
    +415        Args:
    +416            axes: the desired tensor axes
    +417        """
    +418        # expand missing tensor axes
    +419        missing_axes = tuple(a for a in axes if a not in self.dims)
    +420        array = self._data
    +421        if missing_axes:
    +422            array = array.expand_dims(missing_axes)
    +423
    +424        # transpose to the correct axis order
    +425        return self.__class__.from_xarray(array.transpose(*axes))
    +
    + + +

    return a transposed tensor

    + +
    Arguments:
    + +
      +
    • axes: the desired tensor axes
    • +
    +
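Example (illustrative):

    import numpy as np
    from bioimageio.core import Tensor
    from bioimageio.spec.model.v0_5 import AxisId

    t = Tensor(np.random.rand(64, 64, 3), dims=(AxisId("y"), AxisId("x"), AxisId("c")))

    # reorder to channel-first; axes listed here but missing on the tensor would be expanded
    t_cyx = t.transpose([AxisId("c"), AxisId("y"), AxisId("x")])
    print(t_cyx.dims)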
    + + +
    +
    +
    + + \ No newline at end of file diff --git a/bioimageio/spec.html b/bioimageio/spec.html new file mode 100644 index 00000000..6a45fad7 --- /dev/null +++ b/bioimageio/spec.html @@ -0,0 +1,4629 @@ + + + + + + + bioimageio.spec API documentation + + + + + + + + + + +
    +
    +

    +bioimageio.spec

    + +

    +
    + + + + + +
     1"""
    + 2.. include:: ../../README.md
    + 3"""
    + 4
    + 5from . import (
    + 6    application,
    + 7    common,
    + 8    conda_env,
    + 9    dataset,
    +10    generic,
    +11    model,
    +12    pretty_validation_errors,
    +13    summary,
    +14    utils,
    +15)
    +16from ._description import (
    +17    LatestResourceDescr,
    +18    ResourceDescr,
    +19    SpecificResourceDescr,
    +20    build_description,
    +21    dump_description,
    +22    validate_format,
    +23)
    +24from ._internal import settings
    +25from ._internal.common_nodes import InvalidDescr
    +26from ._internal.constants import VERSION
    +27from ._internal.validation_context import ValidationContext
    +28from ._io import (
    +29    load_dataset_description,
    +30    load_description,
    +31    load_description_and_validate_format_only,
    +32    load_model_description,
    +33    save_bioimageio_yaml_only,
    +34)
    +35from ._package import (
    +36    get_resource_package_content,
    +37    save_bioimageio_package,
    +38    save_bioimageio_package_as_folder,
    +39    save_bioimageio_package_to_stream,
    +40)
    +41from .application import AnyApplicationDescr, ApplicationDescr
    +42from .dataset import AnyDatasetDescr, DatasetDescr
    +43from .generic import AnyGenericDescr, GenericDescr
    +44from .model import AnyModelDescr, ModelDescr
    +45from .notebook import AnyNotebookDescr, NotebookDescr
    +46from .pretty_validation_errors import enable_pretty_validation_errors_in_ipynb
    +47from .summary import ValidationSummary
    +48
    +49__version__ = VERSION
    +50
    +51__all__ = [
    +52    "__version__",
    +53    "AnyApplicationDescr",
    +54    "AnyDatasetDescr",
    +55    "AnyGenericDescr",
    +56    "AnyModelDescr",
    +57    "AnyNotebookDescr",
    +58    "application",
    +59    "ApplicationDescr",
    +60    "build_description",
    +61    "common",
    +62    "conda_env",
    +63    "dataset",
    +64    "DatasetDescr",
    +65    "dump_description",
    +66    "enable_pretty_validation_errors_in_ipynb",
    +67    "generic",
    +68    "GenericDescr",
    +69    "get_resource_package_content",
    +70    "InvalidDescr",
    +71    "LatestResourceDescr",
    +72    "load_dataset_description",
    +73    "load_description_and_validate_format_only",
    +74    "load_description",
    +75    "load_model_description",
    +76    "model",
    +77    "ModelDescr",
    +78    "NotebookDescr",
    +79    "pretty_validation_errors",
    +80    "ResourceDescr",
    +81    "save_bioimageio_package_as_folder",
    +82    "save_bioimageio_package_to_stream",
    +83    "save_bioimageio_package",
    +84    "save_bioimageio_yaml_only",
    +85    "settings",
    +86    "SpecificResourceDescr",
    +87    "summary",
    +88    "utils",
    +89    "validate_format",
    +90    "ValidationContext",
    +91    "ValidationSummary",
    +92]
    +
    + + +
    +
    +
    + __version__ = +'0.5.3.5' + + +
    + + + + +
    +
    +
    + AnyApplicationDescr = + + typing.Annotated[typing.Union[bioimageio.spec.application.v0_2.ApplicationDescr, ApplicationDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)] + + +
    + + + + +
    +
    +
    + AnyDatasetDescr = + + typing.Annotated[typing.Union[bioimageio.spec.dataset.v0_2.DatasetDescr, DatasetDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)] + + +
    + + + + +
    +
    +
    + AnyGenericDescr = + + typing.Annotated[typing.Union[bioimageio.spec.generic.v0_2.GenericDescr, GenericDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)] + + +
    + + + + +
    +
    +
    + AnyModelDescr = + + typing.Annotated[typing.Union[bioimageio.spec.model.v0_4.ModelDescr, ModelDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)] + + +
    + + + + +
    +
    +
    + AnyNotebookDescr = + + typing.Annotated[typing.Union[NotebookDescr, NotebookDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)] + + +
    + + + + +
    +
    + +
    + + class + ApplicationDescr(bioimageio.spec.generic.v0_3.GenericDescrBase): + + + +
    + +
    32class ApplicationDescr(GenericDescrBase, title="bioimage.io application specification"):
    +33    """Bioimage.io description of an application."""
    +34
    +35    type: Literal["application"] = "application"
    +36
    +37    id: Optional[ApplicationId] = None
    +38    """bioimage.io-wide unique resource identifier
    +39    assigned by bioimage.io; version **un**specific."""
    +40
    +41    parent: Optional[ApplicationId] = None
    +42    """The description from which this one is derived"""
    +43
    +44    source: Annotated[
    +45        Optional[ImportantFileSource],
    +46        Field(description="URL or path to the source of the application"),
    +47    ] = None
    +48    """The primary source of the application"""
    +
    + + +

    Bioimage.io description of an application.

    +
    + + +
    +
    + type: Literal['application'] + + +
    + + + + +
    +
    + + + +

    bioimage.io-wide unique resource identifier +assigned by bioimage.io; version unspecific.

    +
    + + +
    +
    + + + +

    The description from which this one is derived

    +
    + + +
    +
    +
    + source: Annotated[Optional[Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')]), AfterValidator(func=<function wo_special_file_name at 0x7f538192cea0>), PlainSerializer(func=<function _package at 0x7f538192d620>, return_type=PydanticUndefined, when_used='unless-none')]], FieldInfo(annotation=NoneType, required=True, description='URL or path to the source of the application')] + + +
    + + +

    The primary source of the application

    +
    + + +
    +
    +
    + implemented_format_version: ClassVar[str] = +'0.3.0' + + +
    + + + + +
    +
    +
    + implemented_format_version_tuple: ClassVar[Tuple[int, int, int]] = +(0, 3, 0) + + +
    + + + + +
    +
    + +
    + + def + model_post_init(self: pydantic.main.BaseModel, context: Any, /) -> None: + + + +
    + +
    124                    def wrapped_model_post_init(self: BaseModel, context: Any, /) -> None:
    +125                        """We need to both initialize private attributes and call the user-defined model_post_init
    +126                        method.
    +127                        """
    +128                        init_private_attributes(self, context)
    +129                        original_model_post_init(self, context)
    +
    + + +

    We need to both initialize private attributes and call the user-defined model_post_init +method.

    +
    + + +
    + +
    +
    + +
    + + def + build_description( content: Dict[str, YamlValue], /, *, context: Optional[ValidationContext] = None, format_version: Union[Literal['latest', 'discover'], str] = 'discover') -> Union[Annotated[Union[Annotated[Union[bioimageio.spec.application.v0_2.ApplicationDescr, ApplicationDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.dataset.v0_2.DatasetDescr, DatasetDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.model.v0_4.ModelDescr, ModelDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[NotebookDescr, NotebookDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.generic.v0_2.GenericDescr, GenericDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], InvalidDescr]: + + + +
    + +
    130def build_description(
    +131    content: BioimageioYamlContent,
    +132    /,
    +133    *,
    +134    context: Optional[ValidationContext] = None,
    +135    format_version: Union[FormatVersionPlaceholder, str] = DISCOVER,
    +136) -> Union[ResourceDescr, InvalidDescr]:
    +137    """build a bioimage.io resource description from an RDF's content.
    +138
    +139    Use `load_description` if you want to build a resource description from an rdf.yaml
    +140    or bioimage.io zip-package.
    +141
    +142    Args:
    +143        content: loaded rdf.yaml file (loaded with YAML, not bioimageio.spec)
    +144        context: validation context to use during validation
    +145        format_version: (optional) use this argument to load the resource and
    +146                        convert its metadata to a higher format_version
    +147
    +148    Returns:
    +149        An object holding all metadata of the bioimage.io resource
    +150
    +151    """
    +152
    +153    return build_description_impl(
    +154        content,
    +155        context=context,
    +156        format_version=format_version,
    +157        get_rd_class=_get_rd_class,
    +158    )
    +
    + + +

    build a bioimage.io resource description from an RDF's content.

    + +

    Use load_description if you want to build a resource description from an rdf.yaml +or bioimage.io zip-package.

    + +
    Arguments:
    + +
      +
    • content: loaded rdf.yaml file (loaded with YAML, not bioimageio.spec)
    • +
    • context: validation context to use during validation
    • +
    • format_version: (optional) use this argument to load the resource and +convert its metadata to a higher format_version
    • +
    + +
    Returns:
    + +
    +

    An object holding all metadata of the bioimage.io resource

    +
    +
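Example (a rough sketch; the file name is hypothetical and PyYAML is only used here to obtain the raw RDF content, which `load_description` would otherwise do for you):

    import yaml  # assumption: PyYAML (or any YAML parser) is available

    from bioimageio.spec import InvalidDescr, build_description

    with open("rdf.yaml") as f:  # hypothetical local RDF file
        content = yaml.safe_load(f)

    descr = build_description(content)
    if isinstance(descr, InvalidDescr):
        raise ValueError("the given RDF content is invalid")
    print(descr.type, descr.name)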
    + + +
    +
    + +
    + + class + DatasetDescr(bioimageio.spec.generic.v0_3.GenericDescrBase): + + + +
    + +
     39class DatasetDescr(GenericDescrBase, title="bioimage.io dataset specification"):
    + 40    """A bioimage.io dataset resource description file (dataset RDF) describes a dataset relevant to bioimage
    + 41    processing.
    + 42    """
    + 43
    + 44    type: Literal["dataset"] = "dataset"
    + 45
    + 46    id: Optional[DatasetId] = None
    + 47    """bioimage.io-wide unique resource identifier
    + 48    assigned by bioimage.io; version **un**specific."""
    + 49
    + 50    parent: Optional[DatasetId] = None
    + 51    """The description from which this one is derived"""
    + 52
    + 53    source: Optional[HttpUrl] = None
    + 54    """URL to the source of the dataset."""
    + 55
    + 56    @model_validator(mode="before")
    + 57    @classmethod
    + 58    def _convert(cls, data: Dict[str, Any], /) -> Dict[str, Any]:
    + 59        if (
    + 60            data.get("type") == "dataset"
    + 61            and isinstance(fv := data.get("format_version"), str)
    + 62            and fv.startswith("0.2.")
    + 63        ):
    + 64            old = DatasetDescr02.load(data)
    + 65            if isinstance(old, InvalidDescr):
    + 66                return data
    + 67
    + 68            return cast(
    + 69                Dict[str, Any],
    + 70                (cls if TYPE_CHECKING else dict)(
    + 71                    attachments=(
    + 72                        []
    + 73                        if old.attachments is None
    + 74                        else [FileDescr(source=f) for f in old.attachments.files]
    + 75                    ),
    + 76                    authors=[
    + 77                        _author_conv.convert_as_dict(a) for a in old.authors
    + 78                    ],  # pyright: ignore[reportArgumentType]
    + 79                    badges=old.badges,
    + 80                    cite=[
    + 81                        {"text": c.text, "doi": c.doi, "url": c.url} for c in old.cite
    + 82                    ],  # pyright: ignore[reportArgumentType]
    + 83                    config=old.config,
    + 84                    covers=old.covers,
    + 85                    description=old.description,
    + 86                    documentation=cast(DocumentationSource, old.documentation),
    + 87                    format_version="0.3.0",
    + 88                    git_repo=old.git_repo,  # pyright: ignore[reportArgumentType]
    + 89                    icon=old.icon,
    + 90                    id=None if old.id is None else DatasetId(old.id),
    + 91                    license=old.license,  # type: ignore
    + 92                    links=old.links,
    + 93                    maintainers=[
    + 94                        _maintainer_conv.convert_as_dict(m) for m in old.maintainers
    + 95                    ],  # pyright: ignore[reportArgumentType]
    + 96                    name=old.name,
    + 97                    source=old.source,
    + 98                    tags=old.tags,
    + 99                    type=old.type,
    +100                    uploader=old.uploader,
    +101                    version=old.version,
    +102                    **(old.model_extra or {}),
    +103                ),
    +104            )
    +105
    +106        return data
    +
    + + +

    A bioimage.io dataset resource description file (dataset RDF) describes a dataset relevant to bioimage +processing.

    +
    + + +
    +
    + type: Literal['dataset'] + + +
    + + + + +
    +
    + + + +

    bioimage.io-wide unique resource identifier +assigned by bioimage.io; version unspecific.

    +
    + + +
    +
    +
    + parent: Optional[bioimageio.spec.dataset.v0_3.DatasetId] + + +
    + + +

    The description from which this one is derived

    +
    + + +
    +
    +
    + source: Optional[bioimageio.spec._internal.url.HttpUrl] + + +
    + + +

    "URL to the source of the dataset.

    +
    + + +
    +
    +
    + implemented_format_version: ClassVar[str] = +'0.3.0' + + +
    + + + + +
    +
    +
    + implemented_format_version_tuple: ClassVar[Tuple[int, int, int]] = +(0, 3, 0) + + +
    + + + + +
    +
    + +
    + + def + model_post_init(self: pydantic.main.BaseModel, context: Any, /) -> None: + + + +
    + +
    124                    def wrapped_model_post_init(self: BaseModel, context: Any, /) -> None:
    +125                        """We need to both initialize private attributes and call the user-defined model_post_init
    +126                        method.
    +127                        """
    +128                        init_private_attributes(self, context)
    +129                        original_model_post_init(self, context)
    +
    + + +

    We need to both initialize private attributes and call the user-defined model_post_init +method.

    +
    + + +
    + +
    +
    + +
    + + def + dump_description( rd: Union[Annotated[Union[Annotated[Union[bioimageio.spec.application.v0_2.ApplicationDescr, ApplicationDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.dataset.v0_2.DatasetDescr, DatasetDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.model.v0_4.ModelDescr, ModelDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[NotebookDescr, NotebookDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.generic.v0_2.GenericDescr, GenericDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], InvalidDescr], exclude_unset: bool = True) -> Dict[str, YamlValue]: + + + +
    + +
    65def dump_description(
    +66    rd: Union[ResourceDescr, InvalidDescr], exclude_unset: bool = True
    +67) -> BioimageioYamlContent:
    +68    """Converts a resource to a dictionary containing only simple types that can directly be serialized to YAML."""
    +69    return rd.model_dump(mode="json", exclude_unset=exclude_unset)
    +
    + + +

    Converts a resource to a dictionary containing only simple types that can directly be serialized to YAML.

    +
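Example (illustrative; the source path is hypothetical):

    from bioimageio.spec import dump_description, load_description

    descr = load_description("rdf.yaml")  # hypothetical path or URL to an RDF
    content = dump_description(descr)     # plain dict with YAML-compatible values only
    print(sorted(content))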
    + + +
    +
    + +
    + + def + enable_pretty_validation_errors_in_ipynb(): + + + +
    + +
    72    def enable_pretty_validation_errors_in_ipynb():
    +73        """A modestly hacky way to display prettified validation error messages and traceback
    +74        in interactive Python notebooks"""
    +75        ipy = get_ipython()
    +76        if ipy is not None:
    +77            ipy.set_custom_exc((ValidationError,), _custom_exception_handler)
    +
    + + +

    A modestly hacky way to display prettified validation error messages and traceback +in interactive Python notebooks

    +
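Example (illustrative):

    from bioimageio.spec import enable_pretty_validation_errors_in_ipynb

    # safe to call unconditionally; it only takes effect inside an IPython/Jupyter session
    enable_pretty_validation_errors_in_ipynb()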
    + + +
    +
    + +
    + + class + GenericDescr(bioimageio.spec.generic.v0_3.GenericDescrBase): + + + +
    + +
    410class GenericDescr(
    +411    GenericDescrBase, extra="ignore", title="bioimage.io generic specification"
    +412):
    +413    """Specification of the fields used in a generic bioimage.io-compliant resource description file (RDF).
    +414
    +415    An RDF is a YAML file that describes a resource such as a model, a dataset, or a notebook.
    +416    Note that those resources are described with a type-specific RDF.
    +417    Use this generic resource description, if none of the known specific types matches your resource.
    +418    """
    +419
    +420    type: Annotated[str, LowerCase] = Field("generic", frozen=True)
    +421    """The resource type assigns a broad category to the resource."""
    +422
    +423    id: Optional[ResourceId] = None
    +424    """bioimage.io-wide unique resource identifier
    +425    assigned by bioimage.io; version **un**specific."""
    +426
    +427    parent: Optional[ResourceId] = None
    +428    """The description from which this one is derived"""
    +429
    +430    source: Optional[HttpUrl] = None
    +431    """The primary source of the resource"""
    +432
    +433    @field_validator("type", mode="after")
    +434    @classmethod
    +435    def check_specific_types(cls, value: str) -> str:
    +436        if value in KNOWN_SPECIFIC_RESOURCE_TYPES:
    +437            raise ValueError(
    +438                f"Use the {value} description instead of this generic description for"
    +439                + f" your '{value}' resource."
    +440            )
    +441
    +442        return value
    +
    + + +

    Specification of the fields used in a generic bioimage.io-compliant resource description file (RDF).

    + +

    An RDF is a YAML file that describes a resource such as a model, a dataset, or a notebook. +Note that those resources are described with a type-specific RDF. +Use this generic resource description, if none of the known specific types matches your resource.

    +
    + + +
    +
    + type: Annotated[str, Annotated[~_StrType, Predicate(str.islower)]] + + +
    + + +

    The resource type assigns a broad category to the resource.

    +
    + + +
    +
    + + + +

    bioimage.io-wide unique resource identifier +assigned by bioimage.io; version unspecific.

    +
    + + +
    +
    +
    + parent: Optional[bioimageio.spec.generic.v0_3.ResourceId] + + +
    + + +

    The description from which this one is derived

    +
    + + +
    +
    +
    + source: Optional[bioimageio.spec._internal.url.HttpUrl] + + +
    + + +

    The primary source of the resource

    +
    + + +
    +
    + +
    +
    @field_validator('type', mode='after')
    +
    @classmethod
    + + def + check_specific_types(cls, value: str) -> str: + + + +
    + +
    433    @field_validator("type", mode="after")
    +434    @classmethod
    +435    def check_specific_types(cls, value: str) -> str:
    +436        if value in KNOWN_SPECIFIC_RESOURCE_TYPES:
    +437            raise ValueError(
    +438                f"Use the {value} description instead of this generic description for"
    +439                + f" your '{value}' resource."
    +440            )
    +441
    +442        return value
    +
    + + + + +
    +
    +
    + implemented_format_version: ClassVar[str] = +'0.3.0' + + +
    + + + + +
    +
    +
    + implemented_format_version_tuple: ClassVar[Tuple[int, int, int]] = +(0, 3, 0) + + +
    + + + + +
    +
    + +
    + + def + model_post_init(self: pydantic.main.BaseModel, context: Any, /) -> None: + + + +
    + +
    124                    def wrapped_model_post_init(self: BaseModel, context: Any, /) -> None:
    +125                        """We need to both initialize private attributes and call the user-defined model_post_init
    +126                        method.
    +127                        """
    +128                        init_private_attributes(self, context)
    +129                        original_model_post_init(self, context)
    +
    + + +

    We need to both initialize private attributes and call the user-defined model_post_init +method.

    +
    + + +
    + +
    +
    + +
    + + def + get_resource_package_content( rd: Union[Annotated[Union[Annotated[Union[bioimageio.spec.application.v0_2.ApplicationDescr, ApplicationDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.dataset.v0_2.DatasetDescr, DatasetDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.model.v0_4.ModelDescr, ModelDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[NotebookDescr, NotebookDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.generic.v0_2.GenericDescr, GenericDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], /, *, bioimageio_yaml_file_name: str = 'rdf.yaml', weights_priority_order: Optional[Sequence[Literal['keras_hdf5', 'onnx', 'pytorch_state_dict', 'tensorflow_js', 'tensorflow_saved_model_bundle', 'torchscript']]] = None) -> Dict[str, Union[bioimageio.spec._internal.url.HttpUrl, Annotated[pathlib.Path, PathType(path_type='file'), Predicate(is_absolute)], Dict[str, YamlValue], zipp.Path]]: + + + +
    + +
    32def get_resource_package_content(
    +33    rd: ResourceDescr,
    +34    /,
    +35    *,
    +36    bioimageio_yaml_file_name: FileName = BIOIMAGEIO_YAML,
    +37    weights_priority_order: Optional[Sequence[WeightsFormat]] = None,  # model only
    +38) -> Dict[FileName, Union[HttpUrl, AbsoluteFilePath, BioimageioYamlContent, ZipPath]]:
    +39    """
    +40    Args:
    +41        rd: resource description
    +42        bioimageio_yaml_file_name: RDF file name
    +43        # for model resources only:
    +44        weights_priority_order: If given, only the first weights format present in the model is included.
    +45                                If none of the prioritized weights formats is found, a ValueError is raised.
    +46    """
    +47    os_friendly_name = get_os_friendly_file_name(rd.name)
    +48    bioimageio_yaml_file_name = bioimageio_yaml_file_name.format(
    +49        name=os_friendly_name, type=rd.type
    +50    )
    +51
    +52    bioimageio_yaml_file_name = ensure_is_valid_bioimageio_yaml_name(
    +53        bioimageio_yaml_file_name
    +54    )
    +55    content: Dict[FileName, Union[HttpUrl, AbsoluteFilePath, ZipPath]] = {}
    +56    with PackagingContext(
    +57        bioimageio_yaml_file_name=bioimageio_yaml_file_name,
    +58        file_sources=content,
    +59        weights_priority_order=weights_priority_order,
    +60    ):
    +61        rdf_content: BioimageioYamlContent = rd.model_dump(
    +62            mode="json", exclude_unset=True
    +63        )
    +64
    +65    _ = rdf_content.pop("rdf_source", None)
    +66
    +67    return {**content, bioimageio_yaml_file_name: rdf_content}
    +
    + + +
    Arguments:
    + +
      +
    • rd: resource description
    • +
    • bioimageio_yaml_file_name: RDF file name
    • +
    • # for model resources only:
    • +
    • weights_priority_order: If given, only the first weights format present in the model is included. +If none of the prioritized weights formats is found, a ValueError is raised.
    • +
    +
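Example (illustrative; the RDF path is hypothetical):

    from bioimageio.spec import get_resource_package_content, load_model_description

    model = load_model_description("rdf.yaml")  # hypothetical model RDF
    content = get_resource_package_content(
        model, weights_priority_order=["pytorch_state_dict", "torchscript"]
    )
    print(list(content))  # file names that would end up in a bioimage.io package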
    + + +
    +
    + +
    + + class + InvalidDescr(bioimageio.spec._internal.common_nodes.ResourceDescrBase): + + + +
    + +
    514class InvalidDescr(
    +515    ResourceDescrBase,
    +516    extra="allow",
    +517    title="An invalid resource description",
    +518):
    +519    """A representation of an invalid resource description"""
    +520
    +521    type: Any = "unknown"
    +522    format_version: Any = "unknown"
    +523    fields_to_set_explicitly: ClassVar[FrozenSet[LiteralString]] = frozenset()
    +
    + + +

    A representation of an invalid resource description

    +
    + + +
    +
    + type: Any + + +
    + + + + +
    +
    +
    + format_version: Any + + +
    + + + + +
    +
    +
    + fields_to_set_explicitly: ClassVar[FrozenSet[LiteralString]] = +frozenset() + + +
    + + +

    set these fields explicitly with their default value if they are not set, +such that they are always included even when dumping with 'exclude_unset'

    +
    + + +
    +
    +
    + implemented_format_version: ClassVar[str] = +'unknown' + + +
    + + + + +
    +
    +
    + implemented_format_version_tuple: ClassVar[Tuple[int, int, int]] = +(0, 0, 0) + + +
    + + + + +
    +
    + +
    + + def + model_post_init(self: pydantic.main.BaseModel, context: Any, /) -> None: + + + +
    + +
    124                    def wrapped_model_post_init(self: BaseModel, context: Any, /) -> None:
    +125                        """We need to both initialize private attributes and call the user-defined model_post_init
    +126                        method.
    +127                        """
    +128                        init_private_attributes(self, context)
    +129                        original_model_post_init(self, context)
    +
    + + +

    We need to both initialize private attributes and call the user-defined model_post_init +method.

    +
    + + +
    +
    +
    +
    + LatestResourceDescr = + + typing.Union[typing.Annotated[typing.Union[ApplicationDescr, DatasetDescr, ModelDescr, NotebookDescr], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], GenericDescr] + + +
    + + + + +
    +
    + +
    + + def + load_dataset_description( source: Union[Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])], str, Annotated[pydantic_core._pydantic_core.Url, UrlConstraints(max_length=2083, allowed_schemes=['http', 'https'], host_required=None, default_host=None, default_port=None, default_path=None)], zipfile.ZipFile], /, *, format_version: Union[Literal['discover'], Literal['latest'], str] = 'discover', perform_io_checks: bool = True, known_files: Optional[Dict[str, bioimageio.spec._internal.io_basics.Sha256]] = None) -> Annotated[Union[bioimageio.spec.dataset.v0_2.DatasetDescr, DatasetDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)]: + + + +
    + +
     98def load_dataset_description(
    + 99    source: Union[PermissiveFileSource, ZipFile],
    +100    /,
    +101    *,
    +102    format_version: Union[Literal["discover"], Literal["latest"], str] = DISCOVER,
    +103    perform_io_checks: bool = settings.perform_io_checks,
    +104    known_files: Optional[Dict[str, Sha256]] = None,
    +105) -> AnyDatasetDescr:
    +106    """same as `load_description`, but additionally ensures that the loaded
    +107    description is valid and of type 'dataset'.
    +108    """
    +109    rd = load_description(
    +110        source,
    +111        format_version=format_version,
    +112        perform_io_checks=perform_io_checks,
    +113        known_files=known_files,
    +114    )
    +115    return ensure_description_is_dataset(rd)
    +
    + + +

    same as load_description, but additionally ensures that the loaded +description is valid and of type 'dataset'.

    +
    + + +
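For illustration, a hedged usage sketch; the dataset RDF URL is a placeholder:

```python
from bioimageio.spec import load_dataset_description

dataset = load_dataset_description(
    "https://example.com/some-dataset/rdf.yaml",  # placeholder dataset RDF source
    perform_io_checks=False,  # skip download and hashing of referenced files
)
print(dataset.name, dataset.format_version)
```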
    +
    + +
    + + def + load_description_and_validate_format_only( source: Union[Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])], str, Annotated[pydantic_core._pydantic_core.Url, UrlConstraints(max_length=2083, allowed_schemes=['http', 'https'], host_required=None, default_host=None, default_port=None, default_path=None)], zipfile.ZipFile], /, *, format_version: Union[Literal['discover'], Literal['latest'], str] = 'discover', perform_io_checks: bool = True, known_files: Optional[Dict[str, bioimageio.spec._internal.io_basics.Sha256]] = None) -> ValidationSummary: + + + +
    + +
    137def load_description_and_validate_format_only(
    +138    source: Union[PermissiveFileSource, ZipFile],
    +139    /,
    +140    *,
    +141    format_version: Union[Literal["discover"], Literal["latest"], str] = DISCOVER,
    +142    perform_io_checks: bool = settings.perform_io_checks,
    +143    known_files: Optional[Dict[str, Sha256]] = None,
    +144) -> ValidationSummary:
    +145    """load a bioimage.io resource description
    +146
    +147    Args:
    +148        source: Path or URL to an rdf.yaml or a bioimage.io package
    +149                (zip-file with rdf.yaml in it).
    +150        format_version: (optional) Use this argument to load the resource and
    +151                        convert its metadata to a higher format_version.
    +152        perform_io_checks: Whether or not to perform validation that requires file io,
    +153                           e.g. downloading remote files. The existence of local
    +154                           absolute file paths is still being checked.
    +155        known_files: Allows to bypass download and hashing of referenced files
    +156                     (even if perform_io_checks is True).
    +157
    +158    Returns:
    +159        Validation summary of the bioimage.io resource found at `source`.
    +160
    +161    """
    +162    rd = load_description(
    +163        source,
    +164        format_version=format_version,
    +165        perform_io_checks=perform_io_checks,
    +166        known_files=known_files,
    +167    )
    +168    assert rd.validation_summary is not None
    +169    return rd.validation_summary
    +
    + + +

    load a bioimage.io resource description

    + +
    Arguments:
    + +
      +
    • source: Path or URL to an rdf.yaml or a bioimage.io package +(zip-file with rdf.yaml in it).
    • +
    • format_version: (optional) Use this argument to load the resource and +convert its metadata to a higher format_version.
    • +
    • perform_io_checks: Whether or not to perform validation that requires file io, +e.g. downloading remote files. The existence of local +absolute file paths is still being checked.
    • +
    • known_files: Allows to bypass download and hashing of referenced files +(even if perform_io_checks is True).
    • +
    + +
    Returns:
    + +
    +

    Validation summary of the bioimage.io resource found at source.

    +
    +
    + + +
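A hedged usage sketch; the package path is a placeholder:

```python
from bioimageio.spec import load_description_and_validate_format_only

summary = load_description_and_validate_format_only(
    "my-model.zip",  # placeholder path to a bioimage.io package (zip with rdf.yaml)
    perform_io_checks=False,
)
print(summary.status)  # overall status, e.g. "passed" or "failed"
```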
    +
    + +
    + + def + load_description( source: Union[Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])], str, Annotated[pydantic_core._pydantic_core.Url, UrlConstraints(max_length=2083, allowed_schemes=['http', 'https'], host_required=None, default_host=None, default_port=None, default_path=None)], zipfile.ZipFile], /, *, format_version: Union[Literal['discover'], Literal['latest'], str] = 'discover', perform_io_checks: bool = True, known_files: Optional[Dict[str, bioimageio.spec._internal.io_basics.Sha256]] = None) -> Union[Annotated[Union[Annotated[Union[bioimageio.spec.application.v0_2.ApplicationDescr, ApplicationDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.dataset.v0_2.DatasetDescr, DatasetDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.model.v0_4.ModelDescr, ModelDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[NotebookDescr, NotebookDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.generic.v0_2.GenericDescr, GenericDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], InvalidDescr]: + + + +
    + +
    29def load_description(
    +30    source: Union[PermissiveFileSource, ZipFile],
    +31    /,
    +32    *,
    +33    format_version: Union[Literal["discover"], Literal["latest"], str] = DISCOVER,
    +34    perform_io_checks: bool = settings.perform_io_checks,
    +35    known_files: Optional[Dict[str, Sha256]] = None,
    +36) -> Union[ResourceDescr, InvalidDescr]:
    +37    """load a bioimage.io resource description
    +38
    +39    Args:
    +40        source: Path or URL to an rdf.yaml or a bioimage.io package
    +41                (zip-file with rdf.yaml in it).
    +42        format_version: (optional) Use this argument to load the resource and
    +43                        convert its metadata to a higher format_version.
    +44        perform_io_checks: Whether or not to perform validation that requires file io,
    +45                           e.g. downloading remote files. The existence of local
    +46                           absolute file paths is still being checked.
    +47        known_files: Allows to bypass download and hashing of referenced files
    +48                     (even if perform_io_checks is True).
    +49
    +50    Returns:
    +51        An object holding all metadata of the bioimage.io resource
    +52
    +53    """
    +54    if isinstance(source, ResourceDescrBase):
    +55        name = getattr(source, "name", f"{str(source)[:10]}...")
    +56        logger.warning("returning already loaded description '{}' as is", name)
    +57        return source  # pyright: ignore[reportReturnType]
    +58
    +59    opened = open_bioimageio_yaml(source)
    +60
    +61    context = validation_context_var.get().replace(
    +62        root=opened.original_root,
    +63        file_name=opened.original_file_name,
    +64        perform_io_checks=perform_io_checks,
    +65        known_files=known_files,
    +66    )
    +67
    +68    return build_description(
    +69        opened.content,
    +70        context=context,
    +71        format_version=format_version,
    +72    )
    +
    + + +

    load a bioimage.io resource description

    + +
    Arguments:
    + +
      +
    • source: Path or URL to an rdf.yaml or a bioimage.io package +(zip-file with rdf.yaml in it).
    • +
    • format_version: (optional) Use this argument to load the resource and +convert its metadata to a higher format_version.
    • +
    • perform_io_checks: Whether or not to perform validation that requires file io, +e.g. downloading remote files. The existence of local +absolute file paths is still being checked.
    • +
    • known_files: Allows to bypass download and hashing of referenced files +(even if perform_io_checks is True).
    • +
    + +
    Returns:
    + +
    +

    An object holding all metadata of the bioimage.io resource

    +
    +
    + + +
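As a sketch, a bioimage.io package (a zip archive containing an rdf.yaml) can also be passed as an open `zipfile.ZipFile`; the package path below is a placeholder:

```python
from zipfile import ZipFile

from bioimageio.spec import InvalidDescr, load_description

with ZipFile("my-model.zip") as zf:  # placeholder package path
    rd = load_description(zf, format_version="latest")

if isinstance(rd, InvalidDescr):
    print(rd.validation_summary)
else:
    print(rd.type, rd.format_version)
```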
    +
    + +
    + + def + load_model_description( source: Union[Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])], str, Annotated[pydantic_core._pydantic_core.Url, UrlConstraints(max_length=2083, allowed_schemes=['http', 'https'], host_required=None, default_host=None, default_port=None, default_path=None)], zipfile.ZipFile], /, *, format_version: Union[Literal['discover'], Literal['latest'], str] = 'discover', perform_io_checks: bool = True, known_files: Optional[Dict[str, bioimageio.spec._internal.io_basics.Sha256]] = None) -> Annotated[Union[bioimageio.spec.model.v0_4.ModelDescr, ModelDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)]: + + + +
    + +
    75def load_model_description(
    +76    source: Union[PermissiveFileSource, ZipFile],
    +77    /,
    +78    *,
    +79    format_version: Union[Literal["discover"], Literal["latest"], str] = DISCOVER,
    +80    perform_io_checks: bool = settings.perform_io_checks,
    +81    known_files: Optional[Dict[str, Sha256]] = None,
    +82) -> AnyModelDescr:
    +83    """same as `load_description`, but additionally ensures that the loaded
    +84    description is valid and of type 'model'.
    +85
    +86    Raises:
    +87        ValueError: for invalid or non-model resources
    +88    """
    +89    rd = load_description(
    +90        source,
    +91        format_version=format_version,
    +92        perform_io_checks=perform_io_checks,
    +93        known_files=known_files,
    +94    )
    +95    return ensure_description_is_model(rd)
    +
    + + +

    same as load_description, but additionally ensures that the loaded +description is valid and of type 'model'.

    + +
    Raises:
    + +
      +
    • ValueError: for invalid or non-model resources
    • +
    +
    + + +
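A hedged usage sketch; the model RDF URL is a placeholder and `format_version="latest"` converts older descriptions to the current 0.5 format so that tensors expose an `id`:

```python
from bioimageio.spec import load_model_description

model = load_model_description(
    "https://example.com/my-model/rdf.yaml",  # placeholder model RDF source
    format_version="latest",
)
print([t.id for t in model.inputs], [t.id for t in model.outputs])
# available weight formats (field names of the set weights entries)
print(list(model.weights.model_dump(exclude_none=True)))
```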
    +
    + +
    + + class + ModelDescr(bioimageio.spec.generic.v0_3.GenericModelDescrBase): + + + +
    + +
    2062class ModelDescr(GenericModelDescrBase, title="bioimage.io model specification"):
    +2063    """Specification of the fields used in a bioimage.io-compliant RDF to describe AI models with pretrained weights.
    +2064    These fields are typically stored in a YAML file which we call a model resource description file (model RDF).
    +2065    """
    +2066
    +2067    format_version: Literal["0.5.3"] = "0.5.3"
    +2068    """Version of the bioimage.io model description specification used.
    +2069    When creating a new model always use the latest micro/patch version described here.
    +2070    The `format_version` is important for any consumer software to understand how to parse the fields.
    +2071    """
    +2072
    +2073    type: Literal["model"] = "model"
    +2074    """Specialized resource type 'model'"""
    +2075
    +2076    id: Optional[ModelId] = None
    +2077    """bioimage.io-wide unique resource identifier
    +2078    assigned by bioimage.io; version **un**specific."""
    +2079
    +2080    authors: NotEmpty[List[Author]]
    +2081    """The authors are the creators of the model RDF and the primary points of contact."""
    +2082
    +2083    documentation: Annotated[
    +2084        DocumentationSource,
    +2085        Field(
    +2086            examples=[
    +2087                "https://raw.githubusercontent.com/bioimage-io/spec-bioimage-io/main/example_descriptions/models/unet2d_nuclei_broad/README.md",
    +2088                "README.md",
    +2089            ],
    +2090        ),
    +2091    ]
    +2092    """∈📦 URL or relative path to a markdown file with additional documentation.
    +2093    The recommended documentation file name is `README.md`. An `.md` suffix is mandatory.
    +2094    The documentation should include a '#[#] Validation' (sub)section
    +2095    with details on how to quantitatively validate the model on unseen data."""
    +2096
    +2097    @field_validator("documentation", mode="after")
    +2098    @classmethod
    +2099    def _validate_documentation(cls, value: DocumentationSource) -> DocumentationSource:
    +2100        if not validation_context_var.get().perform_io_checks:
    +2101            return value
    +2102
    +2103        doc_path = download(value).path
    +2104        doc_content = doc_path.read_text(encoding="utf-8")
    +2105        assert isinstance(doc_content, str)
    +2106        if not re.match("#.*[vV]alidation", doc_content):
    +2107            issue_warning(
    +2108                "No '# Validation' (sub)section found in {value}.",
    +2109                value=value,
    +2110                field="documentation",
    +2111            )
    +2112
    +2113        return value
    +2114
    +2115    inputs: NotEmpty[Sequence[InputTensorDescr]]
    +2116    """Describes the input tensors expected by this model."""
    +2117
    +2118    @field_validator("inputs", mode="after")
    +2119    @classmethod
    +2120    def _validate_input_axes(
    +2121        cls, inputs: Sequence[InputTensorDescr]
    +2122    ) -> Sequence[InputTensorDescr]:
    +2123        input_size_refs = cls._get_axes_with_independent_size(inputs)
    +2124
    +2125        for i, ipt in enumerate(inputs):
    +2126            valid_independent_refs: Dict[
    +2127                Tuple[TensorId, AxisId],
    +2128                Tuple[TensorDescr, AnyAxis, Union[int, ParameterizedSize]],
    +2129            ] = {
    +2130                **{
    +2131                    (ipt.id, a.id): (ipt, a, a.size)
    +2132                    for a in ipt.axes
    +2133                    if not isinstance(a, BatchAxis)
    +2134                    and isinstance(a.size, (int, ParameterizedSize))
    +2135                },
    +2136                **input_size_refs,
    +2137            }
    +2138            for a, ax in enumerate(ipt.axes):
    +2139                cls._validate_axis(
    +2140                    "inputs",
    +2141                    i=i,
    +2142                    tensor_id=ipt.id,
    +2143                    a=a,
    +2144                    axis=ax,
    +2145                    valid_independent_refs=valid_independent_refs,
    +2146                )
    +2147        return inputs
    +2148
    +2149    @staticmethod
    +2150    def _validate_axis(
    +2151        field_name: str,
    +2152        i: int,
    +2153        tensor_id: TensorId,
    +2154        a: int,
    +2155        axis: AnyAxis,
    +2156        valid_independent_refs: Dict[
    +2157            Tuple[TensorId, AxisId],
    +2158            Tuple[TensorDescr, AnyAxis, Union[int, ParameterizedSize]],
    +2159        ],
    +2160    ):
    +2161        if isinstance(axis, BatchAxis) or isinstance(
    +2162            axis.size, (int, ParameterizedSize, DataDependentSize)
    +2163        ):
    +2164            return
    +2165        elif not isinstance(axis.size, SizeReference):
    +2166            assert_never(axis.size)
    +2167
    +2168        # validate axis.size SizeReference
    +2169        ref = (axis.size.tensor_id, axis.size.axis_id)
    +2170        if ref not in valid_independent_refs:
    +2171            raise ValueError(
    +2172                "Invalid tensor axis reference at"
    +2173                + f" {field_name}[{i}].axes[{a}].size: {axis.size}."
    +2174            )
    +2175        if ref == (tensor_id, axis.id):
    +2176            raise ValueError(
    +2177                "Self-referencing not allowed for"
    +2178                + f" {field_name}[{i}].axes[{a}].size: {axis.size}"
    +2179            )
    +2180        if axis.type == "channel":
    +2181            if valid_independent_refs[ref][1].type != "channel":
    +2182                raise ValueError(
    +2183                    "A channel axis' size may only reference another fixed size"
    +2184                    + " channel axis."
    +2185                )
    +2186            if isinstance(axis.channel_names, str) and "{i}" in axis.channel_names:
    +2187                ref_size = valid_independent_refs[ref][2]
    +2188                assert isinstance(ref_size, int), (
    +2189                    "channel axis ref (another channel axis) has to specify fixed"
    +2190                    + " size"
    +2191                )
    +2192                generated_channel_names = [
    +2193                    Identifier(axis.channel_names.format(i=i))
    +2194                    for i in range(1, ref_size + 1)
    +2195                ]
    +2196                axis.channel_names = generated_channel_names
    +2197
    +2198        if (ax_unit := getattr(axis, "unit", None)) != (
    +2199            ref_unit := getattr(valid_independent_refs[ref][1], "unit", None)
    +2200        ):
    +2201            raise ValueError(
    +2202                "The units of an axis and its reference axis need to match, but"
    +2203                + f" '{ax_unit}' != '{ref_unit}'."
    +2204            )
    +2205        ref_axis = valid_independent_refs[ref][1]
    +2206        if isinstance(ref_axis, BatchAxis):
    +2207            raise ValueError(
    +2208                f"Invalid reference axis '{ref_axis.id}' for {tensor_id}.{axis.id}"
    +2209                + " (a batch axis is not allowed as reference)."
    +2210            )
    +2211
    +2212        if isinstance(axis, WithHalo):
    +2213            min_size = axis.size.get_size(axis, ref_axis, n=0)
    +2214            if (min_size - 2 * axis.halo) < 1:
    +2215                raise ValueError(
    +2216                    f"axis {axis.id} with minimum size {min_size} is too small for halo"
    +2217                    + f" {axis.halo}."
    +2218                )
    +2219
    +2220            input_halo = axis.halo * axis.scale / ref_axis.scale
    +2221            if input_halo != int(input_halo) or input_halo % 2 == 1:
    +2222                raise ValueError(
    +2223                    f"input_halo {input_halo} (output_halo {axis.halo} *"
    +2224                    + f" output_scale {axis.scale} / input_scale {ref_axis.scale})"
    +2225                    + f" is not an even integer for {tensor_id}.{axis.id}."
    +2226                )
    +2227
    +2228    @model_validator(mode="after")
    +2229    def _validate_test_tensors(self) -> Self:
    +2230        if not validation_context_var.get().perform_io_checks:
    +2231            return self
    +2232
    +2233        test_arrays = [
    +2234            load_array(descr.test_tensor.download().path)
    +2235            for descr in chain(self.inputs, self.outputs)
    +2236        ]
    +2237        tensors = {
    +2238            descr.id: (descr, array)
    +2239            for descr, array in zip(chain(self.inputs, self.outputs), test_arrays)
    +2240        }
    +2241        validate_tensors(tensors, tensor_origin="test_tensor")
    +2242        return self
    +2243
    +2244    @model_validator(mode="after")
    +2245    def _validate_tensor_references_in_proc_kwargs(self, info: ValidationInfo) -> Self:
    +2246        ipt_refs = {t.id for t in self.inputs}
    +2247        out_refs = {t.id for t in self.outputs}
    +2248        for ipt in self.inputs:
    +2249            for p in ipt.preprocessing:
    +2250                ref = p.kwargs.get("reference_tensor")
    +2251                if ref is None:
    +2252                    continue
    +2253                if ref not in ipt_refs:
    +2254                    raise ValueError(
    +2255                        f"`reference_tensor` '{ref}' not found. Valid input tensor"
    +2256                        + f" references are: {ipt_refs}."
    +2257                    )
    +2258
    +2259        for out in self.outputs:
    +2260            for p in out.postprocessing:
    +2261                ref = p.kwargs.get("reference_tensor")
    +2262                if ref is None:
    +2263                    continue
    +2264
    +2265                if ref not in ipt_refs and ref not in out_refs:
    +2266                    raise ValueError(
    +2267                        f"`reference_tensor` '{ref}' not found. Valid tensor references"
    +2268                        + f" are: {ipt_refs | out_refs}."
    +2269                    )
    +2270
    +2271        return self
    +2272
    +2273    # TODO: use validate funcs in validate_test_tensors
    +2274    # def validate_inputs(self, input_tensors: Mapping[TensorId, NDArray[Any]]) -> Mapping[TensorId, NDArray[Any]]:
    +2275
    +2276    name: Annotated[
    +2277        Annotated[
    +2278            str, RestrictCharacters(string.ascii_letters + string.digits + "_- ()")
    +2279        ],
    +2280        MinLen(5),
    +2281        MaxLen(128),
    +2282        warn(MaxLen(64), "Name longer than 64 characters.", INFO),
    +2283    ]
    +2284    """A human-readable name of this model.
    +2285    It should be no longer than 64 characters
    +2286    and may only contain letters, numbers, underscores, minus signs, parentheses and spaces.
    +2287    We recommend choosing a name that refers to the model's task and image modality.
    +2288    """
    +2289
    +2290    outputs: NotEmpty[Sequence[OutputTensorDescr]]
    +2291    """Describes the output tensors."""
    +2292
    +2293    @field_validator("outputs", mode="after")
    +2294    @classmethod
    +2295    def _validate_tensor_ids(
    +2296        cls, outputs: Sequence[OutputTensorDescr], info: ValidationInfo
    +2297    ) -> Sequence[OutputTensorDescr]:
    +2298        tensor_ids = [
    +2299            t.id for t in info.data.get("inputs", []) + info.data.get("outputs", [])
    +2300        ]
    +2301        duplicate_tensor_ids: List[str] = []
    +2302        seen: Set[str] = set()
    +2303        for t in tensor_ids:
    +2304            if t in seen:
    +2305                duplicate_tensor_ids.append(t)
    +2306
    +2307            seen.add(t)
    +2308
    +2309        if duplicate_tensor_ids:
    +2310            raise ValueError(f"Duplicate tensor ids: {duplicate_tensor_ids}")
    +2311
    +2312        return outputs
    +2313
    +2314    @staticmethod
    +2315    def _get_axes_with_parameterized_size(
    +2316        io: Union[Sequence[InputTensorDescr], Sequence[OutputTensorDescr]],
    +2317    ):
    +2318        return {
    +2319            f"{t.id}.{a.id}": (t, a, a.size)
    +2320            for t in io
    +2321            for a in t.axes
    +2322            if not isinstance(a, BatchAxis) and isinstance(a.size, ParameterizedSize)
    +2323        }
    +2324
    +2325    @staticmethod
    +2326    def _get_axes_with_independent_size(
    +2327        io: Union[Sequence[InputTensorDescr], Sequence[OutputTensorDescr]],
    +2328    ):
    +2329        return {
    +2330            (t.id, a.id): (t, a, a.size)
    +2331            for t in io
    +2332            for a in t.axes
    +2333            if not isinstance(a, BatchAxis)
    +2334            and isinstance(a.size, (int, ParameterizedSize))
    +2335        }
    +2336
    +2337    @field_validator("outputs", mode="after")
    +2338    @classmethod
    +2339    def _validate_output_axes(
    +2340        cls, outputs: List[OutputTensorDescr], info: ValidationInfo
    +2341    ) -> List[OutputTensorDescr]:
    +2342        input_size_refs = cls._get_axes_with_independent_size(
    +2343            info.data.get("inputs", [])
    +2344        )
    +2345        output_size_refs = cls._get_axes_with_independent_size(outputs)
    +2346
    +2347        for i, out in enumerate(outputs):
    +2348            valid_independent_refs: Dict[
    +2349                Tuple[TensorId, AxisId],
    +2350                Tuple[TensorDescr, AnyAxis, Union[int, ParameterizedSize]],
    +2351            ] = {
    +2352                **{
    +2353                    (out.id, a.id): (out, a, a.size)
    +2354                    for a in out.axes
    +2355                    if not isinstance(a, BatchAxis)
    +2356                    and isinstance(a.size, (int, ParameterizedSize))
    +2357                },
    +2358                **input_size_refs,
    +2359                **output_size_refs,
    +2360            }
    +2361            for a, ax in enumerate(out.axes):
    +2362                cls._validate_axis(
    +2363                    "outputs",
    +2364                    i,
    +2365                    out.id,
    +2366                    a,
    +2367                    ax,
    +2368                    valid_independent_refs=valid_independent_refs,
    +2369                )
    +2370
    +2371        return outputs
    +2372
    +2373    packaged_by: List[Author] = Field(default_factory=list)
    +2374    """The persons that have packaged and uploaded this model.
    +2375    Only required if those persons differ from the `authors`."""
    +2376
    +2377    parent: Optional[LinkedModel] = None
    +2378    """The model from which this model is derived, e.g. by fine-tuning the weights."""
    +2379
    +2380    # todo: add parent self check once we have `id`
    +2381    # @model_validator(mode="after")
    +2382    # def validate_parent_is_not_self(self) -> Self:
    +2383    #     if self.parent is not None and self.parent == self.id:
    +2384    #         raise ValueError("The model may not reference itself as parent model")
    +2385
    +2386    #     return self
    +2387
    +2388    run_mode: Annotated[
    +2389        Optional[RunMode],
    +2390        warn(None, "Run mode '{value}' has limited support across consumer softwares."),
    +2391    ] = None
    +2392    """Custom run mode for this model: for more complex prediction procedures like test time
    +2393    data augmentation that currently cannot be expressed in the specification.
    +2394    No standard run modes are defined yet."""
    +2395
    +2396    timestamp: Datetime = Datetime(datetime.now())
    +2397    """Timestamp in [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601) format
    +2398    with a few restrictions listed [here](https://docs.python.org/3/library/datetime.html#datetime.datetime.fromisoformat).
    +2399    (In Python a datetime object is valid, too)."""
    +2400
    +2401    training_data: Annotated[
    +2402        Union[None, LinkedDataset, DatasetDescr, DatasetDescr02],
    +2403        Field(union_mode="left_to_right"),
    +2404    ] = None
    +2405    """The dataset used to train this model"""
    +2406
    +2407    weights: Annotated[WeightsDescr, WrapSerializer(package_weights)]
    +2408    """The weights for this model.
    +2409    Weights can be given for different formats, but should otherwise be equivalent.
    +2410    The available weight formats determine which consumers can use this model."""
    +2411
    +2412    @model_validator(mode="after")
    +2413    def _add_default_cover(self) -> Self:
    +2414        if not validation_context_var.get().perform_io_checks or self.covers:
    +2415            return self
    +2416
    +2417        try:
    +2418            generated_covers = generate_covers(
    +2419                [(t, load_array(t.test_tensor.download().path)) for t in self.inputs],
    +2420                [(t, load_array(t.test_tensor.download().path)) for t in self.outputs],
    +2421            )
    +2422        except Exception as e:
    +2423            issue_warning(
    +2424                "Failed to generate cover image(s): {e}",
    +2425                value=self.covers,
    +2426                msg_context=dict(e=e),
    +2427                field="covers",
    +2428            )
    +2429        else:
    +2430            self.covers.extend(generated_covers)
    +2431
    +2432        return self
    +2433
    +2434    def get_input_test_arrays(self) -> List[NDArray[Any]]:
    +2435        data = [load_array(ipt.test_tensor.download().path) for ipt in self.inputs]
    +2436        assert all(isinstance(d, np.ndarray) for d in data)
    +2437        return data
    +2438
    +2439    def get_output_test_arrays(self) -> List[NDArray[Any]]:
    +2440        data = [load_array(out.test_tensor.download().path) for out in self.outputs]
    +2441        assert all(isinstance(d, np.ndarray) for d in data)
    +2442        return data
    +2443
    +2444    @staticmethod
    +2445    def get_batch_size(tensor_sizes: Mapping[TensorId, Mapping[AxisId, int]]) -> int:
    +2446        batch_size = 1
    +2447        tensor_with_batchsize: Optional[TensorId] = None
    +2448        for tid in tensor_sizes:
    +2449            for aid, s in tensor_sizes[tid].items():
    +2450                if aid != BATCH_AXIS_ID or s == 1 or s == batch_size:
    +2451                    continue
    +2452
    +2453                if batch_size != 1:
    +2454                    assert tensor_with_batchsize is not None
    +2455                    raise ValueError(
    +2456                        f"batch size mismatch for tensors '{tensor_with_batchsize}' ({batch_size}) and '{tid}' ({s})"
    +2457                    )
    +2458
    +2459                batch_size = s
    +2460                tensor_with_batchsize = tid
    +2461
    +2462        return batch_size
    +2463
    +2464    def get_output_tensor_sizes(
    +2465        self, input_sizes: Mapping[TensorId, Mapping[AxisId, int]]
    +2466    ) -> Dict[TensorId, Dict[AxisId, Union[int, _DataDepSize]]]:
    +2467        """Returns the tensor output sizes for given **input_sizes**.
    +2468        Only if **input_sizes** has a valid input shape, the tensor output size is exact.
    +2469        Otherwise it might be larger than the actual (valid) output"""
    +2470        batch_size = self.get_batch_size(input_sizes)
    +2471        ns = self.get_ns(input_sizes)
    +2472
    +2473        tensor_sizes = self.get_tensor_sizes(ns, batch_size=batch_size)
    +2474        return tensor_sizes.outputs
    +2475
    +2476    def get_ns(self, input_sizes: Mapping[TensorId, Mapping[AxisId, int]]):
    +2477        """get parameter `n` for each parameterized axis
    +2478        such that the valid input size is >= the given input size"""
    +2479        ret: Dict[Tuple[TensorId, AxisId], ParameterizedSize_N] = {}
    +2480        axes = {t.id: {a.id: a for a in t.axes} for t in self.inputs}
    +2481        for tid in input_sizes:
    +2482            for aid, s in input_sizes[tid].items():
    +2483                size_descr = axes[tid][aid].size
    +2484                if isinstance(size_descr, ParameterizedSize):
    +2485                    ret[(tid, aid)] = size_descr.get_n(s)
    +2486                elif size_descr is None or isinstance(size_descr, (int, SizeReference)):
    +2487                    pass
    +2488                else:
    +2489                    assert_never(size_descr)
    +2490
    +2491        return ret
    +2492
    +2493    def get_tensor_sizes(
    +2494        self, ns: Mapping[Tuple[TensorId, AxisId], ParameterizedSize_N], batch_size: int
    +2495    ) -> _TensorSizes:
    +2496        axis_sizes = self.get_axis_sizes(ns, batch_size=batch_size)
    +2497        return _TensorSizes(
    +2498            {
    +2499                t: {
    +2500                    aa: axis_sizes.inputs[(tt, aa)]
    +2501                    for tt, aa in axis_sizes.inputs
    +2502                    if tt == t
    +2503                }
    +2504                for t in {tt for tt, _ in axis_sizes.inputs}
    +2505            },
    +2506            {
    +2507                t: {
    +2508                    aa: axis_sizes.outputs[(tt, aa)]
    +2509                    for tt, aa in axis_sizes.outputs
    +2510                    if tt == t
    +2511                }
    +2512                for t in {tt for tt, _ in axis_sizes.outputs}
    +2513            },
    +2514        )
    +2515
    +2516    def get_axis_sizes(
    +2517        self,
    +2518        ns: Mapping[Tuple[TensorId, AxisId], ParameterizedSize_N],
    +2519        batch_size: Optional[int] = None,
    +2520        *,
    +2521        max_input_shape: Optional[Mapping[Tuple[TensorId, AxisId], int]] = None,
    +2522    ) -> _AxisSizes:
    +2523        """Determine input and output block shape for scale factors **ns**
    +2524        of parameterized input sizes.
    +2525
    +2526        Args:
    +2527            ns: Scale factor `n` for each axis (keyed by (tensor_id, axis_id))
    +2528                that is parameterized as `size = min + n * step`.
    +2529            batch_size: The desired size of the batch dimension.
    +2530                If given **batch_size** overwrites any batch size present in
    +2531                **max_input_shape**. Default 1.
    +2532            max_input_shape: Limits the derived block shapes.
    +2533                Each axis for which the input size, parameterized by `n`, is larger
    +2534                than **max_input_shape** is set to the minimal value `n_min` for which
    +2535                this is still true.
    +2536                Use this for small input samples or large values of **ns**.
    +2537                Or simply whenever you know the full input shape.
    +2538
    +2539        Returns:
    +2540            Resolved axis sizes for model inputs and outputs.
    +2541        """
    +2542        max_input_shape = max_input_shape or {}
    +2543        if batch_size is None:
    +2544            for (_t_id, a_id), s in max_input_shape.items():
    +2545                if a_id == BATCH_AXIS_ID:
    +2546                    batch_size = s
    +2547                    break
    +2548            else:
    +2549                batch_size = 1
    +2550
    +2551        all_axes = {
    +2552            t.id: {a.id: a for a in t.axes} for t in chain(self.inputs, self.outputs)
    +2553        }
    +2554
    +2555        inputs: Dict[Tuple[TensorId, AxisId], int] = {}
    +2556        outputs: Dict[Tuple[TensorId, AxisId], Union[int, _DataDepSize]] = {}
    +2557
    +2558        def get_axis_size(a: Union[InputAxis, OutputAxis]):
    +2559            if isinstance(a, BatchAxis):
    +2560                if (t_descr.id, a.id) in ns:
    +2561                    logger.warning(
    +2562                        "Ignoring unexpected size increment factor (n) for batch axis"
    +2563                        + " of tensor '{}'.",
    +2564                        t_descr.id,
    +2565                    )
    +2566                return batch_size
    +2567            elif isinstance(a.size, int):
    +2568                if (t_descr.id, a.id) in ns:
    +2569                    logger.warning(
    +2570                        "Ignoring unexpected size increment factor (n) for fixed size"
    +2571                        + " axis '{}' of tensor '{}'.",
    +2572                        a.id,
    +2573                        t_descr.id,
    +2574                    )
    +2575                return a.size
    +2576            elif isinstance(a.size, ParameterizedSize):
    +2577                if (t_descr.id, a.id) not in ns:
    +2578                    raise ValueError(
    +2579                        "Size increment factor (n) missing for parametrized axis"
    +2580                        + f" '{a.id}' of tensor '{t_descr.id}'."
    +2581                    )
    +2582                n = ns[(t_descr.id, a.id)]
    +2583                s_max = max_input_shape.get((t_descr.id, a.id))
    +2584                if s_max is not None:
    +2585                    n = min(n, a.size.get_n(s_max))
    +2586
    +2587                return a.size.get_size(n)
    +2588
    +2589            elif isinstance(a.size, SizeReference):
    +2590                if (t_descr.id, a.id) in ns:
    +2591                    logger.warning(
    +2592                        "Ignoring unexpected size increment factor (n) for axis '{}'"
    +2593                        + " of tensor '{}' with size reference.",
    +2594                        a.id,
    +2595                        t_descr.id,
    +2596                    )
    +2597                assert not isinstance(a, BatchAxis)
    +2598                ref_axis = all_axes[a.size.tensor_id][a.size.axis_id]
    +2599                assert not isinstance(ref_axis, BatchAxis)
    +2600                ref_key = (a.size.tensor_id, a.size.axis_id)
    +2601                ref_size = inputs.get(ref_key, outputs.get(ref_key))
    +2602                assert ref_size is not None, ref_key
    +2603                assert not isinstance(ref_size, _DataDepSize), ref_key
    +2604                return a.size.get_size(
    +2605                    axis=a,
    +2606                    ref_axis=ref_axis,
    +2607                    ref_size=ref_size,
    +2608                )
    +2609            elif isinstance(a.size, DataDependentSize):
    +2610                if (t_descr.id, a.id) in ns:
    +2611                    logger.warning(
    +2612                        "Ignoring unexpected increment factor (n) for data dependent"
    +2613                        + " size axis '{}' of tensor '{}'.",
    +2614                        a.id,
    +2615                        t_descr.id,
    +2616                    )
    +2617                return _DataDepSize(a.size.min, a.size.max)
    +2618            else:
    +2619                assert_never(a.size)
    +2620
    +2621        # first resolve all but the `SizeReference` input sizes
    +2622        for t_descr in self.inputs:
    +2623            for a in t_descr.axes:
    +2624                if not isinstance(a.size, SizeReference):
    +2625                    s = get_axis_size(a)
    +2626                    assert not isinstance(s, _DataDepSize)
    +2627                    inputs[t_descr.id, a.id] = s
    +2628
    +2629        # resolve all other input axis sizes
    +2630        for t_descr in self.inputs:
    +2631            for a in t_descr.axes:
    +2632                if isinstance(a.size, SizeReference):
    +2633                    s = get_axis_size(a)
    +2634                    assert not isinstance(s, _DataDepSize)
    +2635                    inputs[t_descr.id, a.id] = s
    +2636
    +2637        # resolve all output axis sizes
    +2638        for t_descr in self.outputs:
    +2639            for a in t_descr.axes:
    +2640                assert not isinstance(a.size, ParameterizedSize)
    +2641                s = get_axis_size(a)
    +2642                outputs[t_descr.id, a.id] = s
    +2643
    +2644        return _AxisSizes(inputs=inputs, outputs=outputs)
    +2645
    +2646    @model_validator(mode="before")
    +2647    @classmethod
    +2648    def _convert(cls, data: Dict[str, Any]) -> Dict[str, Any]:
    +2649        if (
    +2650            data.get("type") == "model"
    +2651            and isinstance(fv := data.get("format_version"), str)
    +2652            and fv.count(".") == 2
    +2653        ):
    +2654            fv_parts = fv.split(".")
    +2655            if any(not p.isdigit() for p in fv_parts):
    +2656                return data
    +2657
    +2658            fv_tuple = tuple(map(int, fv_parts))
    +2659
    +2660            assert cls.implemented_format_version_tuple[0:2] == (0, 5)
    +2661            if fv_tuple[:2] in ((0, 3), (0, 4)):
    +2662                m04 = _ModelDescr_v0_4.load(data)
    +2663                if not isinstance(m04, InvalidDescr):
    +2664                    return _model_conv.convert_as_dict(m04)
    +2665            elif fv_tuple[:2] == (0, 5):
    +2666                # bump patch version
    +2667                data["format_version"] = cls.implemented_format_version
    +2668
    +2669        return data
    +
    + + +

    Specification of the fields used in a bioimage.io-compliant RDF to describe AI models with pretrained weights. +These fields are typically stored in a YAML file which we call a model resource description file (model RDF).

    +
    + + +
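A hedged sketch of how a loaded 0.5 `ModelDescr` is typically inspected programmatically; the source URL is a placeholder:

```python
from bioimageio.spec import load_model_description
from bioimageio.spec.model.v0_5 import ModelDescr

model = load_model_description(
    "https://example.com/my-model/rdf.yaml",  # placeholder model RDF source
    format_version="latest",
)
assert isinstance(model, ModelDescr)  # a 0.5 description after conversion

for ipt in model.inputs:
    # each axis carries an id, a type ("batch", "channel", "space", ...) and a size description
    print(ipt.id, [(str(a.id), a.type) for a in ipt.axes])
```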
    +
    + format_version: Literal['0.5.3'] + + +
    + + +

    Version of the bioimage.io model description specification used. +When creating a new model always use the latest micro/patch version described here. +The format_version is important for any consumer software to understand how to parse the fields.

    +
    + + +
    +
    +
    + type: Literal['model'] + + +
    + + +

    Specialized resource type 'model'

    +
    + + +
    +
    +
    + id: Optional[bioimageio.spec.model.v0_5.ModelId] + + +
    + + +

    bioimage.io-wide unique resource identifier +assigned by bioimage.io; version unspecific.

    +
    + + +
    +
    +
    + authors: Annotated[List[bioimageio.spec.generic.v0_3.Author], MinLen(min_length=1)] + + +
    + + +

    The authors are the creators of the model RDF and the primary points of contact.

    +
    + + +
    +
    +
    + documentation: Annotated[Union[Annotated[pathlib.Path, PathType(path_type='file'), Predicate(is_absolute)], bioimageio.spec._internal.io.RelativeFilePath, bioimageio.spec._internal.url.HttpUrl], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')]), AfterValidator(func=<function _validate_md_suffix at 0x7f5380dc7e20>), PlainSerializer(func=<function _package at 0x7f538192d620>, return_type=PydanticUndefined, when_used='unless-none'), FieldInfo(annotation=NoneType, required=True, examples=['https://raw.githubusercontent.com/bioimage-io/spec-bioimage-io/main/example_descriptions/models/unet2d_nuclei_broad/README.md', 'README.md'])] + + +
    + + +

    ∈📦 URL or relative path to a markdown file with additional documentation. +The recommended documentation file name is README.md. An .md suffix is mandatory. +The documentation should include a '#[#] Validation' (sub)section +with details on how to quantitatively validate the model on unseen data.

    +
    + + +
    +
    +
    + inputs: Annotated[Sequence[bioimageio.spec.model.v0_5.InputTensorDescr], MinLen(min_length=1)] + + +
    + + +

    Describes the input tensors expected by this model.

    +
    + + +
    +
    +
    + name: Annotated[str, RestrictCharacters(alphabet='abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_- ()'), MinLen(min_length=5), MaxLen(max_length=128), AfterWarner(func=<function as_warning.<locals>.wrapper at 0x7f537062a0c0>, severity=20, msg='Name longer than 64 characters.', context={'typ': Annotated[Any, MaxLen(max_length=64)]})] + + +
    + + +

    A human-readable name of this model. +It should be no longer than 64 characters +and may only contain letters, numbers, underscores, minus signs, parentheses and spaces. +We recommend choosing a name that refers to the model's task and image modality.

    +
    + + +
    +
    +
    + outputs: Annotated[Sequence[bioimageio.spec.model.v0_5.OutputTensorDescr], MinLen(min_length=1)] + + +
    + + +

    Describes the output tensors.

    +
    + + +
    +
    +
    + packaged_by: List[bioimageio.spec.generic.v0_3.Author] + + +
    + + +

    The persons that have packaged and uploaded this model. +Only required if those persons differ from the authors.

    +
    + + +
    +
    +
    + parent: Optional[bioimageio.spec.model.v0_5.LinkedModel] + + +
    + + +

    The model from which this model is derived, e.g. by fine-tuning the weights.

    +
    + + +
    +
    +
    + run_mode: Annotated[Optional[bioimageio.spec.model.v0_4.RunMode], AfterWarner(func=<function as_warning.<locals>.wrapper at 0x7f537062a520>, severity=30, msg="Run mode '{value}' has limited support across consumer softwares.", context={'typ': None})] + + +
    + + +

    Custom run mode for this model: for more complex prediction procedures like test time +data augmentation that currently cannot be expressed in the specification. +No standard run modes are defined yet.

    +
    + + +
    +
    +
    + timestamp: bioimageio.spec._internal.types.Datetime + + +
    + + +

    Timestamp in ISO 8601 format +with a few restrictions listed here. +(In Python a datetime object is valid, too).

    +
    + + +
    +
    +
    + training_data: Annotated[Union[NoneType, bioimageio.spec.dataset.v0_3.LinkedDataset, DatasetDescr, bioimageio.spec.dataset.v0_2.DatasetDescr], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])] + + +
    + + +

    The dataset used to train this model

    +
    + + +
    +
    +
    + weights: Annotated[bioimageio.spec.model.v0_5.WeightsDescr, WrapSerializer(func=<function package_weights at 0x7f537f154360>, return_type=PydanticUndefined, when_used='always')] + + +
    + + +

    The weights for this model. +Weights can be given for different formats, but should otherwise be equivalent. +The available weight formats determine which consumers can use this model.

    +
    + + +
    +
    + +
    + + def + get_input_test_arrays(self) -> List[numpy.ndarray[Any, numpy.dtype[Any]]]: + + + +
    + +
    2434    def get_input_test_arrays(self) -> List[NDArray[Any]]:
    +2435        data = [load_array(ipt.test_tensor.download().path) for ipt in self.inputs]
    +2436        assert all(isinstance(d, np.ndarray) for d in data)
    +2437        return data
    +
    + + + + +
    +
    + +
    + + def + get_output_test_arrays(self) -> List[numpy.ndarray[Any, numpy.dtype[Any]]]: + + + +
    + +
    2439    def get_output_test_arrays(self) -> List[NDArray[Any]]:
    +2440        data = [load_array(out.test_tensor.download().path) for out in self.outputs]
    +2441        assert all(isinstance(d, np.ndarray) for d in data)
    +2442        return data
    +
    + + + + +
    +
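Illustrative use of the two test-array helpers above, assuming `model` is a loaded 0.5 `ModelDescr` (see `load_model_description`); downloading the test tensors requires network access:

```python
import numpy as np

# `model` is assumed to be a loaded bioimageio.spec.model.v0_5.ModelDescr
input_arrays = model.get_input_test_arrays()    # one numpy array per input tensor
output_arrays = model.get_output_test_arrays()  # one numpy array per output tensor

for descr, arr in zip(model.inputs, input_arrays):
    assert isinstance(arr, np.ndarray)
    print(descr.id, arr.shape, arr.dtype)
```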
    + +
    +
    @staticmethod
    + + def + get_batch_size( tensor_sizes: Mapping[bioimageio.spec.model.v0_5.TensorId, Mapping[bioimageio.spec.model.v0_5.AxisId, int]]) -> int: + + + +
    + +
    2444    @staticmethod
    +2445    def get_batch_size(tensor_sizes: Mapping[TensorId, Mapping[AxisId, int]]) -> int:
    +2446        batch_size = 1
    +2447        tensor_with_batchsize: Optional[TensorId] = None
    +2448        for tid in tensor_sizes:
    +2449            for aid, s in tensor_sizes[tid].items():
    +2450                if aid != BATCH_AXIS_ID or s == 1 or s == batch_size:
    +2451                    continue
    +2452
    +2453                if batch_size != 1:
    +2454                    assert tensor_with_batchsize is not None
    +2455                    raise ValueError(
    +2456                        f"batch size mismatch for tensors '{tensor_with_batchsize}' ({batch_size}) and '{tid}' ({s})"
    +2457                    )
    +2458
    +2459                batch_size = s
    +2460                tensor_with_batchsize = tid
    +2461
    +2462        return batch_size
    +
    + + + + +
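A small, self-contained sketch of `get_batch_size`; the tensor and axis ids are made up:

```python
from bioimageio.spec.model.v0_5 import AxisId, ModelDescr, TensorId

tensor_sizes = {  # hypothetical sizes for a single input tensor "raw"
    TensorId("raw"): {AxisId("batch"): 4, AxisId("x"): 512, AxisId("y"): 512},
}
print(ModelDescr.get_batch_size(tensor_sizes))  # -> 4
```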
    +
    + +
    + + def + get_output_tensor_sizes( self, input_sizes: Mapping[bioimageio.spec.model.v0_5.TensorId, Mapping[bioimageio.spec.model.v0_5.AxisId, int]]) -> Dict[bioimageio.spec.model.v0_5.TensorId, Dict[bioimageio.spec.model.v0_5.AxisId, Union[int, bioimageio.spec.model.v0_5._DataDepSize]]]: + + + +
    + +
    2464    def get_output_tensor_sizes(
    +2465        self, input_sizes: Mapping[TensorId, Mapping[AxisId, int]]
    +2466    ) -> Dict[TensorId, Dict[AxisId, Union[int, _DataDepSize]]]:
    +2467        """Returns the tensor output sizes for given **input_sizes**.
    +2468        Only if **input_sizes** has a valid input shape, the tensor output size is exact.
    +2469        Otherwise it might be larger than the actual (valid) output"""
    +2470        batch_size = self.get_batch_size(input_sizes)
    +2471        ns = self.get_ns(input_sizes)
    +2472
    +2473        tensor_sizes = self.get_tensor_sizes(ns, batch_size=batch_size)
    +2474        return tensor_sizes.outputs
    +
    + + +

    Returns the tensor output sizes for given input_sizes. +The output sizes are exact only if input_sizes describes a valid input shape; +otherwise they may be larger than the actual (valid) output.

    +
    + + +
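A hedged sketch, assuming `model` is a loaded 0.5 `ModelDescr` whose input tensor really is called `raw` with spatial axes `x`/`y` (adapt the ids to your model):

```python
from bioimageio.spec.model.v0_5 import AxisId, TensorId

# `model` is assumed to be a loaded bioimageio.spec.model.v0_5.ModelDescr
input_sizes = {
    TensorId("raw"): {AxisId("batch"): 1, AxisId("x"): 512, AxisId("y"): 512},
}
output_sizes = model.get_output_tensor_sizes(input_sizes)
for tid, axis_sizes in output_sizes.items():
    print(tid, axis_sizes)
```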
    +
    + +
    + + def + get_ns( self, input_sizes: Mapping[bioimageio.spec.model.v0_5.TensorId, Mapping[bioimageio.spec.model.v0_5.AxisId, int]]): + + + +
    + +
    2476    def get_ns(self, input_sizes: Mapping[TensorId, Mapping[AxisId, int]]):
    +2477        """get parameter `n` for each parameterized axis
    +2478        such that the valid input size is >= the given input size"""
    +2479        ret: Dict[Tuple[TensorId, AxisId], ParameterizedSize_N] = {}
    +2480        axes = {t.id: {a.id: a for a in t.axes} for t in self.inputs}
    +2481        for tid in input_sizes:
    +2482            for aid, s in input_sizes[tid].items():
    +2483                size_descr = axes[tid][aid].size
    +2484                if isinstance(size_descr, ParameterizedSize):
    +2485                    ret[(tid, aid)] = size_descr.get_n(s)
    +2486                elif size_descr is None or isinstance(size_descr, (int, SizeReference)):
    +2487                    pass
    +2488                else:
    +2489                    assert_never(size_descr)
    +2490
    +2491        return ret
    +
    + + +

    get parameter n for each parameterized axis +such that the valid input size is >= the given input size

    +
    + + +
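Illustrative call, again assuming `model` is a loaded 0.5 `ModelDescr` and using hypothetical tensor/axis ids:

```python
from bioimageio.spec.model.v0_5 import AxisId, TensorId

# `model` is assumed to be a loaded bioimageio.spec.model.v0_5.ModelDescr
ns = model.get_ns({TensorId("raw"): {AxisId("x"): 700, AxisId("y"): 700}})
# maps (tensor_id, axis_id) -> n for every parameterized axis,
# chosen such that min + n * step >= the requested size
print(ns)
```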
    +
    + +
    + + def + get_tensor_sizes( self, ns: Mapping[Tuple[bioimageio.spec.model.v0_5.TensorId, bioimageio.spec.model.v0_5.AxisId], int], batch_size: int) -> bioimageio.spec.model.v0_5._TensorSizes: + + + +
    + +
    2493    def get_tensor_sizes(
    +2494        self, ns: Mapping[Tuple[TensorId, AxisId], ParameterizedSize_N], batch_size: int
    +2495    ) -> _TensorSizes:
    +2496        axis_sizes = self.get_axis_sizes(ns, batch_size=batch_size)
    +2497        return _TensorSizes(
    +2498            {
    +2499                t: {
    +2500                    aa: axis_sizes.inputs[(tt, aa)]
    +2501                    for tt, aa in axis_sizes.inputs
    +2502                    if tt == t
    +2503                }
    +2504                for t in {tt for tt, _ in axis_sizes.inputs}
    +2505            },
    +2506            {
    +2507                t: {
    +2508                    aa: axis_sizes.outputs[(tt, aa)]
    +2509                    for tt, aa in axis_sizes.outputs
    +2510                    if tt == t
    +2511                }
    +2512                for t in {tt for tt, _ in axis_sizes.outputs}
    +2513            },
    +2514        )
    +
    + + + + +
    +
    + +
    + + def + get_axis_sizes( self, ns: Mapping[Tuple[bioimageio.spec.model.v0_5.TensorId, bioimageio.spec.model.v0_5.AxisId], int], batch_size: Optional[int] = None, *, max_input_shape: Optional[Mapping[Tuple[bioimageio.spec.model.v0_5.TensorId, bioimageio.spec.model.v0_5.AxisId], int]] = None) -> bioimageio.spec.model.v0_5._AxisSizes: + + + +
    + +
    2516    def get_axis_sizes(
    +2517        self,
    +2518        ns: Mapping[Tuple[TensorId, AxisId], ParameterizedSize_N],
    +2519        batch_size: Optional[int] = None,
    +2520        *,
    +2521        max_input_shape: Optional[Mapping[Tuple[TensorId, AxisId], int]] = None,
    +2522    ) -> _AxisSizes:
    +2523        """Determine input and output block shape for scale factors **ns**
    +2524        of parameterized input sizes.
    +2525
    +2526        Args:
    +2527            ns: Scale factor `n` for each axis (keyed by (tensor_id, axis_id))
    +2528                that is parameterized as `size = min + n * step`.
    +2529            batch_size: The desired size of the batch dimension.
    +2530                If given **batch_size** overwrites any batch size present in
    +2531                **max_input_shape**. Default 1.
    +2532            max_input_shape: Limits the derived block shapes.
    +2533                Each axis for which the input size, parameterized by `n`, is larger
    +2534                than **max_input_shape** is set to the minimal value `n_min` for which
    +2535                this is still true.
    +2536                Use this for small input samples or large values of **ns**.
    +2537                Or simply whenever you know the full input shape.
    +2538
    +2539        Returns:
    +2540            Resolved axis sizes for model inputs and outputs.
    +2541        """
    +2542        max_input_shape = max_input_shape or {}
    +2543        if batch_size is None:
    +2544            for (_t_id, a_id), s in max_input_shape.items():
    +2545                if a_id == BATCH_AXIS_ID:
    +2546                    batch_size = s
    +2547                    break
    +2548            else:
    +2549                batch_size = 1
    +2550
    +2551        all_axes = {
    +2552            t.id: {a.id: a for a in t.axes} for t in chain(self.inputs, self.outputs)
    +2553        }
    +2554
    +2555        inputs: Dict[Tuple[TensorId, AxisId], int] = {}
    +2556        outputs: Dict[Tuple[TensorId, AxisId], Union[int, _DataDepSize]] = {}
    +2557
    +2558        def get_axis_size(a: Union[InputAxis, OutputAxis]):
    +2559            if isinstance(a, BatchAxis):
    +2560                if (t_descr.id, a.id) in ns:
    +2561                    logger.warning(
    +2562                        "Ignoring unexpected size increment factor (n) for batch axis"
    +2563                        + " of tensor '{}'.",
    +2564                        t_descr.id,
    +2565                    )
    +2566                return batch_size
    +2567            elif isinstance(a.size, int):
    +2568                if (t_descr.id, a.id) in ns:
    +2569                    logger.warning(
    +2570                        "Ignoring unexpected size increment factor (n) for fixed size"
    +2571                        + " axis '{}' of tensor '{}'.",
    +2572                        a.id,
    +2573                        t_descr.id,
    +2574                    )
    +2575                return a.size
    +2576            elif isinstance(a.size, ParameterizedSize):
    +2577                if (t_descr.id, a.id) not in ns:
    +2578                    raise ValueError(
    +2579                        "Size increment factor (n) missing for parametrized axis"
    +2580                        + f" '{a.id}' of tensor '{t_descr.id}'."
    +2581                    )
    +2582                n = ns[(t_descr.id, a.id)]
    +2583                s_max = max_input_shape.get((t_descr.id, a.id))
    +2584                if s_max is not None:
    +2585                    n = min(n, a.size.get_n(s_max))
    +2586
    +2587                return a.size.get_size(n)
    +2588
    +2589            elif isinstance(a.size, SizeReference):
    +2590                if (t_descr.id, a.id) in ns:
    +2591                    logger.warning(
    +2592                        "Ignoring unexpected size increment factor (n) for axis '{}'"
    +2593                        + " of tensor '{}' with size reference.",
    +2594                        a.id,
    +2595                        t_descr.id,
    +2596                    )
    +2597                assert not isinstance(a, BatchAxis)
    +2598                ref_axis = all_axes[a.size.tensor_id][a.size.axis_id]
    +2599                assert not isinstance(ref_axis, BatchAxis)
    +2600                ref_key = (a.size.tensor_id, a.size.axis_id)
    +2601                ref_size = inputs.get(ref_key, outputs.get(ref_key))
    +2602                assert ref_size is not None, ref_key
    +2603                assert not isinstance(ref_size, _DataDepSize), ref_key
    +2604                return a.size.get_size(
    +2605                    axis=a,
    +2606                    ref_axis=ref_axis,
    +2607                    ref_size=ref_size,
    +2608                )
    +2609            elif isinstance(a.size, DataDependentSize):
    +2610                if (t_descr.id, a.id) in ns:
    +2611                    logger.warning(
    +2612                        "Ignoring unexpected increment factor (n) for data dependent"
    +2613                        + " size axis '{}' of tensor '{}'.",
    +2614                        a.id,
    +2615                        t_descr.id,
    +2616                    )
    +2617                return _DataDepSize(a.size.min, a.size.max)
    +2618            else:
    +2619                assert_never(a.size)
    +2620
    +2621        # first resolve all but the `SizeReference` input sizes
    +2622        for t_descr in self.inputs:
    +2623            for a in t_descr.axes:
    +2624                if not isinstance(a.size, SizeReference):
    +2625                    s = get_axis_size(a)
    +2626                    assert not isinstance(s, _DataDepSize)
    +2627                    inputs[t_descr.id, a.id] = s
    +2628
    +2629        # resolve all other input axis sizes
    +2630        for t_descr in self.inputs:
    +2631            for a in t_descr.axes:
    +2632                if isinstance(a.size, SizeReference):
    +2633                    s = get_axis_size(a)
    +2634                    assert not isinstance(s, _DataDepSize)
    +2635                    inputs[t_descr.id, a.id] = s
    +2636
    +2637        # resolve all output axis sizes
    +2638        for t_descr in self.outputs:
    +2639            for a in t_descr.axes:
    +2640                assert not isinstance(a.size, ParameterizedSize)
    +2641                s = get_axis_size(a)
    +2642                outputs[t_descr.id, a.id] = s
    +2643
    +2644        return _AxisSizes(inputs=inputs, outputs=outputs)
    +
    + + +

    Determine input and output block shape for scale factors ns +of parameterized input sizes.

    + +
    Arguments:
    + +
      +
    • ns: Scale factor n for each axis (keyed by (tensor_id, axis_id)) +that is parameterized as size = min + n * step.
    • +
    • batch_size: The desired size of the batch dimension. +If given, batch_size overrides any batch size present in +max_input_shape. Defaults to 1.
    • +
    • max_input_shape: Limits the derived block shapes. +For each axis whose input size, parameterized by n, would exceed +max_input_shape, the largest n for which the size still fits +within max_input_shape is used instead. +Use this for small input samples or large values of ns, +or simply whenever you know the full input shape.
    • +
    + +
    Returns:
    + +
    +

    Resolved axis sizes for model inputs and outputs.

    +
    +
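A minimal usage sketch, not part of the generated docs above: it assumes the method documented here is `ModelDescr.get_axis_sizes` from `bioimageio.spec.model.v0_5` (the listing starts mid-signature, so the name is inferred), and that the model has an input tensor `"input"` with parameterized `x`/`y` axes; the URL is a placeholder.

```python
from bioimageio.spec import load_description
from bioimageio.spec.model.v0_5 import AxisId, ModelDescr, TensorId

rd = load_description("https://example.com/model/rdf.yaml")  # placeholder URL
assert isinstance(rd, ModelDescr)  # assumes a model in format 0.5
ns = {
    (TensorId("input"), AxisId("x")): 7,  # size = min + 7 * step
    (TensorId("input"), AxisId("y")): 7,
}
axis_sizes = rd.get_axis_sizes(ns, batch_size=1)
print(axis_sizes.inputs)   # {(tensor_id, axis_id): resolved input size, ...}
print(axis_sizes.outputs)  # resolved output sizes (data-dependent ones as min/max)
```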
    + + +
    +
    +
    + implemented_format_version: ClassVar[str] = +'0.5.3' + + +
    + + + + +
    +
    +
    + implemented_format_version_tuple: ClassVar[Tuple[int, int, int]] = +(0, 5, 3) + + +
    + + + + +
    +
    + +
    + + def + model_post_init(self: pydantic.main.BaseModel, context: Any, /) -> None: + + + +
    + +
    124                    def wrapped_model_post_init(self: BaseModel, context: Any, /) -> None:
    +125                        """We need to both initialize private attributes and call the user-defined model_post_init
    +126                        method.
    +127                        """
    +128                        init_private_attributes(self, context)
    +129                        original_model_post_init(self, context)
    +
    + + +

    We need to both initialize private attributes and call the user-defined model_post_init +method.

    +
    + + +
    + +
    +
    + +
    + + class + NotebookDescr(bioimageio.spec.generic.v0_3.GenericDescrBase): + + + +
    + +
    30class NotebookDescr(GenericDescrBase, title="bioimage.io notebook specification"):
    +31    """Bioimage.io description of a Jupyter notebook."""
    +32
    +33    type: Literal["notebook"] = "notebook"
    +34
    +35    id: Optional[NotebookId] = None
    +36    """bioimage.io-wide unique resource identifier
    +37    assigned by bioimage.io; version **un**specific."""
    +38
    +39    parent: Optional[NotebookId] = None
    +40    """The description from which this one is derived"""
    +41
    +42    source: NotebookSource
    +43    """The Jupyter notebook"""
    +
    + + +

    Bioimage.io description of a Jupyter notebook.

    +
    + + +
    +
    + type: Literal['notebook'] + + +
    + + + + +
    +
    +
    + id: Optional[bioimageio.spec.notebook.v0_3.NotebookId] + + +
    + + +

    bioimage.io-wide unique resource identifier +assigned by bioimage.io; version unspecific.

    +
    + + +
    +
    +
    + parent: Optional[bioimageio.spec.notebook.v0_3.NotebookId] + + +
    + + +

    The description from which this one is derived

    +
    + + +
    +
    +
    + source: Union[Annotated[bioimageio.spec._internal.url.HttpUrl, WithSuffix(suffix='.ipynb', case_sensitive=True)], Annotated[pathlib.Path, PathType(path_type='file'), Predicate(is_absolute), WithSuffix(suffix='.ipynb', case_sensitive=True)], Annotated[bioimageio.spec._internal.io.RelativeFilePath, WithSuffix(suffix='.ipynb', case_sensitive=True)]] + + +
    + + +

    The Jupyter notebook

    +
    + + +
    +
    +
    + implemented_format_version: ClassVar[str] = +'0.3.0' + + +
    + + + + +
    +
    +
    + implemented_format_version_tuple: ClassVar[Tuple[int, int, int]] = +(0, 3, 0) + + +
    + + + + +
    +
    + +
    + + def + model_post_init(self: pydantic.main.BaseModel, context: Any, /) -> None: + + + +
    + +
    124                    def wrapped_model_post_init(self: BaseModel, context: Any, /) -> None:
    +125                        """We need to both initialize private attributes and call the user-defined model_post_init
    +126                        method.
    +127                        """
    +128                        init_private_attributes(self, context)
    +129                        original_model_post_init(self, context)
    +
    + + +

    We need to both initialize private attributes and call the user-defined model_post_init +method.

    +
    + + +
    + +
    +
    +
    + ResourceDescr = + + typing.Union[typing.Annotated[typing.Union[typing.Annotated[typing.Union[bioimageio.spec.application.v0_2.ApplicationDescr, ApplicationDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], typing.Annotated[typing.Union[bioimageio.spec.dataset.v0_2.DatasetDescr, DatasetDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], typing.Annotated[typing.Union[bioimageio.spec.model.v0_4.ModelDescr, ModelDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], typing.Annotated[typing.Union[NotebookDescr, NotebookDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], typing.Annotated[typing.Union[bioimageio.spec.generic.v0_2.GenericDescr, GenericDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)]] + + +
    + + + + +
    +
    + +
    + + def + save_bioimageio_package_as_folder( source: Union[Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])], str, Annotated[pydantic_core._pydantic_core.Url, UrlConstraints(max_length=2083, allowed_schemes=['http', 'https'], host_required=None, default_host=None, default_port=None, default_path=None)], Dict[str, YamlValue], Annotated[Union[Annotated[Union[bioimageio.spec.application.v0_2.ApplicationDescr, ApplicationDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.dataset.v0_2.DatasetDescr, DatasetDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.model.v0_4.ModelDescr, ModelDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[NotebookDescr, NotebookDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.generic.v0_2.GenericDescr, GenericDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], /, *, output_path: Union[Annotated[pathlib.Path, PathType(path_type='new')], Annotated[pathlib.Path, PathType(path_type='dir')], NoneType] = None, weights_priority_order: Optional[Sequence[Literal['keras_hdf5', 'onnx', 'pytorch_state_dict', 'tensorflow_js', 'tensorflow_saved_model_bundle', 'torchscript']]] = None) -> Annotated[pathlib.Path, PathType(path_type='dir')]: + + + +
    + +
    121def save_bioimageio_package_as_folder(
    +122    source: Union[BioimageioYamlSource, ResourceDescr],
    +123    /,
    +124    *,
    +125    output_path: Union[NewPath, DirectoryPath, None] = None,
    +126    weights_priority_order: Optional[  # model only
    +127        Sequence[
    +128            Literal[
    +129                "keras_hdf5",
    +130                "onnx",
    +131                "pytorch_state_dict",
    +132                "tensorflow_js",
    +133                "tensorflow_saved_model_bundle",
    +134                "torchscript",
    +135            ]
    +136        ]
    +137    ] = None,
    +138) -> DirectoryPath:
    +139    """Write the content of a bioimage.io resource package to a folder.
    +140
    +141    Args:
    +142        source: bioimageio resource description
    +143        output_path: file path to write package to
    +144        weights_priority_order: If given, only the first weights format present in the model is included.
    +145                                If none of the prioritized weights formats is found, all are included.
    +146
    +147    Returns:
    +148        directory path to bioimageio package folder
    +149    """
    +150    package_content = _prepare_resource_package(
    +151        source,
    +152        weights_priority_order=weights_priority_order,
    +153    )
    +154    if output_path is None:
    +155        output_path = Path(mkdtemp())
    +156    else:
    +157        output_path = Path(output_path)
    +158
    +159    output_path.mkdir(exist_ok=True, parents=True)
    +160    for name, src in package_content.items():
    +161        if isinstance(src, collections.abc.Mapping):
    +162            write_yaml(cast(YamlValue, src), output_path / name)
    +163        elif isinstance(src, ZipPath):
    +164            extracted = Path(src.root.extract(src.name, output_path))
    +165            if extracted.name != src.name:
    +166                try:
    +167                    shutil.move(str(extracted), output_path / src.name)
    +168                except Exception as e:
    +169                    raise RuntimeError(
    +170                        f"Failed to rename extracted file '{extracted.name}'"
    +171                        + f" to '{src.name}'."
    +172                        + f" (extracted from '{src.name}' in '{src.root.filename}')"
    +173                    ) from e
    +174        else:
    +175            shutil.copy(src, output_path / name)
    +176
    +177    return output_path
    +
    + + +

    Write the content of a bioimage.io resource package to a folder.

    + +
    Arguments:
    + +
      +
    • source: bioimageio resource description
    • +
    • output_path: file path to write package to
    • +
    • weights_priority_order: If given, only the first weights format present in the model is included. +If none of the prioritized weights formats is found, all are included.
    • +
    + +
    Returns:
    + +
    +

    directory path to bioimageio package folder

    +
    +
    + + +
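A short usage sketch (the source URL and output folder below are placeholders, not values from the docs):

```python
from bioimageio.spec import save_bioimageio_package_as_folder

folder = save_bioimageio_package_as_folder(
    "https://example.com/model/rdf.yaml",  # placeholder source
    output_path="unzipped_package",        # created if it does not exist
)
print(folder)  # folder now contains the yaml description and all referenced files
```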
    +
    + +
    + + def + save_bioimageio_package_to_stream( source: Union[Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])], str, Annotated[pydantic_core._pydantic_core.Url, UrlConstraints(max_length=2083, allowed_schemes=['http', 'https'], host_required=None, default_host=None, default_port=None, default_path=None)], Dict[str, YamlValue], Annotated[Union[Annotated[Union[bioimageio.spec.application.v0_2.ApplicationDescr, ApplicationDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.dataset.v0_2.DatasetDescr, DatasetDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.model.v0_4.ModelDescr, ModelDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[NotebookDescr, NotebookDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.generic.v0_2.GenericDescr, GenericDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], /, *, compression: int = 8, compression_level: int = 1, output_stream: Optional[IO[bytes]] = None, weights_priority_order: Optional[Sequence[Literal['keras_hdf5', 'onnx', 'pytorch_state_dict', 'tensorflow_js', 'tensorflow_saved_model_bundle', 'torchscript']]] = None) -> IO[bytes]: + + + +
    + +
    241def save_bioimageio_package_to_stream(
    +242    source: Union[BioimageioYamlSource, ResourceDescr],
    +243    /,
    +244    *,
    +245    compression: int = ZIP_DEFLATED,
    +246    compression_level: int = 1,
    +247    output_stream: Union[IO[bytes], None] = None,
    +248    weights_priority_order: Optional[  # model only
    +249        Sequence[
    +250            Literal[
    +251                "keras_hdf5",
    +252                "onnx",
    +253                "pytorch_state_dict",
    +254                "tensorflow_js",
    +255                "tensorflow_saved_model_bundle",
    +256                "torchscript",
    +257            ]
    +258        ]
    +259    ] = None,
    +260) -> IO[bytes]:
    +261    """Package a bioimageio resource into a stream.
    +262
    +263    Args:
    +264        source: bioimageio resource description
    +265        compression: The numeric constant of compression method.
    +266        compression_level: Compression level to use when writing files to the archive.
    +267                           See https://docs.python.org/3/library/zipfile.html#zipfile.ZipFile
    +268        output_stream: stream to write package to
    +269        weights_priority_order: If given, only the first weights format present in the model is included.
    +270                                If none of the prioritized weights formats is found, all are included.
    +271
    +272    Note: this function bypasses safety checks and does not load/validate the model after writing.
    +273
    +274    Returns:
    +275        stream of zipped bioimageio package
    +276    """
    +277    if output_stream is None:
    +278        output_stream = BytesIO()
    +279
    +280    package_content = _prepare_resource_package(
    +281        source,
    +282        weights_priority_order=weights_priority_order,
    +283    )
    +284
    +285    write_zip(
    +286        output_stream,
    +287        package_content,
    +288        compression=compression,
    +289        compression_level=compression_level,
    +290    )
    +291
    +292    return output_stream
    +
    + + +

    Package a bioimageio resource into a stream.

    + +
    Arguments:
    + +
      +
    • source: bioimageio resource description
    • +
    • compression: The numeric constant of compression method.
    • +
    • compression_level: Compression level to use when writing files to the archive. +See https://docs.python.org/3/library/zipfile.html#zipfile.ZipFile
    • +
    • output_stream: stream to write package to
    • +
    • weights_priority_order: If given, only the first weights format present in the model is included. +If none of the prioritized weights formats is found, all are included.
    • +
    + +

    Note: this function bypasses safety checks and does not load/validate the model after writing.

    + +
    Returns:
    + +
    +

    stream of zipped bioimageio package

    +
    +
    + + +
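A usage sketch (placeholder URL), showing how to obtain the zipped package as raw bytes:

```python
from bioimageio.spec import save_bioimageio_package_to_stream

stream = save_bioimageio_package_to_stream("https://example.com/model/rdf.yaml")  # placeholder
stream.seek(0)
zipped_bytes = stream.read()  # raw bytes of the zipped bioimageio package
```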
    +
    + +
    + + def + save_bioimageio_package( source: Union[Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])], str, Annotated[pydantic_core._pydantic_core.Url, UrlConstraints(max_length=2083, allowed_schemes=['http', 'https'], host_required=None, default_host=None, default_port=None, default_path=None)], Dict[str, YamlValue], Annotated[Union[Annotated[Union[bioimageio.spec.application.v0_2.ApplicationDescr, ApplicationDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.dataset.v0_2.DatasetDescr, DatasetDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.model.v0_4.ModelDescr, ModelDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[NotebookDescr, NotebookDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.generic.v0_2.GenericDescr, GenericDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], /, *, compression: int = 8, compression_level: int = 1, output_path: Union[Annotated[pathlib.Path, PathType(path_type='new')], Annotated[pathlib.Path, PathType(path_type='file')], NoneType] = None, weights_priority_order: Optional[Sequence[Literal['keras_hdf5', 'onnx', 'pytorch_state_dict', 'tensorflow_js', 'tensorflow_saved_model_bundle', 'torchscript']]] = None) -> Annotated[pathlib.Path, PathType(path_type='file')]: + + + +
    + +
    180def save_bioimageio_package(
    +181    source: Union[BioimageioYamlSource, ResourceDescr],
    +182    /,
    +183    *,
    +184    compression: int = ZIP_DEFLATED,
    +185    compression_level: int = 1,
    +186    output_path: Union[NewPath, FilePath, None] = None,
    +187    weights_priority_order: Optional[  # model only
    +188        Sequence[
    +189            Literal[
    +190                "keras_hdf5",
    +191                "onnx",
    +192                "pytorch_state_dict",
    +193                "tensorflow_js",
    +194                "tensorflow_saved_model_bundle",
    +195                "torchscript",
    +196            ]
    +197        ]
    +198    ] = None,
    +199) -> FilePath:
    +200    """Package a bioimageio resource as a zip file.
    +201
    +202    Args:
    +203        source: bioimageio resource description
    +204        compression: The numeric constant of compression method.
    +205        compression_level: Compression level to use when writing files to the archive.
    +206                           See https://docs.python.org/3/library/zipfile.html#zipfile.ZipFile
    +207        output_path: file path to write package to
    +208        weights_priority_order: If given, only the first weights format present in the model is included.
    +209                                If none of the prioritized weights formats is found, all are included.
    +210
    +211    Returns:
    +212        path to zipped bioimageio package
    +213    """
    +214    package_content = _prepare_resource_package(
    +215        source,
    +216        weights_priority_order=weights_priority_order,
    +217    )
    +218    if output_path is None:
    +219        output_path = Path(
    +220            NamedTemporaryFile(suffix=".bioimageio.zip", delete=False).name
    +221        )
    +222    else:
    +223        output_path = Path(output_path)
    +224
    +225    write_zip(
    +226        output_path,
    +227        package_content,
    +228        compression=compression,
    +229        compression_level=compression_level,
    +230    )
    +231    with validation_context_var.get().replace(warning_level=ERROR):
    +232        if isinstance((exported := load_description(output_path)), InvalidDescr):
    +233            raise ValueError(
    +234                f"Exported package '{output_path}' is invalid:"
    +235                + f" {exported.validation_summary}"
    +236            )
    +237
    +238    return output_path
    +
    + + +

    Package a bioimageio resource as a zip file.

    + +
    Arguments:
    + +
      +
    • source: bioimageio resource description
    • +
    • compression: The numeric constant of compression method.
    • +
    • compression_level: Compression level to use when writing files to the archive. +See https://docs.python.org/3/library/zipfile.html#zipfile.ZipFile
    • +
    • output_path: file path to write package to
    • +
    • weights_priority_order: If given, only the first weights format present in the model is included. +If none of the prioritized weights formats is found, all are included.
    • +
    + +
    Returns:
    + +
    +

    path to zipped bioimageio package

    +
    +
    + + +
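A usage sketch (the URL and output file name are placeholders); omitting `output_path` writes to a temporary `.bioimageio.zip` file:

```python
from bioimageio.spec import save_bioimageio_package

zip_path = save_bioimageio_package(
    "https://example.com/model/rdf.yaml",   # placeholder source
    output_path="my_model.bioimageio.zip",  # placeholder output file
)
print(zip_path)
```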
    +
    + +
    + + def + save_bioimageio_yaml_only( rd: Union[Annotated[Union[Annotated[Union[bioimageio.spec.application.v0_2.ApplicationDescr, ApplicationDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.dataset.v0_2.DatasetDescr, DatasetDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.model.v0_4.ModelDescr, ModelDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[NotebookDescr, NotebookDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.generic.v0_2.GenericDescr, GenericDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Dict[str, YamlValue], InvalidDescr], /, file: Union[Annotated[pathlib.Path, PathType(path_type='new')], Annotated[pathlib.Path, PathType(path_type='file')], TextIO]): + + + +
    + +
    118def save_bioimageio_yaml_only(
    +119    rd: Union[ResourceDescr, BioimageioYamlContent, InvalidDescr],
    +120    /,
    +121    file: Union[NewPath, FilePath, TextIO],
    +122):
    +123    """write the metadata of a resource description (`rd`) to `file`
    +124    without writing any of the referenced files in it.
    +125
    +126    Note: To save a resource description with its associated files as a package,
    +127    use `save_bioimageio_package` or `save_bioimageio_package_as_folder`.
    +128    """
    +129    if isinstance(rd, ResourceDescrBase):
    +130        content = dump_description(rd)
    +131    else:
    +132        content = rd
    +133
    +134    write_yaml(cast(YamlValue, content), file)
    +
    + + +

    write the metadata of a resource description (rd) to file +without writing any of the referenced files in it.

    + +

    Note: To save a resource description with its associated files as a package, +use save_bioimageio_package or save_bioimageio_package_as_folder.

    +
    + + +
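A usage sketch (placeholder URL and file name), writing only the yaml metadata without any referenced files:

```python
from pathlib import Path
from bioimageio.spec import load_description, save_bioimageio_yaml_only

rd = load_description("https://example.com/model/rdf.yaml")  # placeholder source
save_bioimageio_yaml_only(rd, Path("rdf_only.bioimageio.yaml"))
```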
    +
    +
    + settings = + + Settings(cache_path=PosixPath('/home/runner/.cache/bioimageio'), id_map='https://uk1s3.embassy.ebi.ac.uk/public-datasets/bioimage.io/id_map.json', id_map_draft='https://uk1s3.embassy.ebi.ac.uk/public-datasets/bioimage.io/id_map_draft.json', resolve_draft=True, perform_io_checks=True, log_warnings=True, github_username=None, github_token=None, CI='true', user_agent=None) + + +
    + + + + +
    +
    +
    + SpecificResourceDescr = + + typing.Annotated[typing.Union[typing.Annotated[typing.Union[bioimageio.spec.application.v0_2.ApplicationDescr, ApplicationDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], typing.Annotated[typing.Union[bioimageio.spec.dataset.v0_2.DatasetDescr, DatasetDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], typing.Annotated[typing.Union[bioimageio.spec.model.v0_4.ModelDescr, ModelDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], typing.Annotated[typing.Union[NotebookDescr, NotebookDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)] + + +
    + + + + +
    +
    + +
    + + def + validate_format( data: Dict[str, YamlValue], /, *, format_version: Union[Literal['latest', 'discover'], str] = 'discover', context: Optional[ValidationContext] = None) -> ValidationSummary: + + + +
    + +
    161def validate_format(
    +162    data: BioimageioYamlContent,
    +163    /,
    +164    *,
    +165    format_version: Union[Literal["discover", "latest"], str] = DISCOVER,
    +166    context: Optional[ValidationContext] = None,
    +167) -> ValidationSummary:
    +168    """validate a bioimageio.yaml file (RDF)"""
    +169    with context or validation_context_var.get():
    +170        rd = build_description(data, format_version=format_version)
    +171
    +172    assert rd.validation_summary is not None
    +173    return rd.validation_summary
    +
    + + +

    validate a bioimageio.yaml file (RDF)

    +
    + + +
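A usage sketch, assuming a local `bioimageio.yaml` file (placeholder name) has been parsed into a plain dict with ruamel.yaml:

```python
from pathlib import Path
from ruamel.yaml import YAML
from bioimageio.spec import ValidationContext, validate_format

data = YAML(typ="safe").load(Path("bioimageio.yaml"))  # placeholder local file
summary = validate_format(data, context=ValidationContext(perform_io_checks=False))
print(summary.status)  # "passed" or "failed"
```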
    +
    + +
    +
    @dataclass(frozen=True)
    + + class + ValidationContext: + + + +
    + +
     19@dataclass(frozen=True)
    + 20class ValidationContext:
    + 21    _context_tokens: "List[Token[ValidationContext]]" = field(
    + 22        init=False, default_factory=list
    + 23    )
    + 24
    + 25    root: Union[RootHttpUrl, AbsoluteDirectory, ZipFile] = Path()
    + 26    """url/directory serving as base to resolve any relative file paths"""
    + 27
    + 28    warning_level: WarningLevel = 50
    + 29    """raise warnings of severity `s` as validation errors if `s >= warning_level`"""
    + 30
    + 31    log_warnings: bool = settings.log_warnings
    + 32    """if `True` log warnings that are not raised to the console"""
    + 33
    + 34    file_name: Optional[FileName] = None
    + 35    """file name of the bioimageio Yaml file"""
    + 36
    + 37    perform_io_checks: bool = settings.perform_io_checks
    + 38    """whether or not to perform validation that requires file io,
    + 39    e.g. downloading remote files.
    + 40
    + 41    Existence of local absolute file paths is still being checked."""
    + 42
    + 43    known_files: Dict[str, Sha256] = field(default_factory=dict)
    + 44    """allows bypassing the download and hashing of referenced files"""
    + 45
    + 46    def replace(
    + 47        self,
    + 48        root: Optional[Union[RootHttpUrl, DirectoryPath, ZipFile]] = None,
    + 49        warning_level: Optional[WarningLevel] = None,
    + 50        log_warnings: Optional[bool] = None,
    + 51        file_name: Optional[str] = None,
    + 52        perform_io_checks: Optional[bool] = None,
    + 53        known_files: Optional[Dict[str, Sha256]] = None,
    + 54    ) -> "ValidationContext":
    + 55        if known_files is None and root is not None and self.root != root:
    + 56            # reset known files if root changes, but no new known_files are given
    + 57            known_files = {}
    + 58
    + 59        return ValidationContext(
    + 60            root=self.root if root is None else root,
    + 61            warning_level=(
    + 62                self.warning_level if warning_level is None else warning_level
    + 63            ),
    + 64            log_warnings=self.log_warnings if log_warnings is None else log_warnings,
    + 65            file_name=self.file_name if file_name is None else file_name,
    + 66            perform_io_checks=(
    + 67                self.perform_io_checks
    + 68                if perform_io_checks is None
    + 69                else perform_io_checks
    + 70            ),
    + 71            known_files=self.known_files if known_files is None else known_files,
    + 72        )
    + 73
    + 74    def __enter__(self):
    + 75        self._context_tokens.append(validation_context_var.set(self))
    + 76        return self
    + 77
    + 78    def __exit__(self, type, value, traceback):  # type: ignore
    + 79        validation_context_var.reset(self._context_tokens.pop(-1))
    + 80
    + 81    @property
    + 82    def source_name(self) -> str:
    + 83        if self.file_name is None:
    + 84            return "in-memory"
    + 85        else:
    + 86            try:
    + 87                if isinstance(self.root, Path):
    + 88                    source = (self.root / self.file_name).absolute()
    + 89                else:
    + 90                    parsed = urlsplit(str(self.root))
    + 91                    path = list(parsed.path.strip("/").split("/")) + [self.file_name]
    + 92                    source = urlunsplit(
    + 93                        (
    + 94                            parsed.scheme,
    + 95                            parsed.netloc,
    + 96                            "/".join(path),
    + 97                            parsed.query,
    + 98                            parsed.fragment,
    + 99                        )
    +100                    )
    +101            except ValueError:
    +102                return self.file_name
    +103            else:
    +104                return str(source)
    +
    + + + + +
    +
    + + ValidationContext( root: Union[bioimageio.spec._internal.root_url.RootHttpUrl, Annotated[pathlib.Path, PathType(path_type='dir'), Predicate(is_absolute)], zipfile.ZipFile] = PosixPath('.'), warning_level: Literal[20, 30, 35, 50] = 50, log_warnings: bool = True, file_name: Optional[str] = None, perform_io_checks: bool = True, known_files: Dict[str, bioimageio.spec._internal.io_basics.Sha256] = <factory>) + + +
    + + + + +
    +
    +
    + root: Union[bioimageio.spec._internal.root_url.RootHttpUrl, Annotated[pathlib.Path, PathType(path_type='dir'), Predicate(is_absolute)], zipfile.ZipFile] = +PosixPath('.') + + +
    + + +

    url/directory serving as base to resolve any relative file paths

    +
    + + +
    +
    +
    + warning_level: Literal[20, 30, 35, 50] = +50 + + +
    + + +

    raise warnings of severity s as validation errors if s >= warning_level

    +
    + + +
    +
    +
    + log_warnings: bool = +True + + +
    + + +

    if True log warnings that are not raised to the console

    +
    + + +
    +
    +
    + file_name: Optional[str] = +None + + +
    + + +

    file name of the bioimageio Yaml file

    +
    + + +
    +
    +
    + perform_io_checks: bool = +True + + +
    + + +

    whether or not to perform validation that requires file io, +e.g. downloading remote files.

    + +

    Existence of local absolute file paths is still being checked.

    +
    + + +
    +
    +
    + known_files: Dict[str, bioimageio.spec._internal.io_basics.Sha256] + + +
    + + +

    allows bypassing the download and hashing of referenced files

    +
    + + +
    +
    + +
    + + def + replace( self, root: Union[bioimageio.spec._internal.root_url.RootHttpUrl, Annotated[pathlib.Path, PathType(path_type='dir')], zipfile.ZipFile, NoneType] = None, warning_level: Optional[Literal[20, 30, 35, 50]] = None, log_warnings: Optional[bool] = None, file_name: Optional[str] = None, perform_io_checks: Optional[bool] = None, known_files: Optional[Dict[str, bioimageio.spec._internal.io_basics.Sha256]] = None) -> ValidationContext: + + + +
    + +
    46    def replace(
    +47        self,
    +48        root: Optional[Union[RootHttpUrl, DirectoryPath, ZipFile]] = None,
    +49        warning_level: Optional[WarningLevel] = None,
    +50        log_warnings: Optional[bool] = None,
    +51        file_name: Optional[str] = None,
    +52        perform_io_checks: Optional[bool] = None,
    +53        known_files: Optional[Dict[str, Sha256]] = None,
    +54    ) -> "ValidationContext":
    +55        if known_files is None and root is not None and self.root != root:
    +56            # reset known files if root changes, but no new known_files are given
    +57            known_files = {}
    +58
    +59        return ValidationContext(
    +60            root=self.root if root is None else root,
    +61            warning_level=(
    +62                self.warning_level if warning_level is None else warning_level
    +63            ),
    +64            log_warnings=self.log_warnings if log_warnings is None else log_warnings,
    +65            file_name=self.file_name if file_name is None else file_name,
    +66            perform_io_checks=(
    +67                self.perform_io_checks
    +68                if perform_io_checks is None
    +69                else perform_io_checks
    +70            ),
    +71            known_files=self.known_files if known_files is None else known_files,
    +72        )
    +
    + + + + +
    +
    + +
    + source_name: str + + + +
    + +
     81    @property
    + 82    def source_name(self) -> str:
    + 83        if self.file_name is None:
    + 84            return "in-memory"
    + 85        else:
    + 86            try:
    + 87                if isinstance(self.root, Path):
    + 88                    source = (self.root / self.file_name).absolute()
    + 89                else:
    + 90                    parsed = urlsplit(str(self.root))
    + 91                    path = list(parsed.path.strip("/").split("/")) + [self.file_name]
    + 92                    source = urlunsplit(
    + 93                        (
    + 94                            parsed.scheme,
    + 95                            parsed.netloc,
    + 96                            "/".join(path),
    + 97                            parsed.query,
    + 98                            parsed.fragment,
    + 99                        )
    +100                    )
    +101            except ValueError:
    +102                return self.file_name
    +103            else:
    +104                return str(source)
    +
    + + + + +
    +
    +
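A small sketch based on the fields and methods shown above (the directory and file name are placeholders): contexts are immutable, so `replace` derives a modified copy; entering a context with `with ctx:` makes it the active validation context.

```python
from pathlib import Path
from bioimageio.spec import ValidationContext

ctx = ValidationContext(
    root=Path("/data/models").absolute(),  # placeholder directory
    file_name="bioimageio.yaml",
)
quiet = ctx.replace(perform_io_checks=False)  # derive a modified, independent context
print(ctx.source_name)          # /data/models/bioimageio.yaml
print(quiet.perform_io_checks)  # False
```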
    + +
    + + class + ValidationSummary(pydantic.main.BaseModel): + + + +
    + +
    239class ValidationSummary(BaseModel, extra="allow"):
    +240    """Summarizes output of all bioimageio validations and tests
    +241    for one specific `ResourceDescr` instance."""
    +242
    +243    name: str
    +244    source_name: str
    +245    type: str
    +246    format_version: str
    +247    status: Literal["passed", "failed"]
    +248    details: List[ValidationDetail]
    +249    env: Set[InstalledPackage] = Field(
    +250        default_factory=lambda: {
    +251            InstalledPackage(name="bioimageio.spec", version=VERSION)
    +252        }
    +253    )
    +254    """list of selected, relevant package versions"""
    +255
    +256    conda_list: Optional[Sequence[InstalledPackage]] = None
    +257    """parsed output of conda list"""
    +258
    +259    @property
    +260    def status_icon(self):
    +261        if self.status == "passed":
    +262            return "✔️"
    +263        else:
    +264            return "❌"
    +265
    +266    @property
    +267    def errors(self) -> List[ErrorEntry]:
    +268        return list(chain.from_iterable(d.errors for d in self.details))
    +269
    +270    @property
    +271    def warnings(self) -> List[WarningEntry]:
    +272        return list(chain.from_iterable(d.warnings for d in self.details))
    +273
    +274    def __str__(self):
    +275        return f"{self.__class__.__name__}:\n" + self.format()
    +276
    +277    @staticmethod
    +278    def _format_md_table(rows: List[List[str]]) -> str:
    +279        """format `rows` as markdown table"""
    +280        n_cols = len(rows[0])
    +281        assert all(len(row) == n_cols for row in rows)
    +282        col_widths = [max(max(len(row[i]) for row in rows), 3) for i in range(n_cols)]
    +283
    +284        # fix new lines in table cell
    +285        rows = [[line.replace("\n", "<br>") for line in r] for r in rows]
    +286
    +287        lines = [" | ".join(rows[0][i].center(col_widths[i]) for i in range(n_cols))]
    +288        lines.append(" | ".join("---".center(col_widths[i]) for i in range(n_cols)))
    +289        lines.extend(
    +290            [
    +291                " | ".join(row[i].ljust(col_widths[i]) for i in range(n_cols))
    +292                for row in rows[1:]
    +293            ]
    +294        )
    +295        return "\n| " + " |\n| ".join(lines) + " |\n"
    +296
    +297    def format(
    +298        self,
    +299        hide_tracebacks: bool = False,
    +300        hide_source: bool = False,
    +301        hide_env: bool = False,
    +302        root_loc: Loc = (),
    +303    ) -> str:
    +304        """Format summary as Markdown string
    +305
    +306        Suitable to embed in HTML using '<br>' instead of '\n'.
    +307        """
    +308        info = self._format_md_table(
    +309            [[self.status_icon, f"{self.name.strip('.').strip()} {self.status}"]]
    +310            + ([] if hide_source else [["source", self.source_name]])
    +311            + [
    +312                ["format version", f"{self.type} {self.format_version}"],
    +313            ]
    +314            + ([] if hide_env else [[e.name, e.version] for e in self.env])
    +315        )
    +316
    +317        def format_loc(loc: Loc):
    +318            return "`" + (".".join(map(str, root_loc + loc)) or ".") + "`"
    +319
    +320        details = [["❓", "location", "detail"]]
    +321        for d in self.details:
    +322            details.append([d.status_icon, format_loc(d.loc), d.name])
    +323            if d.context is not None:
    +324                details.append(
    +325                    [
    +326                        "🔍",
    +327                        "context.perform_io_checks",
    +328                        str(d.context["perform_io_checks"]),
    +329                    ]
    +330                )
    +331                if d.context["perform_io_checks"]:
    +332                    details.append(["🔍", "context.root", d.context["root"]])
    +333                    for kfn, sha in d.context["known_files"].items():
    +334                        details.append(["🔍", f"context.known_files.{kfn}", sha])
    +335
    +336                details.append(
    +337                    ["🔍", "context.warning_level", d.context["warning_level"]]
    +338                )
    +339
    +340            if d.recommended_env is not None:
    +341                rec_env = StringIO()
    +342                json_env = d.recommended_env.model_dump(
    +343                    mode="json", exclude_defaults=True
    +344                )
    +345                assert is_yaml_value(json_env)
    +346                write_yaml(json_env, rec_env)
    +347                rec_env_code = rec_env.getvalue().replace("\n", "</code><br><code>")
    +348                details.append(
    +349                    [
    +350                        "🐍",
    +351                        format_loc(d.loc),
    +352                        f"recommended conda env ({d.name})<br>"
    +353                        + f"<pre><code>{rec_env_code}</code></pre>",
    +354                    ]
    +355                )
    +356
    +357            if d.conda_compare:
    +358                details.append(
    +359                    [
    +360                        "🐍",
    +361                        format_loc(d.loc),
    +362                        f"conda compare ({d.name}):<br>"
    +363                        + d.conda_compare.replace("\n", "<br>"),
    +364                    ]
    +365                )
    +366
    +367            for entry in d.errors:
    +368                details.append(
    +369                    [
    +370                        "❌",
    +371                        format_loc(entry.loc),
    +372                        entry.msg.replace("\n\n", "<br>").replace("\n", "<br>"),
    +373                    ]
    +374                )
    +375                if hide_tracebacks:
    +376                    continue
    +377
    +378                formatted_tb_lines: List[str] = []
    +379                for tb in entry.traceback:
    +380                    if not (tb_stripped := tb.strip()):
    +381                        continue
    +382
    +383                    first_tb_line, *tb_lines = tb_stripped.split("\n")
    +384                    if (
    +385                        first_tb_line.startswith('File "')
    +386                        and '", line' in first_tb_line
    +387                    ):
    +388                        path, where = first_tb_line[len('File "') :].split('", line')
    +389                        try:
    +390                            p = Path(path)
    +391                        except Exception:
    +392                            file_name = path
    +393                        else:
    +394                            path = p.as_posix()
    +395                            file_name = p.name
    +396
    +397                        where = ", line" + where
    +398                        first_tb_line = f'[{file_name}]({file_name} "{path}"){where}'
    +399
    +400                    if tb_lines:
    +401                        tb_rest = "<br>`" + "`<br>`".join(tb_lines) + "`"
    +402                    else:
    +403                        tb_rest = ""
    +404
    +405                    formatted_tb_lines.append(first_tb_line + tb_rest)
    +406
    +407                details.append(["", "", "<br>".join(formatted_tb_lines)])
    +408
    +409            for entry in d.warnings:
    +410                details.append(["⚠", format_loc(entry.loc), entry.msg])
    +411
    +412        return f"{info}{self._format_md_table(details)}"
    +413
    +414    # TODO: fix bug which causes extensive white space between the info table and details table
    +415    @no_type_check
    +416    def display(self) -> None:
    +417        formatted = self.format()
    +418        try:
    +419            from IPython.core.getipython import get_ipython
    +420            from IPython.display import Markdown, display
    +421        except ImportError:
    +422            pass
    +423        else:
    +424            if get_ipython() is not None:
    +425                _ = display(Markdown(formatted))
    +426                return
    +427
    +428        rich_markdown = rich.markdown.Markdown(formatted)
    +429        console = rich.console.Console()
    +430        console.print(rich_markdown)
    +431
    +432    def add_detail(self, detail: ValidationDetail):
    +433        if detail.status == "failed":
    +434            self.status = "failed"
    +435        elif detail.status != "passed":
    +436            assert_never(detail.status)
    +437
    +438        self.details.append(detail)
    +439
    +440    @field_validator("env", mode="before")
    +441    def _convert_dict(cls, value: List[Union[List[str], Dict[str, str]]]):
    +442        """convert old env value for backwards compatibility"""
    +443        if isinstance(value, list):
    +444            return [
    +445                (
    +446                    (v["name"], v["version"], v.get("build", ""), v.get("channel", ""))
    +447                    if isinstance(v, dict) and "name" in v and "version" in v
    +448                    else v
    +449                )
    +450                for v in value
    +451            ]
    +452        else:
    +453            return value
    +
    + + +

    Summarizes output of all bioimageio validations and tests +for one specific ResourceDescr instance.

    +
    + + +
    +
    + name: str + + +
    + + + + +
    +
    +
    + source_name: str + + +
    + + + + +
    +
    +
    + type: str + + +
    + + + + +
    +
    +
    + format_version: str + + +
    + + + + +
    +
    +
    + status: Literal['passed', 'failed'] + + +
    + + + + +
    +
    + + + + + +
    +
    + + + +

    list of selected, relevant package versions

    +
    + + +
    +
    +
    + conda_list: Optional[Sequence[bioimageio.spec.summary.InstalledPackage]] + + +
    + + +

    parsed output of conda list

    +
    + + +
    +
    + +
    + status_icon + + + +
    + +
    259    @property
    +260    def status_icon(self):
    +261        if self.status == "passed":
    +262            return "✔️"
    +263        else:
    +264            return "❌"
    +
    + + + + +
    +
    + +
    + errors: List[bioimageio.spec.summary.ErrorEntry] + + + +
    + +
    266    @property
    +267    def errors(self) -> List[ErrorEntry]:
    +268        return list(chain.from_iterable(d.errors for d in self.details))
    +
    + + + + +
    +
    + +
    + warnings: List[bioimageio.spec.summary.WarningEntry] + + + +
    + +
    270    @property
    +271    def warnings(self) -> List[WarningEntry]:
    +272        return list(chain.from_iterable(d.warnings for d in self.details))
    +
    + + + + +
    +
    + +
    + + def + format( self, hide_tracebacks: bool = False, hide_source: bool = False, hide_env: bool = False, root_loc: Tuple[Union[int, str], ...] = ()) -> str: + + + +
    + +
    297    def format(
    +298        self,
    +299        hide_tracebacks: bool = False,
    +300        hide_source: bool = False,
    +301        hide_env: bool = False,
    +302        root_loc: Loc = (),
    +303    ) -> str:
    +304        """Format summary as Markdown string
    +305
    +306        Suitable to embed in HTML using '<br>' instead of '\n'.
    +307        """
    +308        info = self._format_md_table(
    +309            [[self.status_icon, f"{self.name.strip('.').strip()} {self.status}"]]
    +310            + ([] if hide_source else [["source", self.source_name]])
    +311            + [
    +312                ["format version", f"{self.type} {self.format_version}"],
    +313            ]
    +314            + ([] if hide_env else [[e.name, e.version] for e in self.env])
    +315        )
    +316
    +317        def format_loc(loc: Loc):
    +318            return "`" + (".".join(map(str, root_loc + loc)) or ".") + "`"
    +319
    +320        details = [["❓", "location", "detail"]]
    +321        for d in self.details:
    +322            details.append([d.status_icon, format_loc(d.loc), d.name])
    +323            if d.context is not None:
    +324                details.append(
    +325                    [
    +326                        "🔍",
    +327                        "context.perform_io_checks",
    +328                        str(d.context["perform_io_checks"]),
    +329                    ]
    +330                )
    +331                if d.context["perform_io_checks"]:
    +332                    details.append(["🔍", "context.root", d.context["root"]])
    +333                    for kfn, sha in d.context["known_files"].items():
    +334                        details.append(["🔍", f"context.known_files.{kfn}", sha])
    +335
    +336                details.append(
    +337                    ["🔍", "context.warning_level", d.context["warning_level"]]
    +338                )
    +339
    +340            if d.recommended_env is not None:
    +341                rec_env = StringIO()
    +342                json_env = d.recommended_env.model_dump(
    +343                    mode="json", exclude_defaults=True
    +344                )
    +345                assert is_yaml_value(json_env)
    +346                write_yaml(json_env, rec_env)
    +347                rec_env_code = rec_env.getvalue().replace("\n", "</code><br><code>")
    +348                details.append(
    +349                    [
    +350                        "🐍",
    +351                        format_loc(d.loc),
    +352                        f"recommended conda env ({d.name})<br>"
    +353                        + f"<pre><code>{rec_env_code}</code></pre>",
    +354                    ]
    +355                )
    +356
    +357            if d.conda_compare:
    +358                details.append(
    +359                    [
    +360                        "🐍",
    +361                        format_loc(d.loc),
    +362                        f"conda compare ({d.name}):<br>"
    +363                        + d.conda_compare.replace("\n", "<br>"),
    +364                    ]
    +365                )
    +366
    +367            for entry in d.errors:
    +368                details.append(
    +369                    [
    +370                        "❌",
    +371                        format_loc(entry.loc),
    +372                        entry.msg.replace("\n\n", "<br>").replace("\n", "<br>"),
    +373                    ]
    +374                )
    +375                if hide_tracebacks:
    +376                    continue
    +377
    +378                formatted_tb_lines: List[str] = []
    +379                for tb in entry.traceback:
    +380                    if not (tb_stripped := tb.strip()):
    +381                        continue
    +382
    +383                    first_tb_line, *tb_lines = tb_stripped.split("\n")
    +384                    if (
    +385                        first_tb_line.startswith('File "')
    +386                        and '", line' in first_tb_line
    +387                    ):
    +388                        path, where = first_tb_line[len('File "') :].split('", line')
    +389                        try:
    +390                            p = Path(path)
    +391                        except Exception:
    +392                            file_name = path
    +393                        else:
    +394                            path = p.as_posix()
    +395                            file_name = p.name
    +396
    +397                        where = ", line" + where
    +398                        first_tb_line = f'[{file_name}]({file_name} "{path}"){where}'
    +399
    +400                    if tb_lines:
    +401                        tb_rest = "<br>`" + "`<br>`".join(tb_lines) + "`"
    +402                    else:
    +403                        tb_rest = ""
    +404
    +405                    formatted_tb_lines.append(first_tb_line + tb_rest)
    +406
    +407                details.append(["", "", "<br>".join(formatted_tb_lines)])
    +408
    +409            for entry in d.warnings:
    +410                details.append(["⚠", format_loc(entry.loc), entry.msg])
    +411
    +412        return f"{info}{self._format_md_table(details)}"
    +
    + + +

    Format summary as Markdown string

    + +
    Suitable to embed in HTML using '<br>' instead of '\n'.

    +
    + + +
    +
    + +
    +
    @no_type_check
    + + def + display(self) -> None: + + + +
    + +
    415    @no_type_check
    +416    def display(self) -> None:
    +417        formatted = self.format()
    +418        try:
    +419            from IPython.core.getipython import get_ipython
    +420            from IPython.display import Markdown, display
    +421        except ImportError:
    +422            pass
    +423        else:
    +424            if get_ipython() is not None:
    +425                _ = display(Markdown(formatted))
    +426                return
    +427
    +428        rich_markdown = rich.markdown.Markdown(formatted)
    +429        console = rich.console.Console()
    +430        console.print(rich_markdown)
    +
    + + + + +
    +
    + +
    + + def + add_detail(self, detail: bioimageio.spec.summary.ValidationDetail): + + + +
    + +
    432    def add_detail(self, detail: ValidationDetail):
    +433        if detail.status == "failed":
    +434            self.status = "failed"
    +435        elif detail.status != "passed":
    +436            assert_never(detail.status)
    +437
    +438        self.details.append(detail)
    +
    + + + + +
    +
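A sketch of inspecting and rendering a summary; it assumes a local `bioimageio.yaml` file (placeholder name) and uses `validate_format` as documented above:

```python
from pathlib import Path
from ruamel.yaml import YAML
from bioimageio.spec import validate_format

summary = validate_format(YAML(typ="safe").load(Path("bioimageio.yaml")))  # placeholder file
print(summary.status_icon, summary.status)
print([e.msg for e in summary.errors])       # errors flattened across all details
print(summary.format(hide_tracebacks=True))  # Markdown report
summary.display()                            # rendered via IPython or rich
```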
    +
    + + \ No newline at end of file diff --git a/bioimageio/spec/application.html b/bioimageio/spec/application.html new file mode 100644 index 00000000..69670da5 --- /dev/null +++ b/bioimageio/spec/application.html @@ -0,0 +1,523 @@ + + + + + + + bioimageio.spec.application API documentation + + + + + + + + + + +
    +
    +

    +bioimageio.spec.application

    + +

    implementations of all released minor versions are available in submodules:

    + + +
    + + + + + +
     1# autogen: start
    + 2"""
    + 3implementations of all released minor versions are available in submodules:
    + 4- application v0_2: `bioimageio.spec.application.v0_2.ApplicationDescr`
    + 5- application v0_3: `bioimageio.spec.application.v0_3.ApplicationDescr`
    + 6"""
    + 7
    + 8from typing import Union
    + 9
    +10from pydantic import Discriminator
    +11from typing_extensions import Annotated
    +12
    +13from . import v0_2, v0_3
    +14
    +15ApplicationDescr = v0_3.ApplicationDescr
    +16ApplicationDescr_v0_2 = v0_2.ApplicationDescr
    +17ApplicationDescr_v0_3 = v0_3.ApplicationDescr
    +18
    +19AnyApplicationDescr = Annotated[
    +20    Union[ApplicationDescr_v0_2, ApplicationDescr_v0_3], Discriminator("format_version")
    +21]
    +22"""Union of any released application desription"""
    +23# autogen: stop
    +
    + + +
    +
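A sketch of validating raw content against the discriminated union declared above, using pydantic's `TypeAdapter`; the dict below is intentionally incomplete and only meant to show that it is routed by `format_version` and then rejected:

```python
from pydantic import TypeAdapter, ValidationError
from bioimageio.spec.application import AnyApplicationDescr

adapter = TypeAdapter(AnyApplicationDescr)
try:
    # placeholder content, missing all other required fields
    adapter.validate_python({"type": "application", "format_version": "0.3.0"})
except ValidationError as e:
    print(f"rejected with {e.error_count()} validation errors")
```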
    + +
    + + class + ApplicationDescr(bioimageio.spec.generic.v0_3.GenericDescrBase): + + + +
    + +
    32class ApplicationDescr(GenericDescrBase, title="bioimage.io application specification"):
    +33    """Bioimage.io description of an application."""
    +34
    +35    type: Literal["application"] = "application"
    +36
    +37    id: Optional[ApplicationId] = None
    +38    """bioimage.io-wide unique resource identifier
    +39    assigned by bioimage.io; version **un**specific."""
    +40
    +41    parent: Optional[ApplicationId] = None
    +42    """The description from which this one is derived"""
    +43
    +44    source: Annotated[
    +45        Optional[ImportantFileSource],
    +46        Field(description="URL or path to the source of the application"),
    +47    ] = None
    +48    """The primary source of the application"""
    +
    + + +

    Bioimage.io description of an application.

    +
    + + +
    +
    + type: Literal['application'] + + +
    + + + + +
    +
    + + + +

    bioimage.io-wide unique resource identifier +assigned by bioimage.io; version unspecific.

    +
    + + +
    +
    + + + +

    The description from which this one is derived

    +
    + + +
    +
    +
    + source: Annotated[Optional[Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')]), AfterValidator(func=<function wo_special_file_name at 0x7f538192cea0>), PlainSerializer(func=<function _package at 0x7f538192d620>, return_type=PydanticUndefined, when_used='unless-none')]], FieldInfo(annotation=NoneType, required=True, description='URL or path to the source of the application')] + + +
    + + +

    The primary source of the application

    +
    + + +
    +
    +
    + implemented_format_version: ClassVar[str] = +'0.3.0' + + +
    + + + + +
    +
    +
    + implemented_format_version_tuple: ClassVar[Tuple[int, int, int]] = +(0, 3, 0) + + +
    + + + + +
    +
    + +
    + + def + model_post_init(self: pydantic.main.BaseModel, context: Any, /) -> None: + + + +
    + +
    124                    def wrapped_model_post_init(self: BaseModel, context: Any, /) -> None:
    +125                        """We need to both initialize private attributes and call the user-defined model_post_init
    +126                        method.
    +127                        """
    +128                        init_private_attributes(self, context)
    +129                        original_model_post_init(self, context)
    +
    + + +

    We need to both initialize private attributes and call the user-defined model_post_init +method.

    +
    + + +
    + +
    +
    +
    + ApplicationDescr_v0_2 = +<class 'bioimageio.spec.application.v0_2.ApplicationDescr'> + + +
    + + + + +
    +
    +
    + ApplicationDescr_v0_3 = +<class 'ApplicationDescr'> + + +
    + + + + +
    +
    +
    + AnyApplicationDescr = + + typing.Annotated[typing.Union[bioimageio.spec.application.v0_2.ApplicationDescr, ApplicationDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)] + + +
    + + +

    Union of any released application description

    +
    + + +
    +
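One way to put the `AnyApplicationDescr` union above to work is to validate raw RDF content with a pydantic `TypeAdapter`, letting the `format_version` discriminator select the concrete class. A minimal sketch; the input dict is assumed to hold a complete application description.

```python
from typing import Any, Dict

from pydantic import TypeAdapter

from bioimageio.spec.application import AnyApplicationDescr

_adapter = TypeAdapter(AnyApplicationDescr)


def parse_application(rdf: Dict[str, Any]):
    """Validate raw application RDF content.

    The `format_version` entry decides whether an `ApplicationDescr_v0_2`
    or an `ApplicationDescr_v0_3` instance is returned.
    """
    return _adapter.validate_python(rdf)
```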
    + + \ No newline at end of file diff --git a/bioimageio/spec/application/v0_2.html b/bioimageio/spec/application/v0_2.html new file mode 100644 index 00000000..0bcb8317 --- /dev/null +++ b/bioimageio/spec/application/v0_2.html @@ -0,0 +1,588 @@ + + + + + + + bioimageio.spec.application.v0_2 API documentation + + + + + + + + + + +
    +
    +

    +bioimageio.spec.application.v0_2

    + + + + + + +
     1from typing import Literal, Optional
    + 2
    + 3from pydantic import Field
    + 4from typing_extensions import Annotated
    + 5
    + 6from .._internal.common_nodes import Node
    + 7from .._internal.io_basics import AbsoluteFilePath as AbsoluteFilePath
    + 8from .._internal.types import ImportantFileSource
    + 9from .._internal.url import HttpUrl as HttpUrl
    +10from ..generic.v0_2 import VALID_COVER_IMAGE_EXTENSIONS as VALID_COVER_IMAGE_EXTENSIONS
    +11from ..generic.v0_2 import AttachmentsDescr as AttachmentsDescr
    +12from ..generic.v0_2 import Author as Author
    +13from ..generic.v0_2 import BadgeDescr as BadgeDescr
    +14from ..generic.v0_2 import CiteEntry as CiteEntry
    +15from ..generic.v0_2 import Doi as Doi
    +16from ..generic.v0_2 import GenericDescrBase
    +17from ..generic.v0_2 import LinkedResource as LinkedResource
    +18from ..generic.v0_2 import Maintainer as Maintainer
    +19from ..generic.v0_2 import OrcidId as OrcidId
    +20from ..generic.v0_2 import RelativeFilePath as RelativeFilePath
    +21from ..generic.v0_2 import ResourceId as ResourceId
    +22from ..generic.v0_2 import Uploader as Uploader
    +23from ..generic.v0_2 import Version as Version
    +24
    +25
    +26class ApplicationId(ResourceId):
    +27    pass
    +28
    +29
    +30class ApplicationDescr(GenericDescrBase, title="bioimage.io application specification"):
    +31    """Bioimage.io description of an application."""
    +32
    +33    type: Literal["application"] = "application"
    +34
    +35    id: Optional[ApplicationId] = None
    +36    """bioimage.io-wide unique resource identifier
    +37    assigned by bioimage.io; version **un**specific."""
    +38
    +39    source: Annotated[
    +40        Optional[ImportantFileSource],
    +41        Field(description="URL or path to the source of the application"),
    +42    ] = None
    +43    """The primary source of the application"""
    +44
    +45
    +46class LinkedApplication(Node):
    +47    """Reference to a bioimage.io application."""
    +48
    +49    id: ApplicationId
    +50    """A valid application `id` from the bioimage.io collection."""
    +51
    +52    version_number: Optional[int] = None
    +53    """version number (n-th published version, not the semantic version) of linked application"""
    +
    + + +
    +
    + +
    + + class + ApplicationId(bioimageio.spec.generic.v0_2.ResourceId): + + + +
    + +
    27class ApplicationId(ResourceId):
    +28    pass
    +
    + + +

    str(object='') -> str +str(bytes_or_buffer[, encoding[, errors]]) -> str

    + +

    Create a new string object from the given object. If encoding or +errors is specified, then the object must expose a data buffer +that will be decoded using the given encoding and error handler. +Otherwise, returns the result of object.__str__() (if defined) +or repr(object). +encoding defaults to sys.getdefaultencoding(). +errors defaults to 'strict'.

    +
    + + +
    +
    Inherited Members
    +
    + +
    +
    +
    +
    + +
    + + class + ApplicationDescr(bioimageio.spec.generic.v0_2.GenericDescrBase): + + + +
    + +
    31class ApplicationDescr(GenericDescrBase, title="bioimage.io application specification"):
    +32    """Bioimage.io description of an application."""
    +33
    +34    type: Literal["application"] = "application"
    +35
    +36    id: Optional[ApplicationId] = None
    +37    """bioimage.io-wide unique resource identifier
    +38    assigned by bioimage.io; version **un**specific."""
    +39
    +40    source: Annotated[
    +41        Optional[ImportantFileSource],
    +42        Field(description="URL or path to the source of the application"),
    +43    ] = None
    +44    """The primary source of the application"""
    +
    + + +

    Bioimage.io description of an application.

    +
    + + +
    +
    + type: Literal['application'] + + +
    + + + + +
    +
    +
    + id: Optional[ApplicationId] + + +
    + + +

    bioimage.io-wide unique resource identifier +assigned by bioimage.io; version unspecific.

    +
    + + +
    +
    +
    + source: Annotated[Optional[Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')]), AfterValidator(func=<function wo_special_file_name at 0x7f538192cea0>), PlainSerializer(func=<function _package at 0x7f538192d620>, return_type=PydanticUndefined, when_used='unless-none')]], FieldInfo(annotation=NoneType, required=True, description='URL or path to the source of the application')] + + +
    + + +

    The primary source of the application

    +
    + + +
    +
    +
    + implemented_format_version: ClassVar[str] = +'0.2.4' + + +
    + + + + +
    +
    +
    + implemented_format_version_tuple: ClassVar[Tuple[int, int, int]] = +(0, 2, 4) + + +
    + + + + +
    +
    + +
    + + def + model_post_init(self: pydantic.main.BaseModel, context: Any, /) -> None: + + + +
    + +
    124                    def wrapped_model_post_init(self: BaseModel, context: Any, /) -> None:
    +125                        """We need to both initialize private attributes and call the user-defined model_post_init
    +126                        method.
    +127                        """
    +128                        init_private_attributes(self, context)
    +129                        original_model_post_init(self, context)
    +
    + + +

    We need to both initialize private attributes and call the user-defined model_post_init +method.

    +
    + + +
    + +
    +
    + +
    + + class + LinkedApplication(bioimageio.spec._internal.node.Node): + + + +
    + +
    47class LinkedApplication(Node):
    +48    """Reference to a bioimage.io application."""
    +49
    +50    id: ApplicationId
    +51    """A valid application `id` from the bioimage.io collection."""
    +52
    +53    version_number: Optional[int] = None
    +54    """version number (n-th published version, not the semantic version) of linked application"""
    +
    + + +

    Reference to a bioimage.io application.

    +
    + + +
    +
    + id: ApplicationId + + +
    + + +

    A valid application id from the bioimage.io collection.

    +
    + + +
    +
    +
    + version_number: Optional[int] + + +
    + + +

    version number (n-th published version, not the semantic version) of linked application

    +
    + + +
    +
    +
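A small construction sketch for `LinkedApplication`; the id is purely hypothetical and `version_number` counts published versions, as documented above.

```python
from bioimageio.spec.application.v0_2 import ApplicationId, LinkedApplication

# "hypothetical-app" is a made-up identifier used for illustration only
link = LinkedApplication(
    id=ApplicationId("hypothetical-app"),
    version_number=2,  # the 2nd published version, not a semantic version
)
print(link.id, link.version_number)
```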
    + + \ No newline at end of file diff --git a/bioimageio/spec/application/v0_3.html b/bioimageio/spec/application/v0_3.html new file mode 100644 index 00000000..b80e5fe1 --- /dev/null +++ b/bioimageio/spec/application/v0_3.html @@ -0,0 +1,594 @@ + + + + + + + bioimageio.spec.application.v0_3 API documentation + + + + + + + + + + +
    +
    +

    +bioimageio.spec.application.v0_3

    + + + + + + +
     1from typing import Literal, Optional
    + 2
    + 3from pydantic import Field
    + 4from typing_extensions import Annotated
    + 5
    + 6from .._internal.io import FileDescr as FileDescr
    + 7from .._internal.io_basics import AbsoluteFilePath as AbsoluteFilePath
    + 8from .._internal.io_basics import Sha256 as Sha256
    + 9from .._internal.types import ImportantFileSource
    +10from .._internal.url import HttpUrl as HttpUrl
    +11from ..generic.v0_3 import VALID_COVER_IMAGE_EXTENSIONS as VALID_COVER_IMAGE_EXTENSIONS
    +12from ..generic.v0_3 import Author as Author
    +13from ..generic.v0_3 import BadgeDescr as BadgeDescr
    +14from ..generic.v0_3 import CiteEntry as CiteEntry
    +15from ..generic.v0_3 import DeprecatedLicenseId as DeprecatedLicenseId
    +16from ..generic.v0_3 import Doi as Doi
    +17from ..generic.v0_3 import GenericDescrBase, LinkedResourceNode, ResourceId
    +18from ..generic.v0_3 import LicenseId as LicenseId
    +19from ..generic.v0_3 import LinkedResource as LinkedResource
    +20from ..generic.v0_3 import Maintainer as Maintainer
    +21from ..generic.v0_3 import OrcidId as OrcidId
    +22from ..generic.v0_3 import RelativeFilePath as RelativeFilePath
    +23from ..generic.v0_3 import Uploader as Uploader
    +24from ..generic.v0_3 import Version as Version
    +25
    +26
    +27class ApplicationId(ResourceId):
    +28    pass
    +29
    +30
    +31class ApplicationDescr(GenericDescrBase, title="bioimage.io application specification"):
    +32    """Bioimage.io description of an application."""
    +33
    +34    type: Literal["application"] = "application"
    +35
    +36    id: Optional[ApplicationId] = None
    +37    """bioimage.io-wide unique resource identifier
    +38    assigned by bioimage.io; version **un**specific."""
    +39
    +40    parent: Optional[ApplicationId] = None
    +41    """The description from which this one is derived"""
    +42
    +43    source: Annotated[
    +44        Optional[ImportantFileSource],
    +45        Field(description="URL or path to the source of the application"),
    +46    ] = None
    +47    """The primary source of the application"""
    +48
    +49
    +50class LinkedApplication(LinkedResourceNode):
    +51    """Reference to a bioimage.io application."""
    +52
    +53    id: ApplicationId
    +54    """A valid application `id` from the bioimage.io collection."""
    +
    + + +
    +
    + +
    + + class + ApplicationId(bioimageio.spec.generic.v0_3.ResourceId): + + + +
    + +
    28class ApplicationId(ResourceId):
    +29    pass
    +
    + + +

    str(object='') -> str +str(bytes_or_buffer[, encoding[, errors]]) -> str

    + +

    Create a new string object from the given object. If encoding or +errors is specified, then the object must expose a data buffer +that will be decoded using the given encoding and error handler. +Otherwise, returns the result of object.__str__() (if defined) +or repr(object). +encoding defaults to sys.getdefaultencoding(). +errors defaults to 'strict'.

    +
    + + +
    +
    Inherited Members
    +
    + +
    +
    +
    +
    + +
    + + class + ApplicationDescr(bioimageio.spec.generic.v0_3.GenericDescrBase): + + + +
    + +
    32class ApplicationDescr(GenericDescrBase, title="bioimage.io application specification"):
    +33    """Bioimage.io description of an application."""
    +34
    +35    type: Literal["application"] = "application"
    +36
    +37    id: Optional[ApplicationId] = None
    +38    """bioimage.io-wide unique resource identifier
    +39    assigned by bioimage.io; version **un**specific."""
    +40
    +41    parent: Optional[ApplicationId] = None
    +42    """The description from which this one is derived"""
    +43
    +44    source: Annotated[
    +45        Optional[ImportantFileSource],
    +46        Field(description="URL or path to the source of the application"),
    +47    ] = None
    +48    """The primary source of the application"""
    +
    + + +

    Bioimage.io description of an application.

    +
    + + +
    +
    + type: Literal['application'] + + +
    + + + + +
    +
    +
    + id: Optional[ApplicationId] + + +
    + + +

    bioimage.io-wide unique resource identifier +assigned by bioimage.io; version unspecific.

    +
    + + +
    +
    +
    + parent: Optional[ApplicationId] + + +
    + + +

    The description from which this one is derived

    +
    + + +
    +
    +
    + source: Annotated[Optional[Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')]), AfterValidator(func=<function wo_special_file_name at 0x7f538192cea0>), PlainSerializer(func=<function _package at 0x7f538192d620>, return_type=PydanticUndefined, when_used='unless-none')]], FieldInfo(annotation=NoneType, required=True, description='URL or path to the source of the application')] + + +
    + + +

    The primary source of the application

    +
    + + +
    +
    +
    + implemented_format_version: ClassVar[str] = +'0.3.0' + + +
    + + + + +
    +
    +
    + implemented_format_version_tuple: ClassVar[Tuple[int, int, int]] = +(0, 3, 0) + + +
    + + + + +
    +
    + +
    + + def + model_post_init(self: pydantic.main.BaseModel, context: Any, /) -> None: + + + +
    + +
    124                    def wrapped_model_post_init(self: BaseModel, context: Any, /) -> None:
    +125                        """We need to both initialize private attributes and call the user-defined model_post_init
    +126                        method.
    +127                        """
    +128                        init_private_attributes(self, context)
    +129                        original_model_post_init(self, context)
    +
    + + +

    We need to both initialize private attributes and call the user-defined model_post_init +method.

    +
    + + +
    + +
    +
    + +
    + + class + LinkedApplication(bioimageio.spec.generic.v0_3.LinkedResourceNode): + + + +
    + +
    51class LinkedApplication(LinkedResourceNode):
    +52    """Reference to a bioimage.io application."""
    +53
    +54    id: ApplicationId
    +55    """A valid application `id` from the bioimage.io collection."""
    +
    + + +

    Reference to a bioimage.io application.

    +
    + + +
    +
    + id: ApplicationId + + +
    + + +

    A valid application id from the bioimage.io collection.

    +
    + + +
    +
    +
    Inherited Members
    +
    + +
    +
    +
    +
    + + \ No newline at end of file diff --git a/bioimageio/spec/common.html b/bioimageio/spec/common.html new file mode 100644 index 00000000..b669f9c3 --- /dev/null +++ b/bioimageio/spec/common.html @@ -0,0 +1,1281 @@ + + + + + + + bioimageio.spec.common API documentation + + + + + + + + + + +
    +
    +

    +bioimageio.spec.common

    + + + + + + +
     1from pydantic import ValidationError
    + 2
    + 3from ._internal.common_nodes import InvalidDescr
    + 4from ._internal.io import (
    + 5    BioimageioYamlContent,
    + 6    BioimageioYamlSource,
    + 7    FileDescr,
    + 8    YamlValue,
    + 9)
    +10from ._internal.io_basics import AbsoluteDirectory, AbsoluteFilePath, FileName, Sha256
    +11from ._internal.root_url import RootHttpUrl
    +12from ._internal.types import FileSource, PermissiveFileSource, RelativeFilePath
    +13from ._internal.url import HttpUrl
    +14
    +15__all__ = [
    +16    "AbsoluteDirectory",
    +17    "AbsoluteFilePath",
    +18    "BioimageioYamlContent",
    +19    "BioimageioYamlSource",
    +20    "FileDescr",
    +21    "FileName",
    +22    "FileSource",
    +23    "HttpUrl",
    +24    "InvalidDescr",
    +25    "PermissiveFileSource",
    +26    "RelativeFilePath",
    +27    "RootHttpUrl",
    +28    "Sha256",
    +29    "ValidationError",
    +30    "YamlValue",
    +31]
    +
    + + +
    +
    +
    + AbsoluteDirectory = +typing.Annotated[pathlib.Path, PathType(path_type='dir'), Predicate(is_absolute)] + + +
    + + + + +
    +
    +
    + AbsoluteFilePath = +typing.Annotated[pathlib.Path, PathType(path_type='file'), Predicate(is_absolute)] + + +
    + + + + +
    +
    +
    + BioimageioYamlContent = +typing.Dict[str, YamlValue] + + +
    + + + + +
    +
    +
    + BioimageioYamlSource = + + typing.Union[typing.Annotated[typing.Union[HttpUrl, RelativeFilePath, typing.Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])], str, typing.Annotated[pydantic_core._pydantic_core.Url, UrlConstraints(max_length=2083, allowed_schemes=['http', 'https'], host_required=None, default_host=None, default_port=None, default_path=None)], typing.Dict[str, YamlValue]] + + +
    + + + + +
    +
    + +
    + + class + FileDescr(bioimageio.spec._internal.node.Node): + + + +
    + +
    768class FileDescr(Node):
    +769    source: ImportantFileSource
    +770    """∈📦 file source"""
    +771
    +772    sha256: Optional[Sha256] = None
    +773    """SHA256 checksum of the source file"""
    +774
    +775    @model_validator(mode="after")
    +776    def validate_sha256(self) -> Self:
    +777        context = validation_context_var.get()
    +778        if not context.perform_io_checks:
    +779            return self
    +780        elif (src_str := str(self.source)) in context.known_files:
    +781            actual_sha = context.known_files[src_str]
    +782        else:
    +783            local_source = download(self.source, sha256=self.sha256).path
    +784            actual_sha = get_sha256(local_source)
    +785            context.known_files[str(self.source)] = actual_sha
    +786
    +787        if self.sha256 is None:
    +788            self.sha256 = actual_sha
    +789        elif self.sha256 != actual_sha:
    +790            raise ValueError(
    +791                f"Sha256 mismatch for {self.source}. Expected {self.sha256}, got "
    +792                + f"{actual_sha}. Update expected `sha256` or point to the matching "
    +793                + "file."
    +794            )
    +795
    +796        return self
    +797
    +798    def download(self):
    +799
    +800        return download(self.source, sha256=self.sha256)
    +
    + + +

    Subpart of a resource description

    +
    + + +
    +
    + source: Annotated[Union[HttpUrl, RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')]), AfterValidator(func=<function wo_special_file_name at 0x7f538192cea0>), PlainSerializer(func=<function _package at 0x7f538192d620>, return_type=PydanticUndefined, when_used='unless-none')] + + +
    + + +

    ∈📦 file source

    +
    + + +
    +
    +
    + sha256: Optional[Sha256] + + +
    + + +

    SHA256 checksum of the source file

    +
    + + +
    +
    + +
    +
    @model_validator(mode='after')
    + + def + validate_sha256(self) -> Self: + + + +
    + +
    775    @model_validator(mode="after")
    +776    def validate_sha256(self) -> Self:
    +777        context = validation_context_var.get()
    +778        if not context.perform_io_checks:
    +779            return self
    +780        elif (src_str := str(self.source)) in context.known_files:
    +781            actual_sha = context.known_files[src_str]
    +782        else:
    +783            local_source = download(self.source, sha256=self.sha256).path
    +784            actual_sha = get_sha256(local_source)
    +785            context.known_files[str(self.source)] = actual_sha
    +786
    +787        if self.sha256 is None:
    +788            self.sha256 = actual_sha
    +789        elif self.sha256 != actual_sha:
    +790            raise ValueError(
    +791                f"Sha256 mismatch for {self.source}. Expected {self.sha256}, got "
    +792                + f"{actual_sha}. Update expected `sha256` or point to the matching "
    +793                + "file."
    +794            )
    +795
    +796        return self
    +
    + + + + +
    +
    + +
    + + def + download(self): + + + +
    + +
    798    def download(self):
    +799
    +800        return download(self.source, sha256=self.sha256)
    +
    + + + + +
    +
    +
    +
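A usage sketch for `FileDescr`: with io checks enabled (the default validation context), validation downloads the source and fills in or verifies `sha256`, and `download()` fetches the file and exposes a local path. The URL in the comment is a placeholder.

```python
from bioimageio.spec.common import FileDescr


def describe_remote_file(url: str) -> FileDescr:
    """Create a FileDescr for a downloadable file.

    With io checks enabled, validation downloads the file, computes its
    SHA-256 if none was given, and raises on a checksum mismatch otherwise.
    """
    return FileDescr(source=url)  # the str is coerced into a file source


# e.g. (placeholder URL, requires network access):
# fd = describe_remote_file("https://example.com/weights.pt")
# print(fd.sha256)            # filled in during validation
# print(fd.download().path)   # local path of the downloaded file
```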
    + FileName = +<class 'str'> + + +
    + + + + +
    +
    +
    + FileSource = + + typing.Annotated[typing.Union[HttpUrl, RelativeFilePath, typing.Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])] + + +
    + + + + +
    +
    + +
    + + class + HttpUrl(bioimageio.spec.common.RootHttpUrl): + + + +
    + +
    118class HttpUrl(RootHttpUrl):
    +119    root_model: ClassVar[Type[RootModel[Any]]] = RootModel[pydantic.HttpUrl]
    +120    _exists: Optional[bool] = None
    +121
    +122    @model_validator(mode="after")
    +123    def _validate_url(self):
    +124        url = self._validated
    +125        context = validation_context_var.get()
    +126        if context.perform_io_checks and str(url) not in context.known_files:
    +127            self._validated = _validate_url(url)
    +128            self._exists = True
    +129
    +130        return self
    +131
    +132    def exists(self):
    +133        """True if URL is available"""
    +134        if self._exists is None:
    +135            try:
    +136                self._validated = _validate_url(self._validated)
    +137            except Exception as e:
    +138                logger.info(e)
    +139                self._exists = False
    +140            else:
    +141                self._exists = True
    +142
    +143        return self._exists
    +
    + + +

    A 'URL folder', possibly an invalid http URL

    +
    + + +
    +
    + root_model: ClassVar[Type[pydantic.root_model.RootModel[Any]]] = + + <class 'pydantic.root_model.RootModel[Annotated[Url, UrlConstraints(max_length=2083, allowed_schemes=['http', 'https'], host_required=None, default_host=None, default_port=None, default_path=None)]]'> + + +
    + + +

    the pydantic root model to validate the string

    +
    + + +
    +
    + +
    + + def + exists(self): + + + +
    + +
    132    def exists(self):
    +133        """True if URL is available"""
    +134        if self._exists is None:
    +135            try:
    +136                self._validated = _validate_url(self._validated)
    +137            except Exception as e:
    +138                logger.info(e)
    +139                self._exists = False
    +140            else:
    +141                self._exists = True
    +142
    +143        return self._exists
    +
    + + +

    True if URL is available

    +
    + + +
    +
    +
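A brief sketch for `HttpUrl`: with io checks enabled the URL is already probed during construction, while `exists()` can be called at any later point and never raises.

```python
from bioimageio.spec.common import HttpUrl

# validated on construction; with io checks enabled this already probes the URL
url = HttpUrl("https://bioimage.io")

# returns the cached result or checks availability, logging errors instead of raising
if url.exists():
    print("reachable:", url)
else:
    print("not reachable:", url)
```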
    + +
    + + class + InvalidDescr(bioimageio.spec._internal.common_nodes.ResourceDescrBase): + + + +
    + +
    514class InvalidDescr(
    +515    ResourceDescrBase,
    +516    extra="allow",
    +517    title="An invalid resource description",
    +518):
    +519    """A representation of an invalid resource description"""
    +520
    +521    type: Any = "unknown"
    +522    format_version: Any = "unknown"
    +523    fields_to_set_explicitly: ClassVar[FrozenSet[LiteralString]] = frozenset()
    +
    + + +

    A representation of an invalid resource description

    +
    + + +
    +
    + type: Any + + +
    + + + + +
    +
    +
    + format_version: Any + + +
    + + + + +
    +
    +
    + fields_to_set_explicitly: ClassVar[FrozenSet[LiteralString]] = +frozenset() + + +
    + + +

set these fields explicitly with their default value if they are not set, +such that they are always included even when dumping with 'exclude_unset'

    +
    + + +
    +
    +
    + implemented_format_version: ClassVar[str] = +'unknown' + + +
    + + + + +
    +
    +
    + implemented_format_version_tuple: ClassVar[Tuple[int, int, int]] = +(0, 0, 0) + + +
    + + + + +
    +
    + +
    + + def + model_post_init(self: pydantic.main.BaseModel, context: Any, /) -> None: + + + +
    + +
    124                    def wrapped_model_post_init(self: BaseModel, context: Any, /) -> None:
    +125                        """We need to both initialize private attributes and call the user-defined model_post_init
    +126                        method.
    +127                        """
    +128                        init_private_attributes(self, context)
    +129                        original_model_post_init(self, context)
    +
    + + +

    We need to both initialize private attributes and call the user-defined model_post_init +method.

    +
    + + +
    +
    +
    +
    + PermissiveFileSource = + + typing.Union[typing.Annotated[typing.Union[HttpUrl, RelativeFilePath, typing.Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])], str, typing.Annotated[pydantic_core._pydantic_core.Url, UrlConstraints(max_length=2083, allowed_schemes=['http', 'https'], host_required=None, default_host=None, default_port=None, default_path=None)]] + + +
    + + + + +
    +
    + +
    + + class + RelativeFilePath(pydantic.root_model.RootModel[PurePath], typing.Generic[~AbsolutePathT]): + + + +
    + +
    186class RelativeFilePath(
    +187    RelativePathBase[Union[AbsoluteFilePath, HttpUrl, ZipPath]], frozen=True
    +188):
+189    """A path relative to the `rdf.yaml` file (even if the RDF source is a URL)."""
    +190
    +191    def model_post_init(self, __context: Any) -> None:
    +192        """add validation @private"""
    +193        if not self.root.parts:  # an empty path can only be a directory
    +194            raise ValueError(f"{self.root} is not a valid file path.")
    +195
    +196        super().model_post_init(__context)
    +197
    +198    def get_absolute(
    +199        self, root: "RootHttpUrl | Path | AnyUrl | ZipFile"
    +200    ) -> "AbsoluteFilePath | HttpUrl | ZipPath":
    +201        absolute = self._get_absolute_impl(root)
    +202        if (
    +203            isinstance(absolute, Path)
    +204            and (context := validation_context_var.get()).perform_io_checks
    +205            and str(self.root) not in context.known_files
    +206            and not absolute.is_file()
    +207        ):
    +208            raise ValueError(f"{absolute} does not point to an existing file")
    +209
    +210        return absolute
    +
    + + +

    A path relative to the rdf.yaml file (even if the RDF source is a URL).

    +
    + + +
    + +
    + + def + model_post_init(self: pydantic.main.BaseModel, context: Any, /) -> None: + + + +
    + +
    124                    def wrapped_model_post_init(self: BaseModel, context: Any, /) -> None:
    +125                        """We need to both initialize private attributes and call the user-defined model_post_init
    +126                        method.
    +127                        """
    +128                        init_private_attributes(self, context)
    +129                        original_model_post_init(self, context)
    +
    + + +

    We need to both initialize private attributes and call the user-defined model_post_init +method.

    +
    + + +
    +
    + +
    + + def + get_absolute( self, root: RootHttpUrl | pathlib.Path | pydantic_core._pydantic_core.Url | zipfile.ZipFile) -> Union[Annotated[pathlib.Path, PathType(path_type='file'), Predicate(is_absolute)], HttpUrl, zipp.Path]: + + + +
    + +
    198    def get_absolute(
    +199        self, root: "RootHttpUrl | Path | AnyUrl | ZipFile"
    +200    ) -> "AbsoluteFilePath | HttpUrl | ZipPath":
    +201        absolute = self._get_absolute_impl(root)
    +202        if (
    +203            isinstance(absolute, Path)
    +204            and (context := validation_context_var.get()).perform_io_checks
    +205            and str(self.root) not in context.known_files
    +206            and not absolute.is_file()
    +207        ):
    +208            raise ValueError(f"{absolute} does not point to an existing file")
    +209
    +210        return absolute
    +
    + + + + +
    +
    +
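A sketch of resolving a `RelativeFilePath` against different roots; depending on the root, the result is an absolute local path, an `HttpUrl`, or a path inside a zip file. The locations in the comments are placeholders.

```python
from pathlib import Path

from bioimageio.spec.common import RelativeFilePath, RootHttpUrl


def resolve(rfp: RelativeFilePath, root: "Path | RootHttpUrl"):
    """Resolve a path given relative to an rdf.yaml against the rdf's root.

    Note: with io checks enabled, a local result must point to an existing file.
    """
    return rfp.get_absolute(root)


# e.g. (placeholder locations):
# rfp = RelativeFilePath(Path("weights/weights.pt"))
# resolve(rfp, Path("/path/to/unzipped/model"))             # -> absolute local path
# resolve(rfp, RootHttpUrl("https://example.com/model/1"))  # -> HttpUrl
```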
    + +
    + + class + RootHttpUrl(bioimageio.spec._internal.validated_string.ValidatedString): + + + +
    + +
    13class RootHttpUrl(ValidatedString):
    +14    """A 'URL folder', possibly an invalid http URL"""
    +15
    +16    root_model: ClassVar[Type[RootModel[Any]]] = RootModel[pydantic.HttpUrl]
    +17    _validated: pydantic.HttpUrl
    +18
    +19    def absolute(self):
    +20        """analog to `absolute` method of pathlib."""
    +21        return self
    +22
    +23    @property
    +24    def scheme(self) -> str:
    +25        return self._validated.scheme
    +26
    +27    @property
    +28    def host(self) -> Optional[str]:
    +29        return self._validated.host
    +30
    +31    @property
    +32    def path(self) -> Optional[str]:
    +33        return self._validated.path
    +34
    +35    @property
    +36    def parent(self) -> RootHttpUrl:
    +37        parsed = urlsplit(str(self))
    +38        path = list(parsed.path.split("/"))
    +39        if (
    +40            parsed.netloc == "zenodo.org"
    +41            and parsed.path.startswith("/api/records/")
    +42            and parsed.path.endswith("/content")
    +43        ):
    +44            path[-2:-1] = []
    +45        else:
    +46            path = path[:-1]
    +47
    +48        return RootHttpUrl(
    +49            urlunsplit(
    +50                (
    +51                    parsed.scheme,
    +52                    parsed.netloc,
    +53                    "/".join(path),
    +54                    parsed.query,
    +55                    parsed.fragment,
    +56                )
    +57            )
    +58        )
    +
    + + +

    A 'URL folder', possibly an invalid http URL

    +
    + + +
    +
    + root_model: ClassVar[Type[pydantic.root_model.RootModel[Any]]] = + + <class 'pydantic.root_model.RootModel[Annotated[Url, UrlConstraints(max_length=2083, allowed_schemes=['http', 'https'], host_required=None, default_host=None, default_port=None, default_path=None)]]'> + + +
    + + +

    the pydantic root model to validate the string

    +
    + + +
    +
    + +
    + + def + absolute(self): + + + +
    + +
    19    def absolute(self):
    +20        """analog to `absolute` method of pathlib."""
    +21        return self
    +
    + + +

    analog to absolute method of pathlib.

    +
    + + +
    +
    + +
    + scheme: str + + + +
    + +
    23    @property
    +24    def scheme(self) -> str:
    +25        return self._validated.scheme
    +
    + + + + +
    +
    + +
    + host: Optional[str] + + + +
    + +
    27    @property
    +28    def host(self) -> Optional[str]:
    +29        return self._validated.host
    +
    + + + + +
    +
    + +
    + path: Optional[str] + + + +
    + +
    31    @property
    +32    def path(self) -> Optional[str]:
    +33        return self._validated.path
    +
    + + + + +
    +
    + +
    + parent: RootHttpUrl + + + +
    + +
    35    @property
    +36    def parent(self) -> RootHttpUrl:
    +37        parsed = urlsplit(str(self))
    +38        path = list(parsed.path.split("/"))
    +39        if (
    +40            parsed.netloc == "zenodo.org"
    +41            and parsed.path.startswith("/api/records/")
    +42            and parsed.path.endswith("/content")
    +43        ):
    +44            path[-2:-1] = []
    +45        else:
    +46            path = path[:-1]
    +47
    +48        return RootHttpUrl(
    +49            urlunsplit(
    +50                (
    +51                    parsed.scheme,
    +52                    parsed.netloc,
    +53                    "/".join(path),
    +54                    parsed.query,
    +55                    parsed.fragment,
    +56                )
    +57            )
    +58        )
    +
    + + + + +
    +
    +
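`parent` behaves like `pathlib`'s: it drops the last path segment, with a special case for zenodo `/api/records/.../content` URLs where the file-name segment before `/content` is removed instead. A tiny sketch with placeholder URLs:

```python
from bioimageio.spec.common import RootHttpUrl

url = RootHttpUrl("https://example.com/datasets/my-model/rdf.yaml")
print(url.parent)  # https://example.com/datasets/my-model

# zenodo 'content' URLs keep the trailing '/content' and drop the file name:
z = RootHttpUrl("https://zenodo.org/api/records/123456/files/rdf.yaml/content")
print(z.parent)    # https://zenodo.org/api/records/123456/files/content
```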
    + +
    + + class + Sha256(bioimageio.spec._internal.validated_string.ValidatedString): + + + +
    + +
    23class Sha256(ValidatedString):
    +24    """SHA-256 hash value"""
    +25
    +26    root_model: ClassVar[Type[RootModel[Any]]] = RootModel[
    +27        Annotated[
    +28            str,
    +29            StringConstraints(
    +30                strip_whitespace=True, to_lower=True, min_length=64, max_length=64
    +31            ),
    +32        ]
    +33    ]
    +
    + + +

    SHA-256 hash value

    +
    + + +
    +
    + root_model: ClassVar[Type[pydantic.root_model.RootModel[Any]]] = +<class 'pydantic.root_model.RootModel[Annotated[str, StringConstraints]]'> + + +
    + + +

    the pydantic root model to validate the string

    +
    + + +
    +
    +
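`Sha256` is a validated string: per the constraints above, surrounding whitespace is stripped, the value is lower-cased, and exactly 64 characters are required. A minimal sketch with a dummy digest:

```python
from bioimageio.spec.common import Sha256

# dummy 64-character digest for illustration only; a real value would be
# the hex SHA-256 of a file (see FileDescr.sha256 above)
digest = Sha256("a" * 64)
print(digest)
```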
    +
    + + class + ValidationError(builtins.ValueError): + + +
    + + +

    Inappropriate argument value (of correct type).

    +
    + + +
    +
    + + def + from_exception_data(title, line_errors, input_type='python', hide_input=False): + + +
    + + + + +
    +
    +
    + + def + error_count(self, /): + + +
    + + + + +
    +
    +
    + + def + errors( self, /, *, include_url=True, include_context=True, include_input=True): + + +
    + + + + +
    +
    +
    + + def + json( self, /, *, indent=None, include_url=True, include_context=True, include_input=True): + + +
    + + + + +
    +
    +
    + title + + +
    + + + + +
    +
    +
    +
    + type YamlValue = + + Union[bool, datetime.date, datetime.datetime, int, float, str, NoneType, List[ForwardRef('YamlValue')], Dict[Union[bool, datetime.date, datetime.datetime, int, float, str, NoneType, Tuple[Union[bool, datetime.date, datetime.datetime, int, float, str, NoneType], ...]], ForwardRef('YamlValue')]] + + +
    + + + + +
    +
    + + \ No newline at end of file diff --git a/bioimageio/spec/conda_env.html b/bioimageio/spec/conda_env.html new file mode 100644 index 00000000..82ee2cde --- /dev/null +++ b/bioimageio/spec/conda_env.html @@ -0,0 +1,603 @@ + + + + + + + bioimageio.spec.conda_env API documentation + + + + + + + + + + +
    +
    +

    +bioimageio.spec.conda_env

    + + + + + + +
      1import warnings
    +  2from typing import Any, List, Optional, Union
    +  3
    +  4from pydantic import BaseModel, Field, field_validator, model_validator
    +  5
    +  6
    +  7class PipDeps(BaseModel):
+  8    """Pip dependencies to include in conda dependencies"""
    +  9
    + 10    pip: List[str] = Field(default_factory=list)
    + 11
    + 12    def __lt__(self, other: Any):
    + 13        if isinstance(other, PipDeps):
    + 14            return len(self.pip) < len(other.pip)
    + 15        else:
    + 16            return False
    + 17
    + 18    def __gt__(self, other: Any):
    + 19        if isinstance(other, PipDeps):
    + 20            return len(self.pip) > len(other.pip)
    + 21        else:
    + 22            return False
    + 23
    + 24
    + 25class CondaEnv(BaseModel):
+ 26    """Representation of the content of a conda environment.yaml file"""
    + 27
    + 28    name: Optional[str] = None
    + 29    channels: List[str] = Field(default_factory=list)
    + 30    dependencies: List[Union[str, PipDeps]] = Field(default_factory=list)
    + 31
    + 32    @field_validator("name", mode="after")
    + 33    def _ensure_valid_conda_env_name(cls, value: Optional[str]) -> Optional[str]:
    + 34        if value is None:
    + 35            return None
    + 36
    + 37        for illegal in ("/", " ", ":", "#"):
    + 38            value = value.replace(illegal, "")
    + 39
    + 40        return value or "empty"
    + 41
    + 42    @property
    + 43    def wo_name(self):
    + 44        return self.model_construct(**{k: v for k, v in self if k != "name"})
    + 45
    + 46    def _get_version(self, package: str):
+ 47        """Helper to return any version pin for **package**
+ 48
+ 49        TODO: improve: interpret version pin and return structured information.
    + 50        """
    + 51        for d in self.dependencies:
    + 52            if isinstance(d, PipDeps):
    + 53                for p in d.pip:
    + 54                    if p.startswith(package):
    + 55                        return p[len(package) :]
    + 56            elif d.startswith(package):
    + 57                return d[len(package) :]
    + 58
    + 59
    + 60class BioimageioCondaEnv(CondaEnv):
    + 61    """A special `CondaEnv` that
    + 62    - automatically adds bioimageio specific dependencies
    + 63    - sorts dependencies
    + 64    """
    + 65
    + 66    @model_validator(mode="after")
    + 67    def _normalize_bioimageio_conda_env(self):
    + 68        """update a conda env such that we have bioimageio.core and sorted dependencies"""
    + 69        for req_channel in ("conda-forge", "nodefaults"):
    + 70            if req_channel not in self.channels:
    + 71                self.channels.append(req_channel)
    + 72
    + 73        if "defaults" in self.channels:
    + 74            warnings.warn("removing 'defaults' from conda-channels")
    + 75            self.channels.remove("defaults")
    + 76
    + 77        if "pip" not in self.dependencies:
    + 78            self.dependencies.append("pip")
    + 79
    + 80        for dep in self.dependencies:
    + 81            if isinstance(dep, PipDeps):
    + 82                pip_section = dep
    + 83                pip_section.pip.sort()
    + 84                break
    + 85        else:
    + 86            pip_section = None
    + 87
    + 88        if (
    + 89            pip_section is not None
    + 90            and any(pd.startswith("bioimageio.core") for pd in pip_section.pip)
    + 91        ) and not any(
    + 92            d.startswith("bioimageio.core")
    + 93            or d.startswith("conda-forge::bioimageio.core")
    + 94            for d in self.dependencies
    + 95            if not isinstance(d, PipDeps)
    + 96        ):
    + 97            self.dependencies.append("conda-forge::bioimageio.core")
    + 98
    + 99        self.dependencies.sort()
    +100        return self
    +
    + + +
    +
    + +
    + + class + PipDeps(pydantic.main.BaseModel): + + + +
    + +
     8class PipDeps(BaseModel):
+ 9    """Pip dependencies to include in conda dependencies"""
    +10
    +11    pip: List[str] = Field(default_factory=list)
    +12
    +13    def __lt__(self, other: Any):
    +14        if isinstance(other, PipDeps):
    +15            return len(self.pip) < len(other.pip)
    +16        else:
    +17            return False
    +18
    +19    def __gt__(self, other: Any):
    +20        if isinstance(other, PipDeps):
    +21            return len(self.pip) > len(other.pip)
    +22        else:
    +23            return False
    +
    + + +

    Pip dependencies to include in conda dependencies

    +
    + + +
    +
    + pip: List[str] + + +
    + + + + +
    +
    +
    + +
    + + class + CondaEnv(pydantic.main.BaseModel): + + + +
    + +
    26class CondaEnv(BaseModel):
+27    """Representation of the content of a conda environment.yaml file"""
    +28
    +29    name: Optional[str] = None
    +30    channels: List[str] = Field(default_factory=list)
    +31    dependencies: List[Union[str, PipDeps]] = Field(default_factory=list)
    +32
    +33    @field_validator("name", mode="after")
    +34    def _ensure_valid_conda_env_name(cls, value: Optional[str]) -> Optional[str]:
    +35        if value is None:
    +36            return None
    +37
    +38        for illegal in ("/", " ", ":", "#"):
    +39            value = value.replace(illegal, "")
    +40
    +41        return value or "empty"
    +42
    +43    @property
    +44    def wo_name(self):
    +45        return self.model_construct(**{k: v for k, v in self if k != "name"})
    +46
    +47    def _get_version(self, package: str):
+48        """Helper to return any version pin for **package**
+49
+50        TODO: improve: interpret version pin and return structured information.
    +51        """
    +52        for d in self.dependencies:
    +53            if isinstance(d, PipDeps):
    +54                for p in d.pip:
    +55                    if p.startswith(package):
    +56                        return p[len(package) :]
    +57            elif d.startswith(package):
    +58                return d[len(package) :]
    +
    + + +

    Representation of the content of a conda environment.yaml file

    +
    + + +
    +
    + name: Optional[str] + + +
    + + + + +
    +
    +
    + channels: List[str] + + +
    + + + + +
    +
    +
    + dependencies: List[Union[str, PipDeps]] + + +
    + + + + +
    +
    + +
    + wo_name + + + +
    + +
    43    @property
    +44    def wo_name(self):
    +45        return self.model_construct(**{k: v for k, v in self if k != "name"})
    +
    + + + + +
    +
    +
    + +
    + + class + BioimageioCondaEnv(CondaEnv): + + + +
    + +
     61class BioimageioCondaEnv(CondaEnv):
    + 62    """A special `CondaEnv` that
    + 63    - automatically adds bioimageio specific dependencies
    + 64    - sorts dependencies
    + 65    """
    + 66
    + 67    @model_validator(mode="after")
    + 68    def _normalize_bioimageio_conda_env(self):
    + 69        """update a conda env such that we have bioimageio.core and sorted dependencies"""
    + 70        for req_channel in ("conda-forge", "nodefaults"):
    + 71            if req_channel not in self.channels:
    + 72                self.channels.append(req_channel)
    + 73
    + 74        if "defaults" in self.channels:
    + 75            warnings.warn("removing 'defaults' from conda-channels")
    + 76            self.channels.remove("defaults")
    + 77
    + 78        if "pip" not in self.dependencies:
    + 79            self.dependencies.append("pip")
    + 80
    + 81        for dep in self.dependencies:
    + 82            if isinstance(dep, PipDeps):
    + 83                pip_section = dep
    + 84                pip_section.pip.sort()
    + 85                break
    + 86        else:
    + 87            pip_section = None
    + 88
    + 89        if (
    + 90            pip_section is not None
    + 91            and any(pd.startswith("bioimageio.core") for pd in pip_section.pip)
    + 92        ) and not any(
    + 93            d.startswith("bioimageio.core")
    + 94            or d.startswith("conda-forge::bioimageio.core")
    + 95            for d in self.dependencies
    + 96            if not isinstance(d, PipDeps)
    + 97        ):
    + 98            self.dependencies.append("conda-forge::bioimageio.core")
    + 99
    +100        self.dependencies.sort()
    +101        return self
    +
    + + +

    A special CondaEnv that

    + +
• automatically adds bioimageio specific dependencies
• sorts dependencies
    +
    + + +
    +
    Inherited Members
    +
    + +
    +
    +
    +
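A sketch of the normalization performed by `BioimageioCondaEnv` on validation, as implemented in `_normalize_bioimageio_conda_env` above: required channels are ensured, `defaults` is dropped with a warning, `pip` is added, a pip pin of `bioimageio.core` is mirrored as a conda dependency, and dependencies are sorted.

```python
from bioimageio.spec.conda_env import BioimageioCondaEnv, PipDeps

env = BioimageioCondaEnv(
    name="my env",          # illegal characters (here the space) are stripped
    channels=["defaults"],  # replaced by 'conda-forge' and 'nodefaults'
    dependencies=["python=3.12", PipDeps(pip=["bioimageio.core"])],
)

print(env.name)          # 'myenv'
print(env.channels)      # ['conda-forge', 'nodefaults']
print(env.dependencies)  # sorted; now also contains 'pip' and 'conda-forge::bioimageio.core'
```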
    + + \ No newline at end of file diff --git a/bioimageio/spec/dataset.html b/bioimageio/spec/dataset.html new file mode 100644 index 00000000..e37fe061 --- /dev/null +++ b/bioimageio/spec/dataset.html @@ -0,0 +1,575 @@ + + + + + + + bioimageio.spec.dataset API documentation + + + + + + + + + + +
    +
    +

    +bioimageio.spec.dataset

    + +

    implementations of all released minor versions are available in submodules:

    + + +
    + + + + + +
     1# autogen: start
    + 2"""
+ 3implementations of all released minor versions are available in submodules:
    + 4- dataset v0_2: `bioimageio.spec.dataset.v0_2.DatasetDescr`
    + 5- dataset v0_3: `bioimageio.spec.dataset.v0_3.DatasetDescr`
    + 6"""
    + 7
    + 8from typing import Union
    + 9
    +10from pydantic import Discriminator
    +11from typing_extensions import Annotated
    +12
    +13from . import v0_2, v0_3
    +14
    +15DatasetDescr = v0_3.DatasetDescr
    +16DatasetDescr_v0_2 = v0_2.DatasetDescr
    +17DatasetDescr_v0_3 = v0_3.DatasetDescr
    +18
    +19AnyDatasetDescr = Annotated[
    +20    Union[DatasetDescr_v0_2, DatasetDescr_v0_3], Discriminator("format_version")
    +21]
+22"""Union of any released dataset description"""
    +23# autogen: stop
    +
    + + +
    +
    + +
    + + class + DatasetDescr(bioimageio.spec.generic.v0_3.GenericDescrBase): + + + +
    + +
     39class DatasetDescr(GenericDescrBase, title="bioimage.io dataset specification"):
    + 40    """A bioimage.io dataset resource description file (dataset RDF) describes a dataset relevant to bioimage
    + 41    processing.
    + 42    """
    + 43
    + 44    type: Literal["dataset"] = "dataset"
    + 45
    + 46    id: Optional[DatasetId] = None
    + 47    """bioimage.io-wide unique resource identifier
    + 48    assigned by bioimage.io; version **un**specific."""
    + 49
    + 50    parent: Optional[DatasetId] = None
    + 51    """The description from which this one is derived"""
    + 52
    + 53    source: Optional[HttpUrl] = None
    + 54    """"URL to the source of the dataset."""
    + 55
    + 56    @model_validator(mode="before")
    + 57    @classmethod
    + 58    def _convert(cls, data: Dict[str, Any], /) -> Dict[str, Any]:
    + 59        if (
    + 60            data.get("type") == "dataset"
    + 61            and isinstance(fv := data.get("format_version"), str)
    + 62            and fv.startswith("0.2.")
    + 63        ):
    + 64            old = DatasetDescr02.load(data)
    + 65            if isinstance(old, InvalidDescr):
    + 66                return data
    + 67
    + 68            return cast(
    + 69                Dict[str, Any],
    + 70                (cls if TYPE_CHECKING else dict)(
    + 71                    attachments=(
    + 72                        []
    + 73                        if old.attachments is None
    + 74                        else [FileDescr(source=f) for f in old.attachments.files]
    + 75                    ),
    + 76                    authors=[
    + 77                        _author_conv.convert_as_dict(a) for a in old.authors
    + 78                    ],  # pyright: ignore[reportArgumentType]
    + 79                    badges=old.badges,
    + 80                    cite=[
    + 81                        {"text": c.text, "doi": c.doi, "url": c.url} for c in old.cite
    + 82                    ],  # pyright: ignore[reportArgumentType]
    + 83                    config=old.config,
    + 84                    covers=old.covers,
    + 85                    description=old.description,
    + 86                    documentation=cast(DocumentationSource, old.documentation),
    + 87                    format_version="0.3.0",
    + 88                    git_repo=old.git_repo,  # pyright: ignore[reportArgumentType]
    + 89                    icon=old.icon,
    + 90                    id=None if old.id is None else DatasetId(old.id),
    + 91                    license=old.license,  # type: ignore
    + 92                    links=old.links,
    + 93                    maintainers=[
    + 94                        _maintainer_conv.convert_as_dict(m) for m in old.maintainers
    + 95                    ],  # pyright: ignore[reportArgumentType]
    + 96                    name=old.name,
    + 97                    source=old.source,
    + 98                    tags=old.tags,
    + 99                    type=old.type,
    +100                    uploader=old.uploader,
    +101                    version=old.version,
    +102                    **(old.model_extra or {}),
    +103                ),
    +104            )
    +105
    +106        return data
    +
    + + +

    A bioimage.io dataset resource description file (dataset RDF) describes a dataset relevant to bioimage +processing.

    +
    + + +
    +
    + type: Literal['dataset'] + + +
    + + + + +
    +
    + + + +

    bioimage.io-wide unique resource identifier +assigned by bioimage.io; version unspecific.

    +
    + + +
    +
    +
    + parent: Optional[bioimageio.spec.dataset.v0_3.DatasetId] + + +
    + + +

    The description from which this one is derived

    +
    + + +
    +
    +
    + source: Optional[bioimageio.spec._internal.url.HttpUrl] + + +
    + + +

    "URL to the source of the dataset.

    +
    + + +
    +
    +
    + implemented_format_version: ClassVar[str] = +'0.3.0' + + +
    + + + + +
    +
    +
    + implemented_format_version_tuple: ClassVar[Tuple[int, int, int]] = +(0, 3, 0) + + +
    + + + + +
    +
    + +
    + + def + model_post_init(self: pydantic.main.BaseModel, context: Any, /) -> None: + + + +
    + +
    124                    def wrapped_model_post_init(self: BaseModel, context: Any, /) -> None:
    +125                        """We need to both initialize private attributes and call the user-defined model_post_init
    +126                        method.
    +127                        """
    +128                        init_private_attributes(self, context)
    +129                        original_model_post_init(self, context)
    +
    + + +

    We need to both initialize private attributes and call the user-defined model_post_init +method.

    +
    + + +
    + +
    +
    +
    + DatasetDescr_v0_2 = +<class 'bioimageio.spec.dataset.v0_2.DatasetDescr'> + + +
    + + + + +
    +
    +
    + DatasetDescr_v0_3 = +<class 'DatasetDescr'> + + +
    + + + + +
    +
    +
    + AnyDatasetDescr = + + typing.Annotated[typing.Union[bioimageio.spec.dataset.v0_2.DatasetDescr, DatasetDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)] + + +
    + + +

    Union of any released dataset description

    +
    + + +
    +
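A sketch of loading raw dataset RDF content with the latest `DatasetDescr`. As in the `_convert` validator above, data in format 0.2 is converted on the fly, and `load` is assumed to return an `InvalidDescr` rather than raising when validation fails.

```python
from typing import Any, Dict

from bioimageio.spec.common import InvalidDescr
from bioimageio.spec.dataset import DatasetDescr


def load_dataset(rdf: Dict[str, Any]) -> DatasetDescr:
    """Validate raw dataset RDF content; 0.2 data is converted to 0.3 on the fly."""
    descr = DatasetDescr.load(rdf)
    if isinstance(descr, InvalidDescr):
        raise ValueError("invalid dataset description")

    return descr
```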
    + + \ No newline at end of file diff --git a/bioimageio/spec/dataset/v0_2.html b/bioimageio/spec/dataset/v0_2.html new file mode 100644 index 00000000..90269b6c --- /dev/null +++ b/bioimageio/spec/dataset/v0_2.html @@ -0,0 +1,582 @@ + + + + + + + bioimageio.spec.dataset.v0_2 API documentation + + + + + + + + + + +
    +
    +

    +bioimageio.spec.dataset.v0_2

    + + + + + + +
     1from typing import Literal, Optional
    + 2
    + 3from .._internal.common_nodes import Node
    + 4from .._internal.io_basics import AbsoluteFilePath as AbsoluteFilePath
    + 5from .._internal.url import HttpUrl as HttpUrl
    + 6from ..generic.v0_2 import VALID_COVER_IMAGE_EXTENSIONS as VALID_COVER_IMAGE_EXTENSIONS
    + 7from ..generic.v0_2 import AttachmentsDescr as AttachmentsDescr
    + 8from ..generic.v0_2 import Author as Author
    + 9from ..generic.v0_2 import BadgeDescr as BadgeDescr
    +10from ..generic.v0_2 import CiteEntry as CiteEntry
    +11from ..generic.v0_2 import Doi as Doi
    +12from ..generic.v0_2 import GenericDescrBase, ResourceId
    +13from ..generic.v0_2 import LinkedResource as LinkedResource
    +14from ..generic.v0_2 import Maintainer as Maintainer
    +15from ..generic.v0_2 import OrcidId as OrcidId
    +16from ..generic.v0_2 import RelativeFilePath as RelativeFilePath
    +17from ..generic.v0_2 import Uploader as Uploader
    +18from ..generic.v0_2 import Version as Version
    +19
    +20
    +21class DatasetId(ResourceId):
    +22    pass
    +23
    +24
    +25class DatasetDescr(GenericDescrBase, title="bioimage.io dataset specification"):
    +26    """A bioimage.io dataset resource description file (dataset RDF) describes a dataset relevant to bioimage
    +27    processing.
    +28    """
    +29
    +30    type: Literal["dataset"] = "dataset"
    +31
    +32    id: Optional[DatasetId] = None
    +33    """bioimage.io-wide unique resource identifier
    +34    assigned by bioimage.io; version **un**specific."""
    +35
    +36    source: Optional[HttpUrl] = None
+37    """URL to the source of the dataset."""
    +38
    +39
    +40class LinkedDataset(Node):
    +41    """Reference to a bioimage.io dataset."""
    +42
    +43    id: DatasetId
    +44    """A valid dataset `id` from the bioimage.io collection."""
    +45
    +46    version_number: Optional[int] = None
    +47    """version number (n-th published version, not the semantic version) of linked dataset"""
    +
    + + +
    +
    + +
    + + class + DatasetId(bioimageio.spec.generic.v0_2.ResourceId): + + + +
    + +
    22class DatasetId(ResourceId):
    +23    pass
    +
    + + +

    str(object='') -> str +str(bytes_or_buffer[, encoding[, errors]]) -> str

    + +

    Create a new string object from the given object. If encoding or +errors is specified, then the object must expose a data buffer +that will be decoded using the given encoding and error handler. +Otherwise, returns the result of object.__str__() (if defined) +or repr(object). +encoding defaults to sys.getdefaultencoding(). +errors defaults to 'strict'.

    +
    + + +
    +
    Inherited Members
    +
    + +
    +
    +
    +
    + +
    + + class + DatasetDescr(bioimageio.spec.generic.v0_2.GenericDescrBase): + + + +
    + +
    26class DatasetDescr(GenericDescrBase, title="bioimage.io dataset specification"):
    +27    """A bioimage.io dataset resource description file (dataset RDF) describes a dataset relevant to bioimage
    +28    processing.
    +29    """
    +30
    +31    type: Literal["dataset"] = "dataset"
    +32
    +33    id: Optional[DatasetId] = None
    +34    """bioimage.io-wide unique resource identifier
    +35    assigned by bioimage.io; version **un**specific."""
    +36
    +37    source: Optional[HttpUrl] = None
+38    """URL to the source of the dataset."""
    +
    + + +

    A bioimage.io dataset resource description file (dataset RDF) describes a dataset relevant to bioimage +processing.

    +
    + + +
    +
    + type: Literal['dataset'] + + +
    + + + + +
    +
    +
    + id: Optional[DatasetId] + + +
    + + +

    bioimage.io-wide unique resource identifier +assigned by bioimage.io; version unspecific.

    +
    + + +
    +
    +
    + source: Optional[bioimageio.spec._internal.url.HttpUrl] + + +
    + + +

    "URL to the source of the dataset.

    +
    + + +
    +
    +
    + implemented_format_version: ClassVar[str] = +'0.2.4' + + +
    + + + + +
    +
    +
    + implemented_format_version_tuple: ClassVar[Tuple[int, int, int]] = +(0, 2, 4) + + +
    + + + + +
    +
    + +
    + + def + model_post_init(self: pydantic.main.BaseModel, context: Any, /) -> None: + + + +
    + +
    124                    def wrapped_model_post_init(self: BaseModel, context: Any, /) -> None:
    +125                        """We need to both initialize private attributes and call the user-defined model_post_init
    +126                        method.
    +127                        """
    +128                        init_private_attributes(self, context)
    +129                        original_model_post_init(self, context)
    +
    + + +

    We need to both initialize private attributes and call the user-defined model_post_init +method.

    +
    + + +
    + +
    +
    + +
    + + class + LinkedDataset(bioimageio.spec._internal.node.Node): + + + +
    + +
    41class LinkedDataset(Node):
    +42    """Reference to a bioimage.io dataset."""
    +43
    +44    id: DatasetId
    +45    """A valid dataset `id` from the bioimage.io collection."""
    +46
    +47    version_number: Optional[int] = None
    +48    """version number (n-th published version, not the semantic version) of linked dataset"""
    +
    + + +

    Reference to a bioimage.io dataset.

    +
    + + +
    +
    + id: DatasetId + + +
    + + +

    A valid dataset id from the bioimage.io collection.

    +
    + + +
    +
    +
    + version_number: Optional[int] + + +
    + + +

    version number (n-th published version, not the semantic version) of linked dataset

    +
    + + +
    +
    +
    + + \ No newline at end of file diff --git a/bioimageio/spec/dataset/v0_3.html b/bioimageio/spec/dataset/v0_3.html new file mode 100644 index 00000000..8c8f6d72 --- /dev/null +++ b/bioimageio/spec/dataset/v0_3.html @@ -0,0 +1,704 @@ + + + + + + + bioimageio.spec.dataset.v0_3 API documentation + + + + + + + + + + +
    +
    +

    +bioimageio.spec.dataset.v0_3

    + + + + + + +
      1from typing import TYPE_CHECKING, Any, Dict, Literal, Optional, cast
    +  2
    +  3from pydantic import model_validator
    +  4
    +  5from .._internal.common_nodes import InvalidDescr
    +  6from .._internal.io import FileDescr as FileDescr
    +  7from .._internal.io_basics import AbsoluteFilePath as AbsoluteFilePath
    +  8from .._internal.io_basics import Sha256 as Sha256
    +  9from .._internal.url import HttpUrl as HttpUrl
    + 10from ..generic.v0_3 import VALID_COVER_IMAGE_EXTENSIONS as VALID_COVER_IMAGE_EXTENSIONS
    + 11from ..generic.v0_3 import Author as Author
    + 12from ..generic.v0_3 import BadgeDescr as BadgeDescr
    + 13from ..generic.v0_3 import CiteEntry as CiteEntry
    + 14from ..generic.v0_3 import DeprecatedLicenseId as DeprecatedLicenseId
    + 15from ..generic.v0_3 import (
    + 16    DocumentationSource,
    + 17    GenericDescrBase,
    + 18    LinkedResourceNode,
    + 19    _author_conv,  # pyright: ignore[reportPrivateUsage]
    + 20    _maintainer_conv,  # pyright: ignore[reportPrivateUsage]
    + 21)
    + 22from ..generic.v0_3 import Doi as Doi
    + 23from ..generic.v0_3 import LicenseId as LicenseId
    + 24from ..generic.v0_3 import LinkedResource as LinkedResource
    + 25from ..generic.v0_3 import Maintainer as Maintainer
    + 26from ..generic.v0_3 import OrcidId as OrcidId
    + 27from ..generic.v0_3 import RelativeFilePath as RelativeFilePath
    + 28from ..generic.v0_3 import ResourceId as ResourceId
    + 29from ..generic.v0_3 import Uploader as Uploader
    + 30from ..generic.v0_3 import Version as Version
    + 31from .v0_2 import DatasetDescr as DatasetDescr02
    + 32
    + 33
    + 34class DatasetId(ResourceId):
    + 35    pass
    + 36
    + 37
    + 38class DatasetDescr(GenericDescrBase, title="bioimage.io dataset specification"):
    + 39    """A bioimage.io dataset resource description file (dataset RDF) describes a dataset relevant to bioimage
    + 40    processing.
    + 41    """
    + 42
    + 43    type: Literal["dataset"] = "dataset"
    + 44
    + 45    id: Optional[DatasetId] = None
    + 46    """bioimage.io-wide unique resource identifier
    + 47    assigned by bioimage.io; version **un**specific."""
    + 48
    + 49    parent: Optional[DatasetId] = None
    + 50    """The description from which this one is derived"""
    + 51
    + 52    source: Optional[HttpUrl] = None
    + 53    """"URL to the source of the dataset."""
    + 54
    + 55    @model_validator(mode="before")
    + 56    @classmethod
    + 57    def _convert(cls, data: Dict[str, Any], /) -> Dict[str, Any]:
    + 58        if (
    + 59            data.get("type") == "dataset"
    + 60            and isinstance(fv := data.get("format_version"), str)
    + 61            and fv.startswith("0.2.")
    + 62        ):
    + 63            old = DatasetDescr02.load(data)
    + 64            if isinstance(old, InvalidDescr):
    + 65                return data
    + 66
    + 67            return cast(
    + 68                Dict[str, Any],
    + 69                (cls if TYPE_CHECKING else dict)(
    + 70                    attachments=(
    + 71                        []
    + 72                        if old.attachments is None
    + 73                        else [FileDescr(source=f) for f in old.attachments.files]
    + 74                    ),
    + 75                    authors=[
    + 76                        _author_conv.convert_as_dict(a) for a in old.authors
    + 77                    ],  # pyright: ignore[reportArgumentType]
    + 78                    badges=old.badges,
    + 79                    cite=[
    + 80                        {"text": c.text, "doi": c.doi, "url": c.url} for c in old.cite
    + 81                    ],  # pyright: ignore[reportArgumentType]
    + 82                    config=old.config,
    + 83                    covers=old.covers,
    + 84                    description=old.description,
    + 85                    documentation=cast(DocumentationSource, old.documentation),
    + 86                    format_version="0.3.0",
    + 87                    git_repo=old.git_repo,  # pyright: ignore[reportArgumentType]
    + 88                    icon=old.icon,
    + 89                    id=None if old.id is None else DatasetId(old.id),
    + 90                    license=old.license,  # type: ignore
    + 91                    links=old.links,
    + 92                    maintainers=[
    + 93                        _maintainer_conv.convert_as_dict(m) for m in old.maintainers
    + 94                    ],  # pyright: ignore[reportArgumentType]
    + 95                    name=old.name,
    + 96                    source=old.source,
    + 97                    tags=old.tags,
    + 98                    type=old.type,
    + 99                    uploader=old.uploader,
    +100                    version=old.version,
    +101                    **(old.model_extra or {}),
    +102                ),
    +103            )
    +104
    +105        return data
    +106
    +107
    +108class LinkedDataset(LinkedResourceNode):
    +109    """Reference to a bioimage.io dataset."""
    +110
    +111    id: DatasetId
    +112    """A valid dataset `id` from the bioimage.io collection."""
    +
    + + +
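To illustrate the `_convert` before-validator shown above (an editorial sketch, not part of the generated page): feeding raw 0.2.x dataset content to the 0.3 class upgrades it on the fly. All field values are placeholders, and stricter validation contexts may require additional metadata.

```python
from bioimageio.spec.dataset.v0_3 import DatasetDescr

# raw content as it might appear in an old 0.2.x rdf.yaml (placeholder values)
old_rdf = {
    "type": "dataset",
    "format_version": "0.2.4",
    "name": "example nuclei dataset",
    "description": "Fluorescence microscopy images of nuclei.",
    "authors": [{"name": "Jane Doe"}],
    "cite": [{"text": "Doe et al. 2023", "doi": "10.1234/example"}],
    "license": "CC-BY-4.0",
}

# the mode="before" validator loads the data as a 0.2 DatasetDescr and
# re-emits its fields in 0.3.0 form before regular validation runs
descr = DatasetDescr.model_validate(old_rdf)
print(descr.format_version)  # 0.3.0
```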
    +
    + +
    + + class + DatasetId(bioimageio.spec.generic.v0_3.ResourceId): + + + +
    + +
    35class DatasetId(ResourceId):
    +36    pass
    +
    + + +

    str(object='') -> str +str(bytes_or_buffer[, encoding[, errors]]) -> str

    + +

    Create a new string object from the given object. If encoding or +errors is specified, then the object must expose a data buffer +that will be decoded using the given encoding and error handler. +Otherwise, returns the result of object.__str__() (if defined) +or repr(object). +encoding defaults to sys.getdefaultencoding(). +errors defaults to 'strict'.

    +
    + + +
    +
    Inherited Members
    +
    + +
    +
    +
    +
    + +
    + + class + DatasetDescr(bioimageio.spec.generic.v0_3.GenericDescrBase): + + + +
    + +
     39class DatasetDescr(GenericDescrBase, title="bioimage.io dataset specification"):
    + 40    """A bioimage.io dataset resource description file (dataset RDF) describes a dataset relevant to bioimage
    + 41    processing.
    + 42    """
    + 43
    + 44    type: Literal["dataset"] = "dataset"
    + 45
    + 46    id: Optional[DatasetId] = None
    + 47    """bioimage.io-wide unique resource identifier
    + 48    assigned by bioimage.io; version **un**specific."""
    + 49
    + 50    parent: Optional[DatasetId] = None
    + 51    """The description from which this one is derived"""
    + 52
    + 53    source: Optional[HttpUrl] = None
    + 54    """"URL to the source of the dataset."""
    + 55
    + 56    @model_validator(mode="before")
    + 57    @classmethod
    + 58    def _convert(cls, data: Dict[str, Any], /) -> Dict[str, Any]:
    + 59        if (
    + 60            data.get("type") == "dataset"
    + 61            and isinstance(fv := data.get("format_version"), str)
    + 62            and fv.startswith("0.2.")
    + 63        ):
    + 64            old = DatasetDescr02.load(data)
    + 65            if isinstance(old, InvalidDescr):
    + 66                return data
    + 67
    + 68            return cast(
    + 69                Dict[str, Any],
    + 70                (cls if TYPE_CHECKING else dict)(
    + 71                    attachments=(
    + 72                        []
    + 73                        if old.attachments is None
    + 74                        else [FileDescr(source=f) for f in old.attachments.files]
    + 75                    ),
    + 76                    authors=[
    + 77                        _author_conv.convert_as_dict(a) for a in old.authors
    + 78                    ],  # pyright: ignore[reportArgumentType]
    + 79                    badges=old.badges,
    + 80                    cite=[
    + 81                        {"text": c.text, "doi": c.doi, "url": c.url} for c in old.cite
    + 82                    ],  # pyright: ignore[reportArgumentType]
    + 83                    config=old.config,
    + 84                    covers=old.covers,
    + 85                    description=old.description,
    + 86                    documentation=cast(DocumentationSource, old.documentation),
    + 87                    format_version="0.3.0",
    + 88                    git_repo=old.git_repo,  # pyright: ignore[reportArgumentType]
    + 89                    icon=old.icon,
    + 90                    id=None if old.id is None else DatasetId(old.id),
    + 91                    license=old.license,  # type: ignore
    + 92                    links=old.links,
    + 93                    maintainers=[
    + 94                        _maintainer_conv.convert_as_dict(m) for m in old.maintainers
    + 95                    ],  # pyright: ignore[reportArgumentType]
    + 96                    name=old.name,
    + 97                    source=old.source,
    + 98                    tags=old.tags,
    + 99                    type=old.type,
    +100                    uploader=old.uploader,
    +101                    version=old.version,
    +102                    **(old.model_extra or {}),
    +103                ),
    +104            )
    +105
    +106        return data
    +
    + + +

    A bioimage.io dataset resource description file (dataset RDF) describes a dataset relevant to bioimage +processing.

    +
    + + +
    +
    + type: Literal['dataset'] + + +
    + + + + +
    +
    +
    + id: Optional[DatasetId] + + +
    + + +

    bioimage.io-wide unique resource identifier +assigned by bioimage.io; version unspecific.

    +
    + + +
    +
    +
    + parent: Optional[DatasetId] + + +
    + + +

    The description from which this one is derived

    +
    + + +
    +
    +
    + source: Optional[bioimageio.spec._internal.url.HttpUrl] + + +
    + + +

    "URL to the source of the dataset.

    +
    + + +
    +
    +
    + implemented_format_version: ClassVar[str] = +'0.3.0' + + +
    + + + + +
    +
    +
    + implemented_format_version_tuple: ClassVar[Tuple[int, int, int]] = +(0, 3, 0) + + +
    + + + + +
    +
    + +
    + + def + model_post_init(self: pydantic.main.BaseModel, context: Any, /) -> None: + + + +
    + +
    124                    def wrapped_model_post_init(self: BaseModel, context: Any, /) -> None:
    +125                        """We need to both initialize private attributes and call the user-defined model_post_init
    +126                        method.
    +127                        """
    +128                        init_private_attributes(self, context)
    +129                        original_model_post_init(self, context)
    +
    + + +

    We need to both initialize private attributes and call the user-defined model_post_init +method.

    +
    + + +
    + +
    +
    + +
    + + class + LinkedDataset(bioimageio.spec.generic.v0_3.LinkedResourceNode): + + + +
    + +
    109class LinkedDataset(LinkedResourceNode):
    +110    """Reference to a bioimage.io dataset."""
    +111
    +112    id: DatasetId
    +113    """A valid dataset `id` from the bioimage.io collection."""
    +
    + + +

    Reference to a bioimage.io dataset.

    +
    + + +
    +
    + id: DatasetId + + +
    + + +

    A valid dataset id from the bioimage.io collection.

    +
    + + +
    +
    +
    Inherited Members
    +
    + +
    +
    +
    +
    + + \ No newline at end of file diff --git a/bioimageio/spec/generic.html b/bioimageio/spec/generic.html new file mode 100644 index 00000000..cc8a4000 --- /dev/null +++ b/bioimageio/spec/generic.html @@ -0,0 +1,577 @@ + + + + + + + bioimageio.spec.generic API documentation + + + + + + + + + + +
    +
    +

    +bioimageio.spec.generic

    + +

implementations of all released minor versions are available in submodules:

    + + +
    + + + + + +
     1# autogen: start
    + 2"""
+ 3implementations of all released minor versions are available in submodules:
    + 4- generic v0_2: `bioimageio.spec.generic.v0_2.GenericDescr`
    + 5- generic v0_3: `bioimageio.spec.generic.v0_3.GenericDescr`
    + 6"""
    + 7
    + 8from typing import Union
    + 9
    +10from pydantic import Discriminator
    +11from typing_extensions import Annotated
    +12
    +13from . import v0_2, v0_3
    +14
    +15GenericDescr = v0_3.GenericDescr
    +16GenericDescr_v0_2 = v0_2.GenericDescr
    +17GenericDescr_v0_3 = v0_3.GenericDescr
    +18
    +19AnyGenericDescr = Annotated[
    +20    Union[GenericDescr_v0_2, GenericDescr_v0_3], Discriminator("format_version")
    +21]
    +22"""Union of any released generic desription"""
    +23# autogen: stop
    +
    + + +
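As an editorial sketch of how these re-exports are typically consumed: `GenericDescr` tracks the latest released minor version, while `AnyGenericDescr` can be dispatched with a plain pydantic `TypeAdapter`. The RDF content below is a placeholder, and stricter validation contexts may demand more metadata.

```python
from pydantic import TypeAdapter

from bioimageio.spec.generic import AnyGenericDescr, GenericDescr, v0_3

assert GenericDescr is v0_3.GenericDescr  # alias for the latest minor version

# the discriminated union dispatches raw content on its `format_version`
adapter = TypeAdapter(AnyGenericDescr)
descr = adapter.validate_python(
    {
        "type": "figure",  # placeholder type not covered by a specific RDF
        "format_version": "0.3.0",
        "name": "example resource",
        "description": "A placeholder generic resource description.",
        "authors": [{"name": "Jane Doe"}],
        "cite": [{"text": "Doe et al. 2023", "doi": "10.1234/example"}],
        "license": "MIT",
    }
)
print(type(descr).__name__)  # GenericDescr
```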
    +
    + +
    + + class + GenericDescr(bioimageio.spec.generic.v0_3.GenericDescrBase): + + + +
    + +
    410class GenericDescr(
    +411    GenericDescrBase, extra="ignore", title="bioimage.io generic specification"
    +412):
    +413    """Specification of the fields used in a generic bioimage.io-compliant resource description file (RDF).
    +414
    +415    An RDF is a YAML file that describes a resource such as a model, a dataset, or a notebook.
    +416    Note that those resources are described with a type-specific RDF.
    +417    Use this generic resource description, if none of the known specific types matches your resource.
    +418    """
    +419
    +420    type: Annotated[str, LowerCase] = Field("generic", frozen=True)
    +421    """The resource type assigns a broad category to the resource."""
    +422
    +423    id: Optional[ResourceId] = None
    +424    """bioimage.io-wide unique resource identifier
    +425    assigned by bioimage.io; version **un**specific."""
    +426
    +427    parent: Optional[ResourceId] = None
    +428    """The description from which this one is derived"""
    +429
    +430    source: Optional[HttpUrl] = None
    +431    """The primary source of the resource"""
    +432
    +433    @field_validator("type", mode="after")
    +434    @classmethod
    +435    def check_specific_types(cls, value: str) -> str:
    +436        if value in KNOWN_SPECIFIC_RESOURCE_TYPES:
    +437            raise ValueError(
    +438                f"Use the {value} description instead of this generic description for"
    +439                + f" your '{value}' resource."
    +440            )
    +441
    +442        return value
    +
    + + +

    Specification of the fields used in a generic bioimage.io-compliant resource description file (RDF).

    + +

    An RDF is a YAML file that describes a resource such as a model, a dataset, or a notebook. +Note that those resources are described with a type-specific RDF. +Use this generic resource description, if none of the known specific types matches your resource.

    +
    + + +
    +
    + type: Annotated[str, Annotated[~_StrType, Predicate(str.islower)]] + + +
    + + +

    The resource type assigns a broad category to the resource.

    +
    + + +
    +
    + + + +

    bioimage.io-wide unique resource identifier +assigned by bioimage.io; version unspecific.

    +
    + + +
    +
    +
    + parent: Optional[bioimageio.spec.generic.v0_3.ResourceId] + + +
    + + +

    The description from which this one is derived

    +
    + + +
    +
    +
    + source: Optional[bioimageio.spec._internal.url.HttpUrl] + + +
    + + +

    The primary source of the resource

    +
    + + +
    +
    + +
    +
    @field_validator('type', mode='after')
    +
    @classmethod
    + + def + check_specific_types(cls, value: str) -> str: + + + +
    + +
    433    @field_validator("type", mode="after")
    +434    @classmethod
    +435    def check_specific_types(cls, value: str) -> str:
    +436        if value in KNOWN_SPECIFIC_RESOURCE_TYPES:
    +437            raise ValueError(
    +438                f"Use the {value} description instead of this generic description for"
    +439                + f" your '{value}' resource."
    +440            )
    +441
    +442        return value
    +
    + + + + +
    +
    +
    + implemented_format_version: ClassVar[str] = +'0.3.0' + + +
    + + + + +
    +
    +
    + implemented_format_version_tuple: ClassVar[Tuple[int, int, int]] = +(0, 3, 0) + + +
    + + + + +
    +
    + +
    + + def + model_post_init(self: pydantic.main.BaseModel, context: Any, /) -> None: + + + +
    + +
    124                    def wrapped_model_post_init(self: BaseModel, context: Any, /) -> None:
    +125                        """We need to both initialize private attributes and call the user-defined model_post_init
    +126                        method.
    +127                        """
    +128                        init_private_attributes(self, context)
    +129                        original_model_post_init(self, context)
    +
    + + +

    We need to both initialize private attributes and call the user-defined model_post_init +method.

    +
    + + +
    + +
    +
    +
    + GenericDescr_v0_2 = +<class 'bioimageio.spec.generic.v0_2.GenericDescr'> + + +
    + + + + +
    +
    +
    + GenericDescr_v0_3 = +<class 'GenericDescr'> + + +
    + + + + +
    +
    +
    + AnyGenericDescr = + + typing.Annotated[typing.Union[bioimageio.spec.generic.v0_2.GenericDescr, GenericDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)] + + +
    + + +

Union of any released generic description

    +
    + + +
    +
    + + \ No newline at end of file diff --git a/bioimageio/spec/generic/_v0_2_converter.html b/bioimageio/spec/generic/_v0_2_converter.html new file mode 100644 index 00000000..45670694 --- /dev/null +++ b/bioimageio/spec/generic/_v0_2_converter.html @@ -0,0 +1,561 @@ + + + + + + + bioimageio.spec.generic._v0_2_converter API documentation + + + + + + + + + + +
    +
    +

    +bioimageio.spec.generic._v0_2_converter

    + + + + + + +
      1import collections.abc
    +  2from typing import Any, Dict, Mapping, Union
    +  3
    +  4from .._internal.io import BioimageioYamlContent
    +  5from .._internal.type_guards import is_mapping
    +  6
    +  7
    +  8def convert_from_older_format(data: BioimageioYamlContent) -> None:
    +  9    """convert raw RDF data of an older format where possible"""
    + 10    # check if we have future format version
    + 11    if "format_version" not in data:
    + 12        return
    + 13
    + 14    fv = data["format_version"]
    + 15    if isinstance(fv, str) and tuple(map(int, fv.split(".")[:2])) > (0, 2):
    + 16        return
    + 17
    + 18    # we unofficially accept strings as author entries
    + 19    authors = data.get("authors")
    + 20    if isinstance(authors, list):
    + 21        data["authors"] = [{"name": a} if isinstance(a, str) else a for a in authors]
    + 22
    + 23    if data.get("format_version") in ("0.2.0", "0.2.1"):
    + 24        data["format_version"] = "0.2.2"
    + 25
    + 26    if data.get("format_version") == "0.2.2":
    + 27        remove_slashes_from_names(data)
    + 28        data["format_version"] = "0.2.3"
    + 29
    + 30    if data.get("format_version") == "0.2.3":
    + 31        if isinstance(config := data.get("config"), dict) and isinstance(
    + 32            bconfig := config.get("bioimageio"), dict
    + 33        ):
    + 34            if (nickname := bconfig.get("nickname")) is not None:
    + 35                data["id"] = nickname
    + 36
    + 37            if (nickname_icon := bconfig.get("nickname_icon")) is not None:
    + 38                data["id_emoji"] = nickname_icon
    + 39
    + 40        data["format_version"] = "0.2.4"
    + 41
    + 42    remove_doi_prefix(data)
    + 43    remove_gh_prefix(data)
    + 44
    + 45
    + 46def remove_slashes_from_names(data: Dict[Any, Any]) -> None:
    + 47    NAME = "name"
    + 48    if NAME in data and isinstance(data[NAME], str):
    + 49        data[NAME] = data[NAME].replace("/", "").replace("\\", "")
    + 50
    + 51    # update authors and maintainers
    + 52    def rm_slashes_in_person_name(
    + 53        person: Union[Any, Mapping[Union[Any, str], Any]],
    + 54    ) -> Any:
    + 55        if not is_mapping(person):
    + 56            return person
    + 57
    + 58        new_person = dict(person)
    + 59        if isinstance(n := person.get(NAME), str):
    + 60            new_person[NAME] = n.replace("/", "").replace("\\", "")
    + 61
    + 62        return new_person
    + 63
    + 64    for group in ("authors", "maintainers"):
    + 65        persons = data.get(group)
    + 66        if isinstance(persons, collections.abc.Sequence):
    + 67            data[group] = [rm_slashes_in_person_name(p) for p in persons]  # type: ignore
    + 68
    + 69
    + 70DOI_PREFIXES = ("https://doi.org/", "http://dx.doi.org/")
    + 71
    + 72
    + 73def remove_doi_prefix(data: BioimageioYamlContent) -> None:
    + 74    """we unofficially accept DOIs starting with "https://doi.org/" here we remove this prefix"""
    + 75    cite = data.get("cite")
    + 76    if isinstance(cite, collections.abc.Sequence):
    + 77        new_cite = list(cite)
    + 78        for i in range(len(new_cite)):
    + 79            cite_entry = new_cite[i]
    + 80            if not isinstance(cite_entry, collections.abc.Mapping):
    + 81                continue
    + 82
    + 83            doi = cite_entry.get("doi")
    + 84            if not isinstance(doi, str):
    + 85                continue
    + 86
    + 87            for doi_prefix in DOI_PREFIXES:
    + 88                if doi.startswith(doi_prefix):
    + 89                    doi = doi[len(doi_prefix) :]
    + 90                    break
    + 91            else:
    + 92                continue
    + 93
    + 94            new_cite_entry = dict(cite_entry)
    + 95            new_cite_entry["doi"] = doi
    + 96            new_cite[i] = new_cite_entry
    + 97
    + 98        data["cite"] = new_cite
    + 99
    +100
    +101def remove_gh_prefix(data: BioimageioYamlContent) -> None:
    +102    def rm_gh(field_name: str):
    +103        authors = data.get(field_name)
    +104        if not isinstance(authors, list):
    +105            return
    +106
    +107        for a in authors:
    +108            if (
    +109                isinstance(a, dict)
    +110                and "github_user" in a
    +111                and isinstance(a["github_user"], str)
    +112                and a["github_user"].startswith("https://github.com/")
    +113            ):
    +114                a["github_user"] = a["github_user"][len("https://github.com/") :]
    +115
    +116    rm_gh("authors")
    +117    rm_gh("maintainers")
    +
    + + +
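As an editorial sketch of the in-place conversion implemented above (note that this is an internal module, so the import path is not a stable public API); the RDF content is a placeholder:

```python
from bioimageio.spec.generic._v0_2_converter import convert_from_older_format

rdf = {
    "format_version": "0.2.0",
    "name": "example resource",
    "authors": ["Jane Doe"],  # plain strings are unofficially accepted
    "cite": [{"text": "Doe et al. 2023", "doi": "https://doi.org/10.1234/example"}],
    "maintainers": [{"name": "Jane Doe", "github_user": "https://github.com/janedoe"}],
}

convert_from_older_format(rdf)  # mutates `rdf` in place

assert rdf["format_version"] == "0.2.4"
assert rdf["authors"] == [{"name": "Jane Doe"}]           # string author wrapped
assert rdf["cite"][0]["doi"] == "10.1234/example"         # DOI prefix stripped
assert rdf["maintainers"][0]["github_user"] == "janedoe"  # GitHub URL prefix stripped
```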
    +
    + +
    + + def + convert_from_older_format(data: Dict[str, YamlValue]) -> None: + + + +
    + +
     9def convert_from_older_format(data: BioimageioYamlContent) -> None:
    +10    """convert raw RDF data of an older format where possible"""
    +11    # check if we have future format version
    +12    if "format_version" not in data:
    +13        return
    +14
    +15    fv = data["format_version"]
    +16    if isinstance(fv, str) and tuple(map(int, fv.split(".")[:2])) > (0, 2):
    +17        return
    +18
    +19    # we unofficially accept strings as author entries
    +20    authors = data.get("authors")
    +21    if isinstance(authors, list):
    +22        data["authors"] = [{"name": a} if isinstance(a, str) else a for a in authors]
    +23
    +24    if data.get("format_version") in ("0.2.0", "0.2.1"):
    +25        data["format_version"] = "0.2.2"
    +26
    +27    if data.get("format_version") == "0.2.2":
    +28        remove_slashes_from_names(data)
    +29        data["format_version"] = "0.2.3"
    +30
    +31    if data.get("format_version") == "0.2.3":
    +32        if isinstance(config := data.get("config"), dict) and isinstance(
    +33            bconfig := config.get("bioimageio"), dict
    +34        ):
    +35            if (nickname := bconfig.get("nickname")) is not None:
    +36                data["id"] = nickname
    +37
    +38            if (nickname_icon := bconfig.get("nickname_icon")) is not None:
    +39                data["id_emoji"] = nickname_icon
    +40
    +41        data["format_version"] = "0.2.4"
    +42
    +43    remove_doi_prefix(data)
    +44    remove_gh_prefix(data)
    +
    + + +

    convert raw RDF data of an older format where possible

    +
    + + +
    +
    + +
    + + def + remove_slashes_from_names(data: Dict[Any, Any]) -> None: + + + +
    + +
    47def remove_slashes_from_names(data: Dict[Any, Any]) -> None:
    +48    NAME = "name"
    +49    if NAME in data and isinstance(data[NAME], str):
    +50        data[NAME] = data[NAME].replace("/", "").replace("\\", "")
    +51
    +52    # update authors and maintainers
    +53    def rm_slashes_in_person_name(
    +54        person: Union[Any, Mapping[Union[Any, str], Any]],
    +55    ) -> Any:
    +56        if not is_mapping(person):
    +57            return person
    +58
    +59        new_person = dict(person)
    +60        if isinstance(n := person.get(NAME), str):
    +61            new_person[NAME] = n.replace("/", "").replace("\\", "")
    +62
    +63        return new_person
    +64
    +65    for group in ("authors", "maintainers"):
    +66        persons = data.get(group)
    +67        if isinstance(persons, collections.abc.Sequence):
    +68            data[group] = [rm_slashes_in_person_name(p) for p in persons]  # type: ignore
    +
    + + + + +
    +
    +
    + DOI_PREFIXES = +('https://doi.org/', 'http://dx.doi.org/') + + +
    + + + + +
    +
    + +
    + + def + remove_doi_prefix(data: Dict[str, YamlValue]) -> None: + + + +
    + +
    74def remove_doi_prefix(data: BioimageioYamlContent) -> None:
    +75    """we unofficially accept DOIs starting with "https://doi.org/" here we remove this prefix"""
    +76    cite = data.get("cite")
    +77    if isinstance(cite, collections.abc.Sequence):
    +78        new_cite = list(cite)
    +79        for i in range(len(new_cite)):
    +80            cite_entry = new_cite[i]
    +81            if not isinstance(cite_entry, collections.abc.Mapping):
    +82                continue
    +83
    +84            doi = cite_entry.get("doi")
    +85            if not isinstance(doi, str):
    +86                continue
    +87
    +88            for doi_prefix in DOI_PREFIXES:
    +89                if doi.startswith(doi_prefix):
    +90                    doi = doi[len(doi_prefix) :]
    +91                    break
    +92            else:
    +93                continue
    +94
    +95            new_cite_entry = dict(cite_entry)
    +96            new_cite_entry["doi"] = doi
    +97            new_cite[i] = new_cite_entry
    +98
    +99        data["cite"] = new_cite
    +
    + + +

we unofficially accept DOIs starting with "https://doi.org/"; here we remove this prefix

    +
    + + +
    +
    + +
    + + def + remove_gh_prefix(data: Dict[str, YamlValue]) -> None: + + + +
    + +
    102def remove_gh_prefix(data: BioimageioYamlContent) -> None:
    +103    def rm_gh(field_name: str):
    +104        authors = data.get(field_name)
    +105        if not isinstance(authors, list):
    +106            return
    +107
    +108        for a in authors:
    +109            if (
    +110                isinstance(a, dict)
    +111                and "github_user" in a
    +112                and isinstance(a["github_user"], str)
    +113                and a["github_user"].startswith("https://github.com/")
    +114            ):
    +115                a["github_user"] = a["github_user"][len("https://github.com/") :]
    +116
    +117    rm_gh("authors")
    +118    rm_gh("maintainers")
    +
    + + + + +
    +
    + + \ No newline at end of file diff --git a/bioimageio/spec/generic/_v0_3_converter.html b/bioimageio/spec/generic/_v0_3_converter.html new file mode 100644 index 00000000..31dc9bdd --- /dev/null +++ b/bioimageio/spec/generic/_v0_3_converter.html @@ -0,0 +1,431 @@ + + + + + + + bioimageio.spec.generic._v0_3_converter API documentation + + + + + + + + + + +
    +
    +

    +bioimageio.spec.generic._v0_3_converter

    + + + + + + +
     1import collections.abc
    + 2import string
    + 3from pathlib import Path
    + 4
    + 5import imageio
    + 6from loguru import logger
    + 7
    + 8from .._internal.io import (
    + 9    BioimageioYamlContent,
    +10    extract_file_name,
    +11    interprete_file_source,
    +12)
    +13from ._v0_2_converter import convert_from_older_format as convert_from_older_format_v0_2
    +14
    +15
    +16def convert_from_older_format(data: BioimageioYamlContent) -> None:
    +17    """convert raw RDF data of an older format where possible"""
    +18    # check if we have future format version
    +19    fv = data.get("format_version", "0.2.0")
    +20    if (
    +21        not isinstance(fv, str)
    +22        or fv.count(".") != 2
    +23        or tuple(map(int, fv.split(".")[:2])) > (0, 3)
    +24    ):
    +25        return
    +26
    +27    convert_from_older_format_v0_2(data)
    +28
    +29    convert_attachments(data)
    +30    convert_cover_images(data)
    +31
    +32    _ = data.pop("download_url", None)
    +33    _ = data.pop("rdf_source", None)
    +34
    +35    if "name" in data and isinstance(data["name"], str):
    +36        data["name"] = "".join(
    +37            c if c in string.ascii_letters + string.digits + "_- ()" else " "
    +38            for c in data["name"]
    +39        )[:128]
    +40
    +41    data["format_version"] = "0.3.0"
    +42
    +43
    +44def convert_attachments(data: BioimageioYamlContent) -> None:
    +45    a = data.get("attachments")
    +46    if isinstance(a, collections.abc.Mapping):
    +47        data["attachments"] = tuple({"source": file} for file in a.get("files", []))  # type: ignore
    +48
    +49
    +50def convert_cover_images(data: BioimageioYamlContent) -> None:
    +51    covers = data.get("covers")
    +52    if not isinstance(covers, list):
    +53        return
    +54
    +55    for i in range(len(covers)):
    +56        c = covers[i]
    +57        if not isinstance(c, str):
    +58            continue
    +59
    +60        src = interprete_file_source(c)
    +61        fname = extract_file_name(src)
    +62
    +63        if not (fname.endswith(".tif") or fname.endswith(".tiff")):
    +64            continue
    +65
    +66        try:
    +67            image = imageio.imread(c)
    +68            c_path = (Path(".bioimageio_converter_cache") / fname).with_suffix(".png")
    +69            imageio.imwrite(c_path, image)
    +70            covers[i] = str(c_path.absolute())
    +71        except Exception as e:
    +72            logger.warning("failed to convert tif cover image: {}", e)
    +
    + + +
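As an editorial sketch of the 0.2-to-0.3 conversion above (again an internal module, so the import path may change); the RDF content is a placeholder:

```python
from bioimageio.spec.generic._v0_3_converter import convert_from_older_format

rdf = {
    "format_version": "0.2.4",
    "name": "my/example resource",
    "attachments": {"files": ["weights.pt", "environment.yaml"]},
    "download_url": "https://example.com/resource.zip",
}

convert_from_older_format(rdf)  # mutates `rdf` in place

assert rdf["format_version"] == "0.3.0"
assert rdf["name"] == "my example resource"  # disallowed characters replaced
assert rdf["attachments"] == (
    {"source": "weights.pt"},
    {"source": "environment.yaml"},
)
assert "download_url" not in rdf  # deprecated field dropped
```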
    +
    + +
    + + def + convert_from_older_format(data: Dict[str, YamlValue]) -> None: + + + +
    + +
    17def convert_from_older_format(data: BioimageioYamlContent) -> None:
    +18    """convert raw RDF data of an older format where possible"""
    +19    # check if we have future format version
    +20    fv = data.get("format_version", "0.2.0")
    +21    if (
    +22        not isinstance(fv, str)
    +23        or fv.count(".") != 2
    +24        or tuple(map(int, fv.split(".")[:2])) > (0, 3)
    +25    ):
    +26        return
    +27
    +28    convert_from_older_format_v0_2(data)
    +29
    +30    convert_attachments(data)
    +31    convert_cover_images(data)
    +32
    +33    _ = data.pop("download_url", None)
    +34    _ = data.pop("rdf_source", None)
    +35
    +36    if "name" in data and isinstance(data["name"], str):
    +37        data["name"] = "".join(
    +38            c if c in string.ascii_letters + string.digits + "_- ()" else " "
    +39            for c in data["name"]
    +40        )[:128]
    +41
    +42    data["format_version"] = "0.3.0"
    +
    + + +

    convert raw RDF data of an older format where possible

    +
    + + +
    +
    + +
    + + def + convert_attachments(data: Dict[str, YamlValue]) -> None: + + + +
    + +
    45def convert_attachments(data: BioimageioYamlContent) -> None:
    +46    a = data.get("attachments")
    +47    if isinstance(a, collections.abc.Mapping):
    +48        data["attachments"] = tuple({"source": file} for file in a.get("files", []))  # type: ignore
    +
    + + + + +
    +
    + +
    + + def + convert_cover_images(data: Dict[str, YamlValue]) -> None: + + + +
    + +
    51def convert_cover_images(data: BioimageioYamlContent) -> None:
    +52    covers = data.get("covers")
    +53    if not isinstance(covers, list):
    +54        return
    +55
    +56    for i in range(len(covers)):
    +57        c = covers[i]
    +58        if not isinstance(c, str):
    +59            continue
    +60
    +61        src = interprete_file_source(c)
    +62        fname = extract_file_name(src)
    +63
    +64        if not (fname.endswith(".tif") or fname.endswith(".tiff")):
    +65            continue
    +66
    +67        try:
    +68            image = imageio.imread(c)
    +69            c_path = (Path(".bioimageio_converter_cache") / fname).with_suffix(".png")
    +70            imageio.imwrite(c_path, image)
    +71            covers[i] = str(c_path.absolute())
    +72        except Exception as e:
    +73            logger.warning("failed to convert tif cover image: {}", e)
    +
    + + + + +
    +
    + + \ No newline at end of file diff --git a/bioimageio/spec/generic/v0_2.html b/bioimageio/spec/generic/v0_2.html new file mode 100644 index 00000000..31862555 --- /dev/null +++ b/bioimageio/spec/generic/v0_2.html @@ -0,0 +1,2498 @@ + + + + + + + bioimageio.spec.generic.v0_2 API documentation + + + + + + + + + + +
    +
    +

    +bioimageio.spec.generic.v0_2

    + + + + + + +
      1import string
    +  2from typing import (
    +  3    Any,
    +  4    ClassVar,
    +  5    Dict,
    +  6    List,
    +  7    Literal,
    +  8    Mapping,
    +  9    Optional,
    + 10    Sequence,
    + 11    Type,
    + 12    TypeVar,
    + 13    Union,
    + 14)
    + 15
    + 16import annotated_types
    + 17from annotated_types import Len, LowerCase, MaxLen
    + 18from pydantic import (
    + 19    EmailStr,
    + 20    Field,
    + 21    RootModel,
    + 22    ValidationInfo,
    + 23    field_validator,
    + 24    model_validator,
    + 25)
    + 26from typing_extensions import Annotated, Self, assert_never
    + 27
    + 28from .._internal.common_nodes import Node, ResourceDescrBase
    + 29from .._internal.constants import TAG_CATEGORIES
    + 30from .._internal.field_warning import as_warning, issue_warning, warn
    + 31from .._internal.io import (
    + 32    BioimageioYamlContent,
    + 33    InPackageIfLocalFileSource,
    + 34    WithSuffix,
    + 35    YamlValue,
    + 36    include_in_package_serializer,
    + 37)
    + 38from .._internal.io_basics import AbsoluteFilePath as AbsoluteFilePath
    + 39from .._internal.type_guards import is_sequence
    + 40from .._internal.types import (
    + 41    DeprecatedLicenseId,
    + 42    FileSource,
    + 43    ImportantFileSource,
    + 44    LicenseId,
    + 45    NotEmpty,
    + 46)
    + 47from .._internal.types import Doi as Doi
    + 48from .._internal.types import OrcidId as OrcidId
    + 49from .._internal.types import RelativeFilePath as RelativeFilePath
    + 50from .._internal.url import HttpUrl as HttpUrl
    + 51from .._internal.validated_string import ValidatedString
    + 52from .._internal.validator_annotations import AfterValidator, RestrictCharacters
    + 53from .._internal.version_type import Version as Version
    + 54from ._v0_2_converter import convert_from_older_format as _convert_from_older_format
    + 55
    + 56
    + 57class ResourceId(ValidatedString):
    + 58    root_model: ClassVar[Type[RootModel[Any]]] = RootModel[
    + 59        Annotated[
    + 60            NotEmpty[str],
    + 61            AfterValidator(str.lower),  # convert upper case on the fly
    + 62            RestrictCharacters(string.ascii_lowercase + string.digits + "_-/."),
    + 63            annotated_types.Predicate(
    + 64                lambda s: not (s.startswith("/") or s.endswith("/"))
    + 65            ),
    + 66        ]
    + 67    ]
    + 68
    + 69
    + 70KNOWN_SPECIFIC_RESOURCE_TYPES = (
    + 71    "application",
    + 72    "collection",
    + 73    "dataset",
    + 74    "model",
    + 75    "notebook",
    + 76)
    + 77
    + 78VALID_COVER_IMAGE_EXTENSIONS = (
    + 79    ".gif",
    + 80    ".jpeg",
    + 81    ".jpg",
    + 82    ".png",
    + 83    ".svg",
    + 84    ".tif",
    + 85    ".tiff",
    + 86)
    + 87
    + 88_WithImageSuffix = WithSuffix(VALID_COVER_IMAGE_EXTENSIONS, case_sensitive=False)
    + 89CoverImageSource = Annotated[
    + 90    Union[AbsoluteFilePath, RelativeFilePath, HttpUrl],
    + 91    Field(union_mode="left_to_right"),
    + 92    _WithImageSuffix,
    + 93    include_in_package_serializer,
    + 94]
    + 95
    + 96
    + 97class AttachmentsDescr(Node):
    + 98    model_config = {**Node.model_config, "extra": "allow"}
    + 99    """update pydantic model config to allow additional unknown keys"""
    +100    files: List[ImportantFileSource] = Field(default_factory=list)
    +101    """∈📦 File attachments"""
    +102
    +103
    +104def _remove_slashes(s: str):
    +105    return s.replace("/", "").replace("\\", "")
    +106
    +107
    +108class Uploader(Node):
    +109    email: EmailStr
    +110    """Email"""
    +111    name: Optional[Annotated[str, AfterValidator(_remove_slashes)]] = None
    +112    """name"""
    +113
    +114
    +115class _Person(Node):
    +116    affiliation: Optional[str] = None
    +117    """Affiliation"""
    +118
    +119    email: Optional[EmailStr] = None
    +120    """Email"""
    +121
    +122    orcid: Annotated[Optional[OrcidId], Field(examples=["0000-0001-2345-6789"])] = None
    +123    """An [ORCID iD](https://support.orcid.org/hc/en-us/sections/360001495313-What-is-ORCID
    +124    ) in hyphenated groups of 4 digits, (and [valid](
    +125    https://support.orcid.org/hc/en-us/articles/360006897674-Structure-of-the-ORCID-Identifier
    +126    ) as per ISO 7064 11,2.)
    +127    """
    +128
    +129
    +130class Author(_Person):
    +131    name: Annotated[str, AfterValidator(_remove_slashes)]
    +132    github_user: Optional[str] = None  # TODO: validate github_user
    +133
    +134
    +135class Maintainer(_Person):
    +136    name: Optional[Annotated[str, AfterValidator(_remove_slashes)]] = None
    +137    github_user: str
    +138
    +139
    +140class BadgeDescr(Node, title="Custom badge"):
    +141    """A custom badge"""
    +142
    +143    label: Annotated[str, Field(examples=["Open in Colab"])]
    +144    """badge label to display on hover"""
    +145
    +146    icon: Annotated[
    +147        Optional[InPackageIfLocalFileSource],
    +148        Field(examples=["https://colab.research.google.com/assets/colab-badge.svg"]),
    +149    ] = None
    +150    """badge icon"""
    +151
    +152    url: Annotated[
    +153        HttpUrl,
    +154        Field(
    +155            examples=[
    +156                "https://colab.research.google.com/github/HenriquesLab/ZeroCostDL4Mic/blob/master/Colab_notebooks/U-net_2D_ZeroCostDL4Mic.ipynb"
    +157            ]
    +158        ),
    +159    ]
    +160    """target URL"""
    +161
    +162
    +163class CiteEntry(Node):
    +164    text: str
    +165    """free text description"""
    +166
    +167    doi: Optional[Doi] = None
    +168    """A digital object identifier (DOI) is the prefered citation reference.
    +169    See https://www.doi.org/ for details. (alternatively specify `url`)"""
    +170
    +171    @field_validator("doi", mode="before")
    +172    @classmethod
    +173    def accept_prefixed_doi(cls, doi: Any) -> Any:
    +174        if isinstance(doi, str):
    +175            for doi_prefix in ("https://doi.org/", "http://dx.doi.org/"):
    +176                if doi.startswith(doi_prefix):
    +177                    doi = doi[len(doi_prefix) :]
    +178                    break
    +179
    +180        return doi
    +181
    +182    url: Optional[str] = None
    +183    """URL to cite (preferably specify a `doi` instead)"""
    +184
    +185    @model_validator(mode="after")
    +186    def _check_doi_or_url(self) -> Self:
    +187        if not self.doi and not self.url:
    +188            raise ValueError("Either 'doi' or 'url' is required")
    +189
    +190        return self
    +191
    +192
    +193class LinkedResource(Node):
    +194    """Reference to a bioimage.io resource"""
    +195
    +196    id: ResourceId
    +197    """A valid resource `id` from the bioimage.io collection."""
    +198
    +199    version_number: Optional[int] = None
    +200    """version number (n-th published version, not the semantic version) of linked resource"""
    +201
    +202
    +203class GenericModelDescrBase(ResourceDescrBase):
    +204    """Base for all resource descriptions including of model descriptions"""
    +205
    +206    name: Annotated[NotEmpty[str], warn(MaxLen(128), "Longer than 128 characters.")]
    +207    """A human-friendly name of the resource description"""
    +208
    +209    description: str
    +210
    +211    covers: Annotated[
    +212        List[CoverImageSource],
    +213        Field(
    +214            examples=["cover.png"],
    +215            description=(
    +216                "Cover images. Please use an image smaller than 500KB and an aspect"
    +217                " ratio width to height of 2:1.\nThe supported image formats are:"
    +218                f" {VALID_COVER_IMAGE_EXTENSIONS}"
    +219            ),
    +220        ),
    +221    ] = Field(
    +222        default_factory=list,
    +223    )
    +224    """∈📦 Cover images. Please use an image smaller than 500KB and an aspect ratio width to height of 2:1."""
    +225
    +226    id_emoji: Optional[Annotated[str, Len(min_length=1, max_length=1)]] = None
    +227    """UTF-8 emoji for display alongside the `id`."""
    +228
    +229    authors: List[Author] = Field(default_factory=list)
    +230    """The authors are the creators of the RDF and the primary points of contact."""
    +231
    +232    @field_validator("authors", mode="before")
    +233    @classmethod
    +234    def accept_author_strings(cls, authors: Union[Any, Sequence[Any]]) -> Any:
    +235        """we unofficially accept strings as author entries"""
    +236        if is_sequence(authors):
    +237            authors = [{"name": a} if isinstance(a, str) else a for a in authors]
    +238
    +239        if not authors:
    +240            issue_warning("missing", value=authors, field="authors")
    +241
    +242        return authors
    +243
    +244    attachments: Optional[AttachmentsDescr] = None
    +245    """file and other attachments"""
    +246
    +247    cite: List[CiteEntry] = Field(default_factory=list)
    +248    """citations"""
    +249
    +250    @field_validator("cite", mode="after")
    +251    @classmethod
    +252    def _warn_empty_cite(cls, value: Any):
    +253        if not value:
    +254            issue_warning("missing", value=value, field="cite")
    +255
    +256        return value
    +257
    +258    config: Annotated[
    +259        Dict[str, YamlValue],
    +260        Field(
    +261            examples=[
    +262                dict(
    +263                    bioimageio={
    +264                        "my_custom_key": 3837283,
    +265                        "another_key": {"nested": "value"},
    +266                    },
    +267                    imagej={"macro_dir": "path/to/macro/file"},
    +268                )
    +269            ],
    +270        ),
    +271    ] = Field(default_factory=dict)
    +272    """A field for custom configuration that can contain any keys not present in the RDF spec.
    +273    This means you should not store, for example, a github repo URL in `config` since we already have the
    +274    `git_repo` field defined in the spec.
    +275    Keys in `config` may be very specific to a tool or consumer software. To avoid conflicting definitions,
    +276    it is recommended to wrap added configuration into a sub-field named with the specific domain or tool name,
    +277    for example:
    +278    ```yaml
    +279    config:
    +280        bioimageio:  # here is the domain name
    +281            my_custom_key: 3837283
    +282            another_key:
    +283                nested: value
    +284        imagej:       # config specific to ImageJ
    +285            macro_dir: path/to/macro/file
    +286    ```
    +287    If possible, please use [`snake_case`](https://en.wikipedia.org/wiki/Snake_case) for keys in `config`.
    +288    You may want to list linked files additionally under `attachments` to include them when packaging a resource
    +289    (packaging a resource means downloading/copying important linked files and creating a ZIP archive that contains
    +290    an altered rdf.yaml file with local references to the downloaded files)"""
    +291
    +292    download_url: Optional[HttpUrl] = None
    +293    """URL to download the resource from (deprecated)"""
    +294
    +295    git_repo: Annotated[
    +296        Optional[str],
    +297        Field(
    +298            examples=[
    +299                "https://github.com/bioimage-io/spec-bioimage-io/tree/main/example_descriptions/models/unet2d_nuclei_broad"
    +300            ],
    +301        ),
    +302    ] = None
    +303    """A URL to the Git repository where the resource is being developed."""
    +304
    +305    icon: Union[
    +306        Annotated[str, Len(min_length=1, max_length=2)], ImportantFileSource, None
    +307    ] = None
    +308    """An icon for illustration"""
    +309
    +310    links: Annotated[
    +311        List[str],
    +312        Field(
    +313            examples=[
    +314                (
    +315                    "ilastik/ilastik",
    +316                    "deepimagej/deepimagej",
    +317                    "zero/notebook_u-net_3d_zerocostdl4mic",
    +318                )
    +319            ],
    +320        ),
    +321    ] = Field(default_factory=list)
    +322    """IDs of other bioimage.io resources"""
    +323
    +324    uploader: Optional[Uploader] = None
    +325    """The person who uploaded the model (e.g. to bioimage.io)"""
    +326
    +327    maintainers: List[Maintainer] = Field(default_factory=list)
    +328    """Maintainers of this resource.
+329    If not specified, `authors` are maintainers and at least some of them should specify their `github_user` name."""
    +330
    +331    rdf_source: Optional[FileSource] = None
    +332    """Resource description file (RDF) source; used to keep track of where an rdf.yaml was loaded from.
    +333    Do not set this field in a YAML file."""
    +334
    +335    tags: Annotated[
    +336        List[str],
    +337        Field(examples=[("unet2d", "pytorch", "nucleus", "segmentation", "dsb2018")]),
    +338    ] = Field(default_factory=list)
    +339    """Associated tags"""
    +340
    +341    @as_warning
    +342    @field_validator("tags")
    +343    @classmethod
    +344    def warn_about_tag_categories(
    +345        cls, value: List[str], info: ValidationInfo
    +346    ) -> List[str]:
    +347        categories = TAG_CATEGORIES.get(info.data["type"], {})
    +348        missing_categories: List[Mapping[str, Sequence[str]]] = []
    +349        for cat, entries in categories.items():
    +350            if not any(e in value for e in entries):
    +351                missing_categories.append({cat: entries})
    +352
    +353        if missing_categories:
    +354            raise ValueError(
    +355                "Missing tags from bioimage.io categories: {missing_categories}"
    +356            )
    +357
    +358        return value
    +359
    +360    version: Optional[Version] = None
    +361    """The version of the resource following SemVer 2.0."""
    +362
    +363    version_number: Optional[int] = None
    +364    """version number (n-th published version, not the semantic version)"""
    +365
    +366
    +367class GenericDescrBase(GenericModelDescrBase):
    +368    """Base for all resource descriptions except for the model descriptions"""
    +369
    +370    format_version: Literal["0.2.4"] = "0.2.4"
    +371    """The format version of this resource specification
    +372    (not the `version` of the resource description)
    +373    When creating a new resource always use the latest micro/patch version described here.
    +374    The `format_version` is important for any consumer software to understand how to parse the fields.
    +375    """
    +376
    +377    @model_validator(mode="before")
    +378    @classmethod
    +379    def _convert_from_older_format(
    +380        cls, data: BioimageioYamlContent, /
    +381    ) -> BioimageioYamlContent:
    +382        _convert_from_older_format(data)
    +383        return data
    +384
    +385    badges: List[BadgeDescr] = Field(default_factory=list)
    +386    """badges associated with this resource"""
    +387
    +388    documentation: Annotated[
    +389        Optional[ImportantFileSource],
    +390        Field(
    +391            examples=[
    +392                "https://raw.githubusercontent.com/bioimage-io/spec-bioimage-io/main/example_descriptions/models/unet2d_nuclei_broad/README.md",
    +393                "README.md",
    +394            ],
    +395        ),
    +396    ] = None
    +397    """∈📦 URL or relative path to a markdown file with additional documentation.
    +398    The recommended documentation file name is `README.md`. An `.md` suffix is mandatory."""
    +399
    +400    license: Annotated[
    +401        Union[LicenseId, DeprecatedLicenseId, str, None],
    +402        Field(union_mode="left_to_right", examples=["CC0-1.0", "MIT", "BSD-2-Clause"]),
    +403    ] = None
    +404    """A [SPDX license identifier](https://spdx.org/licenses/).
    +405    We do not support custom license beyond the SPDX license list, if you need that please
    +406    [open a GitHub issue](https://github.com/bioimage-io/spec-bioimage-io/issues/new/choose
    +407    ) to discuss your intentions with the community."""
    +408
    +409    @field_validator("license", mode="after")
    +410    @classmethod
    +411    def deprecated_spdx_license(
    +412        cls, value: Optional[Union[LicenseId, DeprecatedLicenseId, str]]
    +413    ):
    +414        if isinstance(value, LicenseId):
    +415            pass
    +416        elif value is None:
    +417            issue_warning("missing", value=value, field="license")
    +418        elif isinstance(value, DeprecatedLicenseId):
    +419            issue_warning(
    +420                "'{value}' is a deprecated license identifier.",
    +421                value=value,
    +422                field="license",
    +423            )
    +424        elif isinstance(value, str):
    +425            issue_warning(
    +426                "'{value}' is an unknown license identifier.",
    +427                value=value,
    +428                field="license",
    +429            )
    +430        else:
    +431            assert_never(value)
    +432
    +433        return value
    +434
    +435
    +436ResourceDescrType = TypeVar("ResourceDescrType", bound=GenericDescrBase)
    +437
    +438
    +439class GenericDescr(
    +440    GenericDescrBase, extra="ignore", title="bioimage.io generic specification"
    +441):
    +442    """Specification of the fields used in a generic bioimage.io-compliant resource description file (RDF).
    +443
    +444    An RDF is a YAML file that describes a resource such as a model, a dataset, or a notebook.
    +445    Note that those resources are described with a type-specific RDF.
    +446    Use this generic resource description, if none of the known specific types matches your resource.
    +447    """
    +448
    +449    type: Annotated[str, LowerCase, Field(frozen=True)] = "generic"
    +450    """The resource type assigns a broad category to the resource."""
    +451
    +452    id: Optional[ResourceId] = None
    +453    """bioimage.io-wide unique resource identifier
    +454    assigned by bioimage.io; version **un**specific."""
    +455
    +456    source: Optional[HttpUrl] = None
    +457    """The primary source of the resource"""
    +458
    +459    @field_validator("type", mode="after")
    +460    @classmethod
    +461    def check_specific_types(cls, value: str) -> str:
    +462        if value in KNOWN_SPECIFIC_RESOURCE_TYPES:
    +463            raise ValueError(
    +464                f"Use the {value} description instead of this generic description for"
    +465                + f" your '{value}' resource."
    +466            )
    +467
    +468        return value
    +
    + + +
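As an editorial sketch of the `CiteEntry` validators defined in this module: `accept_prefixed_doi` strips common DOI URL prefixes before validation, and the after-validator insists on at least one of `doi` or `url`. The citation text and DOI below are placeholders.

```python
from pydantic import ValidationError

from bioimageio.spec.generic.v0_2 import CiteEntry

# a prefixed DOI is normalized by the mode="before" validator
entry = CiteEntry(text="Doe et al. 2023", doi="https://doi.org/10.1234/example")
assert str(entry.doi) == "10.1234/example"

# entries without either a DOI or a URL are rejected
try:
    CiteEntry(text="Doe et al. 2023")
except ValidationError as err:
    print(err.errors()[0]["msg"])  # Value error, Either 'doi' or 'url' is required
```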
    +
    + +
    + + class + ResourceId(bioimageio.spec._internal.validated_string.ValidatedString): + + + +
    + +
    58class ResourceId(ValidatedString):
    +59    root_model: ClassVar[Type[RootModel[Any]]] = RootModel[
    +60        Annotated[
    +61            NotEmpty[str],
    +62            AfterValidator(str.lower),  # convert upper case on the fly
    +63            RestrictCharacters(string.ascii_lowercase + string.digits + "_-/."),
    +64            annotated_types.Predicate(
    +65                lambda s: not (s.startswith("/") or s.endswith("/"))
    +66            ),
    +67        ]
    +68    ]
    +
    + + +

    str(object='') -> str +str(bytes_or_buffer[, encoding[, errors]]) -> str

    + +

    Create a new string object from the given object. If encoding or +errors is specified, then the object must expose a data buffer +that will be decoded using the given encoding and error handler. +Otherwise, returns the result of object.__str__() (if defined) +or repr(object). +encoding defaults to sys.getdefaultencoding(). +errors defaults to 'strict'.

    +
    + + +
    +
    + root_model: ClassVar[Type[pydantic.root_model.RootModel[Any]]] = + + <class 'pydantic.root_model.RootModel[Annotated[str, MinLen, AfterValidator, RestrictCharacters, Predicate]]'> + + +
    + + +

    the pydantic root model to validate the string

    +
    + + +
    +
    +
    +
    + KNOWN_SPECIFIC_RESOURCE_TYPES = +('application', 'collection', 'dataset', 'model', 'notebook') + + +
    + + + + +
    +
    +
    + VALID_COVER_IMAGE_EXTENSIONS = +('.gif', '.jpeg', '.jpg', '.png', '.svg', '.tif', '.tiff') + + +
    + + + + +
    +
    +
    + CoverImageSource = + + typing.Annotated[typing.Union[typing.Annotated[pathlib.Path, PathType(path_type='file'), Predicate(is_absolute)], bioimageio.spec._internal.io.RelativeFilePath, bioimageio.spec._internal.url.HttpUrl], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')]), WithSuffix(suffix=('.gif', '.jpeg', '.jpg', '.png', '.svg', '.tif', '.tiff'), case_sensitive=False), PlainSerializer(func=<function _package>, return_type=PydanticUndefined, when_used='unless-none')] + + +
    + + + + +
    +
    + +
    + + class + AttachmentsDescr(bioimageio.spec._internal.node.Node): + + + +
    + +
     98class AttachmentsDescr(Node):
    + 99    model_config = {**Node.model_config, "extra": "allow"}
    +100    """update pydantic model config to allow additional unknown keys"""
    +101    files: List[ImportantFileSource] = Field(default_factory=list)
    +102    """∈📦 File attachments"""
    +
    + + +

    Subpart of a resource description

    +
    + + +
    +
    + model_config = + + {'extra': 'allow', 'frozen': False, 'populate_by_name': True, 'revalidate_instances': 'never', 'validate_assignment': True, 'validate_default': False, 'validate_return': True, 'use_attribute_docstrings': True} + + +
    + + +

    update pydantic model config to allow additional unknown keys

    +
    + + +
    +
    +
    + files: List[Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')]), AfterValidator(func=<function wo_special_file_name at 0x7f538192cea0>), PlainSerializer(func=<function _package at 0x7f538192d620>, return_type=PydanticUndefined, when_used='unless-none')]] + + +
    + + +

    ∈📦 File attachments

    +
    + + +
    +
    +
    + +
    + + class + Uploader(bioimageio.spec._internal.node.Node): + + + +
    + +
    109class Uploader(Node):
    +110    email: EmailStr
    +111    """Email"""
    +112    name: Optional[Annotated[str, AfterValidator(_remove_slashes)]] = None
    +113    """name"""
    +
    + + +

    Subpart of a resource description

    +
    + + +
    +
    + email: pydantic.networks.EmailStr + + +
    + + +

    Email

    +
    + + +
    +
    +
    + name: Optional[Annotated[str, AfterValidator(func=<function _remove_slashes at 0x7f537f154680>)]] + + +
    + + +

    name

    +
    + + +
    +
    +
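A short, hedged construction example for the `Uploader` node above (it assumes the email-validator package required by pydantic's `EmailStr` is available):

```python
# Hedged sketch: Uploader is an ordinary pydantic node with an email and an optional name.
from bioimageio.spec.generic.v0_2 import Uploader

uploader = Uploader(email="jane.doe@example.com", name="Jane Doe")
print(uploader.model_dump(mode="json"))
# -> {'email': 'jane.doe@example.com', 'name': 'Jane Doe'}
```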
    + +
    + + class + Author(_Person): + + + +
    + +
    131class Author(_Person):
    +132    name: Annotated[str, AfterValidator(_remove_slashes)]
    +133    github_user: Optional[str] = None  # TODO: validate github_user
    +
    + + +

    Subpart of a resource description

    +
    + + +
    +
    + name: Annotated[str, AfterValidator(func=<function _remove_slashes at 0x7f537f154680>)] + + +
    + + + + +
    +
    +
    + github_user: Optional[str] + + +
    + + + + +
    +
    +
    Inherited Members
    +
    + +
    +
    +
    +
    + +
    + + class + Maintainer(_Person): + + + +
    + +
    136class Maintainer(_Person):
    +137    name: Optional[Annotated[str, AfterValidator(_remove_slashes)]] = None
    +138    github_user: str
    +
    + + +

    Subpart of a resource description

    +
    + + +
    +
    + name: Optional[Annotated[str, AfterValidator(func=<function _remove_slashes at 0x7f537f154680>)]] + + +
    + + + + +
    +
    +
    + github_user: str + + +
    + + + + +
    +
    +
    Inherited Members
    +
    + +
    +
    +
    +
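The two nodes above mirror each other: `Author` requires a `name` and treats `github_user` as optional, while `Maintainer` requires a `github_user` and makes `name` optional. A hedged sketch:

```python
# Hedged sketch: complementary required fields of Author and Maintainer (format 0.2).
from bioimageio.spec.generic.v0_2 import Author, Maintainer

author = Author(name="Jane Doe", github_user="janedoe")  # name is required
maintainer = Maintainer(github_user="janedoe")           # github_user is required

# Maintainer(name="Jane Doe") without a github_user would fail validation.
print(author.name, maintainer.github_user)
```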
    + +
    + + class + BadgeDescr(bioimageio.spec._internal.node.Node): + + + +
    + +
    141class BadgeDescr(Node, title="Custom badge"):
    +142    """A custom badge"""
    +143
    +144    label: Annotated[str, Field(examples=["Open in Colab"])]
    +145    """badge label to display on hover"""
    +146
    +147    icon: Annotated[
    +148        Optional[InPackageIfLocalFileSource],
    +149        Field(examples=["https://colab.research.google.com/assets/colab-badge.svg"]),
    +150    ] = None
    +151    """badge icon"""
    +152
    +153    url: Annotated[
    +154        HttpUrl,
    +155        Field(
    +156            examples=[
    +157                "https://colab.research.google.com/github/HenriquesLab/ZeroCostDL4Mic/blob/master/Colab_notebooks/U-net_2D_ZeroCostDL4Mic.ipynb"
    +158            ]
    +159        ),
    +160    ]
    +161    """target URL"""
    +
    + + +

    A custom badge

    +
    + + +
    +
    + label: Annotated[str, FieldInfo(annotation=NoneType, required=True, examples=['Open in Colab'])] + + +
    + + +

    badge label to display on hover

    +
    + + +
    +
    +
    + icon: Annotated[Union[Annotated[Union[Annotated[pathlib.Path, PathType(path_type='file')], bioimageio.spec._internal.io.RelativeFilePath], AfterValidator(func=<function wo_special_file_name at 0x7f538192cea0>), PlainSerializer(func=<function _package at 0x7f538192d620>, return_type=PydanticUndefined, when_used='unless-none')], bioimageio.spec._internal.url.HttpUrl, Annotated[pydantic_core._pydantic_core.Url, UrlConstraints(max_length=2083, allowed_schemes=['http', 'https'], host_required=None, default_host=None, default_port=None, default_path=None)], NoneType], FieldInfo(annotation=NoneType, required=True, examples=['https://colab.research.google.com/assets/colab-badge.svg'])] + + +
    + + +

    badge icon

    +
    + + +
    +
    +
    + url: Annotated[bioimageio.spec._internal.url.HttpUrl, FieldInfo(annotation=NoneType, required=True, examples=['https://colab.research.google.com/github/HenriquesLab/ZeroCostDL4Mic/blob/master/Colab_notebooks/U-net_2D_ZeroCostDL4Mic.ipynb'])] + + +
    + + +

    target URL

    +
    + + +
    +
    +
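A hedged construction sketch for `BadgeDescr`, reusing the example values from the field metadata above. Depending on the active validation context, URL fields may be checked for reachability, so this is best run with network access (or with IO checks disabled).

```python
# Hedged sketch: a custom badge built from the documented example values.
from bioimageio.spec.generic.v0_2 import BadgeDescr

badge = BadgeDescr(
    label="Open in Colab",
    icon="https://colab.research.google.com/assets/colab-badge.svg",
    url=(
        "https://colab.research.google.com/github/HenriquesLab/ZeroCostDL4Mic/"
        "blob/master/Colab_notebooks/U-net_2D_ZeroCostDL4Mic.ipynb"
    ),
)
print(badge.label)
```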
    + +
    + + class + CiteEntry(bioimageio.spec._internal.node.Node): + + + +
    + +
    164class CiteEntry(Node):
    +165    text: str
    +166    """free text description"""
    +167
    +168    doi: Optional[Doi] = None
+169    """A digital object identifier (DOI) is the preferred citation reference.
    +170    See https://www.doi.org/ for details. (alternatively specify `url`)"""
    +171
    +172    @field_validator("doi", mode="before")
    +173    @classmethod
    +174    def accept_prefixed_doi(cls, doi: Any) -> Any:
    +175        if isinstance(doi, str):
    +176            for doi_prefix in ("https://doi.org/", "http://dx.doi.org/"):
    +177                if doi.startswith(doi_prefix):
    +178                    doi = doi[len(doi_prefix) :]
    +179                    break
    +180
    +181        return doi
    +182
    +183    url: Optional[str] = None
    +184    """URL to cite (preferably specify a `doi` instead)"""
    +185
    +186    @model_validator(mode="after")
    +187    def _check_doi_or_url(self) -> Self:
    +188        if not self.doi and not self.url:
    +189            raise ValueError("Either 'doi' or 'url' is required")
    +190
    +191        return self
    +
    + + +

    Subpart of a resource description

    +
    + + +
    +
    + text: str + + +
    + + +

    free text description

    +
    + + +
    +
    +
    + doi: Optional[bioimageio.spec._internal.types.Doi] + + +
    + + +

A digital object identifier (DOI) is the preferred citation reference. +See https://www.doi.org/ for details. (alternatively specify url)

    +
    + + +
    +
    + +
    +
    @field_validator('doi', mode='before')
    +
    @classmethod
    + + def + accept_prefixed_doi(cls, doi: Any) -> Any: + + + +
    + +
    172    @field_validator("doi", mode="before")
    +173    @classmethod
    +174    def accept_prefixed_doi(cls, doi: Any) -> Any:
    +175        if isinstance(doi, str):
    +176            for doi_prefix in ("https://doi.org/", "http://dx.doi.org/"):
    +177                if doi.startswith(doi_prefix):
    +178                    doi = doi[len(doi_prefix) :]
    +179                    break
    +180
    +181        return doi
    +
    + + + + +
    +
    +
    + url: Optional[str] + + +
    + + +

    URL to cite (preferably specify a doi instead)

    +
    + + +
    +
    +
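A hedged sketch of the two behaviours shown above: `accept_prefixed_doi` strips a leading `https://doi.org/` or `http://dx.doi.org/` prefix before `Doi` validation, and the model validator insists on at least one of `doi` or `url`. The concrete DOI below is made up for illustration.

```python
# Hedged sketch: DOI prefix stripping and the doi-or-url requirement.
from pydantic import ValidationError

from bioimageio.spec.generic.v0_2 import CiteEntry

entry = CiteEntry(text="training library", doi="https://doi.org/10.1234/example.doi")
print(entry.doi)  # 10.1234/example.doi (prefix removed before validation)

try:
    CiteEntry(text="incomplete reference")  # neither doi nor url given
except ValidationError as err:
    print(err)  # Either 'doi' or 'url' is required
```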
    + +
    + + class + LinkedResource(bioimageio.spec._internal.node.Node): + + + +
    + +
    194class LinkedResource(Node):
    +195    """Reference to a bioimage.io resource"""
    +196
    +197    id: ResourceId
    +198    """A valid resource `id` from the bioimage.io collection."""
    +199
    +200    version_number: Optional[int] = None
    +201    """version number (n-th published version, not the semantic version) of linked resource"""
    +
    + + +

    Reference to a bioimage.io resource

    +
    + + +
    +
    + id: ResourceId + + +
    + + +

    A valid resource id from the bioimage.io collection.

    +
    + + +
    +
    +
    + version_number: Optional[int] + + +
    + + +

    version number (n-th published version, not the semantic version) of linked resource

    +
    + + +
    +
    +
    + +
    + + class + GenericModelDescrBase(bioimageio.spec._internal.common_nodes.ResourceDescrBase): + + + +
    + +
    204class GenericModelDescrBase(ResourceDescrBase):
+205    """Base for all resource descriptions including model descriptions"""
    +206
    +207    name: Annotated[NotEmpty[str], warn(MaxLen(128), "Longer than 128 characters.")]
    +208    """A human-friendly name of the resource description"""
    +209
    +210    description: str
    +211
    +212    covers: Annotated[
    +213        List[CoverImageSource],
    +214        Field(
    +215            examples=["cover.png"],
    +216            description=(
    +217                "Cover images. Please use an image smaller than 500KB and an aspect"
    +218                " ratio width to height of 2:1.\nThe supported image formats are:"
    +219                f" {VALID_COVER_IMAGE_EXTENSIONS}"
    +220            ),
    +221        ),
    +222    ] = Field(
    +223        default_factory=list,
    +224    )
    +225    """∈📦 Cover images. Please use an image smaller than 500KB and an aspect ratio width to height of 2:1."""
    +226
    +227    id_emoji: Optional[Annotated[str, Len(min_length=1, max_length=1)]] = None
    +228    """UTF-8 emoji for display alongside the `id`."""
    +229
    +230    authors: List[Author] = Field(default_factory=list)
    +231    """The authors are the creators of the RDF and the primary points of contact."""
    +232
    +233    @field_validator("authors", mode="before")
    +234    @classmethod
    +235    def accept_author_strings(cls, authors: Union[Any, Sequence[Any]]) -> Any:
    +236        """we unofficially accept strings as author entries"""
    +237        if is_sequence(authors):
    +238            authors = [{"name": a} if isinstance(a, str) else a for a in authors]
    +239
    +240        if not authors:
    +241            issue_warning("missing", value=authors, field="authors")
    +242
    +243        return authors
    +244
    +245    attachments: Optional[AttachmentsDescr] = None
    +246    """file and other attachments"""
    +247
    +248    cite: List[CiteEntry] = Field(default_factory=list)
    +249    """citations"""
    +250
    +251    @field_validator("cite", mode="after")
    +252    @classmethod
    +253    def _warn_empty_cite(cls, value: Any):
    +254        if not value:
    +255            issue_warning("missing", value=value, field="cite")
    +256
    +257        return value
    +258
    +259    config: Annotated[
    +260        Dict[str, YamlValue],
    +261        Field(
    +262            examples=[
    +263                dict(
    +264                    bioimageio={
    +265                        "my_custom_key": 3837283,
    +266                        "another_key": {"nested": "value"},
    +267                    },
    +268                    imagej={"macro_dir": "path/to/macro/file"},
    +269                )
    +270            ],
    +271        ),
    +272    ] = Field(default_factory=dict)
    +273    """A field for custom configuration that can contain any keys not present in the RDF spec.
    +274    This means you should not store, for example, a github repo URL in `config` since we already have the
    +275    `git_repo` field defined in the spec.
    +276    Keys in `config` may be very specific to a tool or consumer software. To avoid conflicting definitions,
    +277    it is recommended to wrap added configuration into a sub-field named with the specific domain or tool name,
    +278    for example:
    +279    ```yaml
    +280    config:
    +281        bioimageio:  # here is the domain name
    +282            my_custom_key: 3837283
    +283            another_key:
    +284                nested: value
    +285        imagej:       # config specific to ImageJ
    +286            macro_dir: path/to/macro/file
    +287    ```
    +288    If possible, please use [`snake_case`](https://en.wikipedia.org/wiki/Snake_case) for keys in `config`.
    +289    You may want to list linked files additionally under `attachments` to include them when packaging a resource
    +290    (packaging a resource means downloading/copying important linked files and creating a ZIP archive that contains
    +291    an altered rdf.yaml file with local references to the downloaded files)"""
    +292
    +293    download_url: Optional[HttpUrl] = None
    +294    """URL to download the resource from (deprecated)"""
    +295
    +296    git_repo: Annotated[
    +297        Optional[str],
    +298        Field(
    +299            examples=[
    +300                "https://github.com/bioimage-io/spec-bioimage-io/tree/main/example_descriptions/models/unet2d_nuclei_broad"
    +301            ],
    +302        ),
    +303    ] = None
    +304    """A URL to the Git repository where the resource is being developed."""
    +305
    +306    icon: Union[
    +307        Annotated[str, Len(min_length=1, max_length=2)], ImportantFileSource, None
    +308    ] = None
    +309    """An icon for illustration"""
    +310
    +311    links: Annotated[
    +312        List[str],
    +313        Field(
    +314            examples=[
    +315                (
    +316                    "ilastik/ilastik",
    +317                    "deepimagej/deepimagej",
    +318                    "zero/notebook_u-net_3d_zerocostdl4mic",
    +319                )
    +320            ],
    +321        ),
    +322    ] = Field(default_factory=list)
    +323    """IDs of other bioimage.io resources"""
    +324
    +325    uploader: Optional[Uploader] = None
    +326    """The person who uploaded the model (e.g. to bioimage.io)"""
    +327
    +328    maintainers: List[Maintainer] = Field(default_factory=list)
    +329    """Maintainers of this resource.
+330    If not specified, `authors` are maintainers and at least some of them should specify their `github_user` name"""
    +331
    +332    rdf_source: Optional[FileSource] = None
    +333    """Resource description file (RDF) source; used to keep track of where an rdf.yaml was loaded from.
    +334    Do not set this field in a YAML file."""
    +335
    +336    tags: Annotated[
    +337        List[str],
    +338        Field(examples=[("unet2d", "pytorch", "nucleus", "segmentation", "dsb2018")]),
    +339    ] = Field(default_factory=list)
    +340    """Associated tags"""
    +341
    +342    @as_warning
    +343    @field_validator("tags")
    +344    @classmethod
    +345    def warn_about_tag_categories(
    +346        cls, value: List[str], info: ValidationInfo
    +347    ) -> List[str]:
    +348        categories = TAG_CATEGORIES.get(info.data["type"], {})
    +349        missing_categories: List[Mapping[str, Sequence[str]]] = []
    +350        for cat, entries in categories.items():
    +351            if not any(e in value for e in entries):
    +352                missing_categories.append({cat: entries})
    +353
    +354        if missing_categories:
    +355            raise ValueError(
+356                f"Missing tags from bioimage.io categories: {missing_categories}"
    +357            )
    +358
    +359        return value
    +360
    +361    version: Optional[Version] = None
    +362    """The version of the resource following SemVer 2.0."""
    +363
    +364    version_number: Optional[int] = None
    +365    """version number (n-th published version, not the semantic version)"""
    +
    + + +

Base for all resource descriptions including model descriptions

    +
    + + +
    +
    + name: Annotated[str, MinLen(min_length=1), AfterWarner(func=<function as_warning.<locals>.wrapper at 0x7f537f13b380>, severity=30, msg='Longer than 128 characters.', context={'typ': Annotated[Any, MaxLen(max_length=128)]})] + + +
    + + +

    A human-friendly name of the resource description

    +
    + + +
    +
    +
    + description: str + + +
    + + + + +
    +
    +
    + covers: Annotated[List[Annotated[Union[Annotated[pathlib.Path, PathType(path_type='file'), Predicate(is_absolute)], bioimageio.spec._internal.io.RelativeFilePath, bioimageio.spec._internal.url.HttpUrl], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')]), WithSuffix(suffix=('.gif', '.jpeg', '.jpg', '.png', '.svg', '.tif', '.tiff'), case_sensitive=False), PlainSerializer(func=<function _package at 0x7f538192d620>, return_type=PydanticUndefined, when_used='unless-none')]], FieldInfo(annotation=NoneType, required=True, description="Cover images. Please use an image smaller than 500KB and an aspect ratio width to height of 2:1.\nThe supported image formats are: ('.gif', '.jpeg', '.jpg', '.png', '.svg', '.tif', '.tiff')", examples=['cover.png'])] + + +
    + + +

    ∈📦 Cover images. Please use an image smaller than 500KB and an aspect ratio width to height of 2:1.

    +
    + + +
    +
    +
    + id_emoji: Optional[Annotated[str, Len(min_length=1, max_length=1)]] + + +
    + + +

    UTF-8 emoji for display alongside the id.

    +
    + + +
    +
    +
    + authors: List[Author] + + +
    + + +

    The authors are the creators of the RDF and the primary points of contact.

    +
    + + +
    +
    + +
    +
    @field_validator('authors', mode='before')
    +
    @classmethod
    + + def + accept_author_strings(cls, authors: Union[Any, Sequence[Any]]) -> Any: + + + +
    + +
    233    @field_validator("authors", mode="before")
    +234    @classmethod
    +235    def accept_author_strings(cls, authors: Union[Any, Sequence[Any]]) -> Any:
    +236        """we unofficially accept strings as author entries"""
    +237        if is_sequence(authors):
    +238            authors = [{"name": a} if isinstance(a, str) else a for a in authors]
    +239
    +240        if not authors:
    +241            issue_warning("missing", value=authors, field="authors")
    +242
    +243        return authors
    +
    + + +

    we unofficially accept strings as author entries

    +
    + + +
    +
    +
    + attachments: Optional[AttachmentsDescr] + + +
    + + +

    file and other attachments

    +
    + + +
    +
    +
    + cite: List[CiteEntry] + + +
    + + +

    citations

    +
    + + +
    +
    +
    + config: Annotated[Dict[str, YamlValue], FieldInfo(annotation=NoneType, required=True, examples=[{'bioimageio': {'my_custom_key': 3837283, 'another_key': {'nested': 'value'}}, 'imagej': {'macro_dir': 'path/to/macro/file'}}])] + + +
    + + +

    A field for custom configuration that can contain any keys not present in the RDF spec. +This means you should not store, for example, a github repo URL in config since we already have the +git_repo field defined in the spec. +Keys in config may be very specific to a tool or consumer software. To avoid conflicting definitions, +it is recommended to wrap added configuration into a sub-field named with the specific domain or tool name, +for example:

    + +
    +
    config:
    +    bioimageio:  # here is the domain name
    +        my_custom_key: 3837283
    +        another_key:
    +            nested: value
    +    imagej:       # config specific to ImageJ
    +        macro_dir: path/to/macro/file
    +
    +
    + +

    If possible, please use snake_case for keys in config. +You may want to list linked files additionally under attachments to include them when packaging a resource +(packaging a resource means downloading/copying important linked files and creating a ZIP archive that contains +an altered rdf.yaml file with local references to the downloaded files)

    +
    + + +
    +
    +
    + download_url: Optional[bioimageio.spec._internal.url.HttpUrl] + + +
    + + +

    URL to download the resource from (deprecated)

    +
    + + +
    +
    +
    + git_repo: Annotated[Optional[str], FieldInfo(annotation=NoneType, required=True, examples=['https://github.com/bioimage-io/spec-bioimage-io/tree/main/example_descriptions/models/unet2d_nuclei_broad'])] + + +
    + + +

    A URL to the Git repository where the resource is being developed.

    +
    + + +
    +
    +
    + icon: Union[Annotated[str, Len(min_length=1, max_length=2)], Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')]), AfterValidator(func=<function wo_special_file_name at 0x7f538192cea0>), PlainSerializer(func=<function _package at 0x7f538192d620>, return_type=PydanticUndefined, when_used='unless-none')], NoneType] + + +
    + + +

    An icon for illustration

    +
    + + +
    + +
    +
    + uploader: Optional[Uploader] + + +
    + + +

    The person who uploaded the model (e.g. to bioimage.io)

    +
    + + +
    +
    +
    + maintainers: List[Maintainer] + + +
    + + +

Maintainers of this resource. +If not specified, authors are maintainers and at least some of them should specify their github_user name

    +
    + + +
    +
    +
    + rdf_source: Optional[Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])]] + + +
    + + +

    Resource description file (RDF) source; used to keep track of where an rdf.yaml was loaded from. +Do not set this field in a YAML file.

    +
    + + +
    +
    +
    + tags: Annotated[List[str], FieldInfo(annotation=NoneType, required=True, examples=[('unet2d', 'pytorch', 'nucleus', 'segmentation', 'dsb2018')])] + + +
    + + +

    Associated tags

    +
    + + +
    +
    + +
    + + def + warn_about_tag_categories(value: Any, info: pydantic_core.core_schema.ValidationInfo) -> Any: + + + +
    + +
    75    def wrapper(value: Any, info: ValidationInfo) -> Any:
    +76        try:
    +77            call_validator_func(func, mode, value, info)
    +78        except (AssertionError, ValueError) as e:
    +79            issue_warning(
    +80                msg or ",".join(e.args),
    +81                value=value,
    +82                severity=severity,
    +83                msg_context=msg_context,
    +84            )
    +85
    +86        return value
    +
    + + + + +
    +
    +
    + version: Optional[bioimageio.spec._internal.version_type.Version] + + +
    + + +

    The version of the resource following SemVer 2.0.

    +
    + + +
    +
    +
    + version_number: Optional[int] + + +
    + + +

    version number (n-th published version, not the semantic version)

    +
    + + +
    +
    + +
    +
    + + +
    +
    +
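The `accept_author_strings` validator above unofficially promotes plain strings to author entries. A hedged sketch using the concrete `GenericDescr` subclass documented further below (direct construction outside of `load_description` is assumed to be acceptable for experimentation):

```python
# Hedged sketch: plain strings become Author(name=...) entries before field validation.
from bioimageio.spec.generic.v0_2 import GenericDescr

descr = GenericDescr(
    name="example resource",
    description="demonstrates author strings",
    authors=["Jane Doe"],  # promoted to [{"name": "Jane Doe"}]
)
print(descr.authors[0].name)  # Jane Doe
```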
    + +
    + + class + GenericDescrBase(GenericModelDescrBase): + + + +
    + +
    368class GenericDescrBase(GenericModelDescrBase):
    +369    """Base for all resource descriptions except for the model descriptions"""
    +370
    +371    format_version: Literal["0.2.4"] = "0.2.4"
    +372    """The format version of this resource specification
    +373    (not the `version` of the resource description)
    +374    When creating a new resource always use the latest micro/patch version described here.
    +375    The `format_version` is important for any consumer software to understand how to parse the fields.
    +376    """
    +377
    +378    @model_validator(mode="before")
    +379    @classmethod
    +380    def _convert_from_older_format(
    +381        cls, data: BioimageioYamlContent, /
    +382    ) -> BioimageioYamlContent:
    +383        _convert_from_older_format(data)
    +384        return data
    +385
    +386    badges: List[BadgeDescr] = Field(default_factory=list)
    +387    """badges associated with this resource"""
    +388
    +389    documentation: Annotated[
    +390        Optional[ImportantFileSource],
    +391        Field(
    +392            examples=[
    +393                "https://raw.githubusercontent.com/bioimage-io/spec-bioimage-io/main/example_descriptions/models/unet2d_nuclei_broad/README.md",
    +394                "README.md",
    +395            ],
    +396        ),
    +397    ] = None
    +398    """∈📦 URL or relative path to a markdown file with additional documentation.
    +399    The recommended documentation file name is `README.md`. An `.md` suffix is mandatory."""
    +400
    +401    license: Annotated[
    +402        Union[LicenseId, DeprecatedLicenseId, str, None],
    +403        Field(union_mode="left_to_right", examples=["CC0-1.0", "MIT", "BSD-2-Clause"]),
    +404    ] = None
    +405    """A [SPDX license identifier](https://spdx.org/licenses/).
+406    We do not support custom licenses beyond the SPDX license list. If you need that, please
    +407    [open a GitHub issue](https://github.com/bioimage-io/spec-bioimage-io/issues/new/choose
    +408    ) to discuss your intentions with the community."""
    +409
    +410    @field_validator("license", mode="after")
    +411    @classmethod
    +412    def deprecated_spdx_license(
    +413        cls, value: Optional[Union[LicenseId, DeprecatedLicenseId, str]]
    +414    ):
    +415        if isinstance(value, LicenseId):
    +416            pass
    +417        elif value is None:
    +418            issue_warning("missing", value=value, field="license")
    +419        elif isinstance(value, DeprecatedLicenseId):
    +420            issue_warning(
    +421                "'{value}' is a deprecated license identifier.",
    +422                value=value,
    +423                field="license",
    +424            )
    +425        elif isinstance(value, str):
    +426            issue_warning(
    +427                "'{value}' is an unknown license identifier.",
    +428                value=value,
    +429                field="license",
    +430            )
    +431        else:
    +432            assert_never(value)
    +433
    +434        return value
    +
    + + +

    Base for all resource descriptions except for the model descriptions

    +
    + + +
    +
    + format_version: Literal['0.2.4'] + + +
    + + +

    The format version of this resource specification +(not the version of the resource description) +When creating a new resource always use the latest micro/patch version described here. +The format_version is important for any consumer software to understand how to parse the fields.

    +
    + + +
    +
    +
    + badges: List[BadgeDescr] + + +
    + + +

    badges associated with this resource

    +
    + + +
    +
    +
    + documentation: Annotated[Optional[Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')]), AfterValidator(func=<function wo_special_file_name at 0x7f538192cea0>), PlainSerializer(func=<function _package at 0x7f538192d620>, return_type=PydanticUndefined, when_used='unless-none')]], FieldInfo(annotation=NoneType, required=True, examples=['https://raw.githubusercontent.com/bioimage-io/spec-bioimage-io/main/example_descriptions/models/unet2d_nuclei_broad/README.md', 'README.md'])] + + +
    + + +

    ∈📦 URL or relative path to a markdown file with additional documentation. +The recommended documentation file name is README.md. An .md suffix is mandatory.

    +
    + + +
    +
    +
    + license: Annotated[Union[bioimageio.spec._internal.license_id.LicenseId, bioimageio.spec._internal.license_id.DeprecatedLicenseId, str, NoneType], FieldInfo(annotation=NoneType, required=True, examples=['CC0-1.0', 'MIT', 'BSD-2-Clause'], metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])] + + +
    + + +

A SPDX license identifier. +We do not support custom licenses beyond the SPDX license list. If you need that, please +open a GitHub issue to discuss your intentions with the community.

    +
    + + +
    +
    + +
    +
    @field_validator('license', mode='after')
    +
    @classmethod
    + + def + deprecated_spdx_license( cls, value: Union[bioimageio.spec._internal.license_id.LicenseId, bioimageio.spec._internal.license_id.DeprecatedLicenseId, str, NoneType]): + + + +
    + +
    410    @field_validator("license", mode="after")
    +411    @classmethod
    +412    def deprecated_spdx_license(
    +413        cls, value: Optional[Union[LicenseId, DeprecatedLicenseId, str]]
    +414    ):
    +415        if isinstance(value, LicenseId):
    +416            pass
    +417        elif value is None:
    +418            issue_warning("missing", value=value, field="license")
    +419        elif isinstance(value, DeprecatedLicenseId):
    +420            issue_warning(
    +421                "'{value}' is a deprecated license identifier.",
    +422                value=value,
    +423                field="license",
    +424            )
    +425        elif isinstance(value, str):
    +426            issue_warning(
    +427                "'{value}' is an unknown license identifier.",
    +428                value=value,
    +429                field="license",
    +430            )
    +431        else:
    +432            assert_never(value)
    +433
    +434        return value
    +
    + + + + +
    +
    +
    + implemented_format_version: ClassVar[str] = +'0.2.4' + + +
    + + + + +
    +
    +
    + implemented_format_version_tuple: ClassVar[Tuple[int, int, int]] = +(0, 2, 4) + + +
    + + + + +
    +
    + +
    +
    + + +
    + +
    +
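The `deprecated_spdx_license` validator above keeps whatever value it is given and only warns about deprecated or unknown identifiers. A hedged sketch (again via `GenericDescr`; with the default warning level nothing should be raised here):

```python
# Hedged sketch: unknown license identifiers are kept but flagged as warnings.
from bioimageio.spec.generic.v0_2 import GenericDescr

known = GenericDescr(name="resource a", description="...", license="MIT")
unknown = GenericDescr(name="resource b", description="...", license="my-own-terms")

print(known.license, unknown.license)  # MIT my-own-terms (the value is not altered)
```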
    + +
    + + class + GenericDescr(GenericDescrBase): + + + +
    + +
    440class GenericDescr(
    +441    GenericDescrBase, extra="ignore", title="bioimage.io generic specification"
    +442):
    +443    """Specification of the fields used in a generic bioimage.io-compliant resource description file (RDF).
    +444
    +445    An RDF is a YAML file that describes a resource such as a model, a dataset, or a notebook.
    +446    Note that those resources are described with a type-specific RDF.
+447    Use this generic resource description if none of the known specific types matches your resource.
    +448    """
    +449
    +450    type: Annotated[str, LowerCase, Field(frozen=True)] = "generic"
    +451    """The resource type assigns a broad category to the resource."""
    +452
    +453    id: Optional[ResourceId] = None
    +454    """bioimage.io-wide unique resource identifier
    +455    assigned by bioimage.io; version **un**specific."""
    +456
    +457    source: Optional[HttpUrl] = None
    +458    """The primary source of the resource"""
    +459
    +460    @field_validator("type", mode="after")
    +461    @classmethod
    +462    def check_specific_types(cls, value: str) -> str:
    +463        if value in KNOWN_SPECIFIC_RESOURCE_TYPES:
    +464            raise ValueError(
    +465                f"Use the {value} description instead of this generic description for"
    +466                + f" your '{value}' resource."
    +467            )
    +468
    +469        return value
    +
    + + +

    Specification of the fields used in a generic bioimage.io-compliant resource description file (RDF).

    + +

An RDF is a YAML file that describes a resource such as a model, a dataset, or a notebook. +Note that those resources are described with a type-specific RDF. +Use this generic resource description if none of the known specific types matches your resource.

    +
    + + +
    +
    + type: Annotated[str, Annotated[~_StrType, Predicate(str.islower)], FieldInfo(annotation=NoneType, required=True, frozen=True)] + + +
    + + +

    The resource type assigns a broad category to the resource.

    +
    + + +
    +
    +
    + id: Optional[ResourceId] + + +
    + + +

    bioimage.io-wide unique resource identifier +assigned by bioimage.io; version unspecific.

    +
    + + +
    +
    +
    + source: Optional[bioimageio.spec._internal.url.HttpUrl] + + +
    + + +

    The primary source of the resource

    +
    + + +
    +
    + +
    +
    @field_validator('type', mode='after')
    +
    @classmethod
    + + def + check_specific_types(cls, value: str) -> str: + + + +
    + +
    460    @field_validator("type", mode="after")
    +461    @classmethod
    +462    def check_specific_types(cls, value: str) -> str:
    +463        if value in KNOWN_SPECIFIC_RESOURCE_TYPES:
    +464            raise ValueError(
    +465                f"Use the {value} description instead of this generic description for"
    +466                + f" your '{value}' resource."
    +467            )
    +468
    +469        return value
    +
    + + + + +
    +
    +
    + implemented_format_version: ClassVar[str] = +'0.2.4' + + +
    + + + + +
    +
    +
    + implemented_format_version_tuple: ClassVar[Tuple[int, int, int]] = +(0, 2, 4) + + +
    + + + + +
    +
    + +

    +
    + + +
    + +
    +
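A hedged sketch of the `check_specific_types` validator shown above: resource types that have their own description class are rejected, so a model has to use the dedicated model description instead of `GenericDescr`.

```python
# Hedged sketch: GenericDescr refuses types with a dedicated description class.
from pydantic import ValidationError

from bioimageio.spec.generic.v0_2 import GenericDescr

GenericDescr(name="something else", description="no specific type fits")  # type defaults to "generic"

try:
    GenericDescr(name="a model", description="use ModelDescr instead", type="model")
except ValidationError as err:
    print(err)  # Use the model description instead of this generic description ...
```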
+
\ No newline at end of file
diff --git a/bioimageio/spec/generic/v0_3.html b/bioimageio/spec/generic/v0_3.html
new file mode 100644
index 00000000..cc9bdd0d
--- /dev/null
+++ b/bioimageio/spec/generic/v0_3.html
@@ -0,0 +1,2190 @@
+bioimageio.spec.generic.v0_3 API documentation
    +
    +

    +bioimageio.spec.generic.v0_3

    + + + + + + +
      1from __future__ import annotations
    +  2
    +  3import string
    +  4from functools import partial
    +  5from typing import (
    +  6    Any,
    +  7    ClassVar,
    +  8    Dict,
    +  9    List,
    + 10    Literal,
    + 11    Optional,
    + 12    Sequence,
    + 13    Type,
    + 14    TypeVar,
    + 15    Union,
    + 16)
    + 17
    + 18import annotated_types
    + 19from annotated_types import Len, LowerCase, MaxLen, MinLen
    + 20from pydantic import Field, RootModel, ValidationInfo, field_validator, model_validator
    + 21from typing_extensions import Annotated
    + 22
    + 23from .._internal.common_nodes import (
    + 24    Converter,
    + 25    Node,
    + 26    ResourceDescrBase,
    + 27)
    + 28from .._internal.constants import (
    + 29    TAG_CATEGORIES,
    + 30)
    + 31from .._internal.field_validation import validate_gh_user
    + 32from .._internal.field_warning import as_warning, warn
    + 33from .._internal.io import (
    + 34    BioimageioYamlContent,
    + 35    V_suffix,
    + 36    YamlValue,
    + 37    include_in_package_serializer,
    + 38    validate_suffix,
    + 39)
    + 40from .._internal.io import FileDescr as FileDescr
    + 41from .._internal.io_basics import AbsoluteFilePath
    + 42from .._internal.io_basics import Sha256 as Sha256
    + 43from .._internal.license_id import DeprecatedLicenseId as DeprecatedLicenseId
    + 44from .._internal.license_id import LicenseId as LicenseId
    + 45from .._internal.types import (
    + 46    ImportantFileSource,
    + 47    NotEmpty,
    + 48)
    + 49from .._internal.types import RelativeFilePath as RelativeFilePath
    + 50from .._internal.url import HttpUrl as HttpUrl
    + 51from .._internal.validated_string import ValidatedString
    + 52from .._internal.validator_annotations import (
    + 53    AfterValidator,
    + 54    Predicate,
    + 55    RestrictCharacters,
    + 56)
    + 57from .._internal.version_type import Version as Version
    + 58from .._internal.warning_levels import ALERT, INFO
    + 59from ._v0_3_converter import convert_from_older_format
    + 60from .v0_2 import Author as _Author_v0_2
    + 61from .v0_2 import BadgeDescr as BadgeDescr
    + 62from .v0_2 import CoverImageSource
    + 63from .v0_2 import Doi as Doi
    + 64from .v0_2 import Maintainer as _Maintainer_v0_2
    + 65from .v0_2 import OrcidId as OrcidId
    + 66from .v0_2 import Uploader as Uploader
    + 67
    + 68KNOWN_SPECIFIC_RESOURCE_TYPES = (
    + 69    "application",
    + 70    "collection",
    + 71    "dataset",
    + 72    "model",
    + 73    "notebook",
    + 74)
    + 75VALID_COVER_IMAGE_EXTENSIONS = (
    + 76    ".gif",
    + 77    ".jpeg",
    + 78    ".jpg",
    + 79    ".png",
    + 80    ".svg",
    + 81)
    + 82
    + 83
    + 84class ResourceId(ValidatedString):
    + 85    root_model: ClassVar[Type[RootModel[Any]]] = RootModel[
    + 86        Annotated[
    + 87            NotEmpty[str],
    + 88            RestrictCharacters(string.ascii_lowercase + string.digits + "_-/."),
    + 89            annotated_types.Predicate(
    + 90                lambda s: not (s.startswith("/") or s.endswith("/"))
    + 91            ),
    + 92        ]
    + 93    ]
    + 94
    + 95
    + 96def _validate_md_suffix(value: V_suffix) -> V_suffix:
    + 97    return validate_suffix(value, suffix=".md", case_sensitive=True)
    + 98
    + 99
    +100DocumentationSource = Annotated[
    +101    Union[AbsoluteFilePath, RelativeFilePath, HttpUrl],
    +102    Field(union_mode="left_to_right"),
    +103    AfterValidator(_validate_md_suffix),
    +104    include_in_package_serializer,
    +105]
    +106
    +107
    +108def _has_no_slash(s: str) -> bool:
    +109    return "/" not in s and "\\" not in s
    +110
    +111
    +112class Author(_Author_v0_2):
    +113    name: Annotated[str, Predicate(_has_no_slash)]
    +114    github_user: Optional[str] = None
    +115
    +116    @field_validator("github_user", mode="after")
    +117    def _validate_gh_user(cls, value: Optional[str]):
    +118        if value is None:
    +119            return None
    +120        else:
    +121            return validate_gh_user(value)
    +122
    +123
    +124class _AuthorConv(Converter[_Author_v0_2, Author]):
    +125    def _convert(
    +126        self, src: _Author_v0_2, tgt: "type[Author] | type[dict[str, Any]]"
    +127    ) -> "Author | dict[str, Any]":
    +128        return tgt(
    +129            name=src.name,
    +130            github_user=src.github_user,
    +131            affiliation=src.affiliation,
    +132            email=src.email,
    +133            orcid=src.orcid,
    +134        )
    +135
    +136
    +137_author_conv = _AuthorConv(_Author_v0_2, Author)
    +138
    +139
    +140class Maintainer(_Maintainer_v0_2):
    +141    name: Optional[Annotated[str, Predicate(_has_no_slash)]] = None
    +142    github_user: str
    +143
    +144    @field_validator("github_user", mode="after")
    +145    def validate_gh_user(cls, value: str):
    +146        return validate_gh_user(value)
    +147
    +148
    +149class _MaintainerConv(Converter[_Maintainer_v0_2, Maintainer]):
    +150    def _convert(
    +151        self, src: _Maintainer_v0_2, tgt: "type[Maintainer | dict[str, Any]]"
    +152    ) -> "Maintainer | dict[str, Any]":
    +153        return tgt(
    +154            name=src.name,
    +155            github_user=src.github_user,
    +156            affiliation=src.affiliation,
    +157            email=src.email,
    +158            orcid=src.orcid,
    +159        )
    +160
    +161
    +162_maintainer_conv = _MaintainerConv(_Maintainer_v0_2, Maintainer)
    +163
    +164
    +165class CiteEntry(Node):
    +166    text: str
    +167    """free text description"""
    +168
    +169    doi: Optional[Doi] = None
+170    """A digital object identifier (DOI) is the preferred citation reference.
    +171    See https://www.doi.org/ for details. (alternatively specify `url`)"""
    +172
    +173    url: Optional[HttpUrl] = None
    +174    """URL to cite (preferably specify a `doi` instead)"""
    +175
    +176    @model_validator(mode="after")
    +177    def _check_doi_or_url(self):
    +178        if not self.doi and not self.url:
    +179            raise ValueError("Either 'doi' or 'url' is required")
    +180
    +181        return self
    +182
    +183
    +184class LinkedResource(Node):
    +185    """Reference to a bioimage.io resource"""
    +186
    +187    id: ResourceId
    +188    """A valid resource `id` from the official bioimage.io collection."""
    +189
    +190
    +191class GenericModelDescrBase(ResourceDescrBase):
+192    """Base for all resource descriptions including model descriptions"""
    +193
    +194    name: Annotated[
    +195        Annotated[
    +196            str, RestrictCharacters(string.ascii_letters + string.digits + "_- ()")
    +197        ],
    +198        MinLen(5),
    +199        MaxLen(128),
    +200        warn(MaxLen(64), "Name longer than 64 characters.", INFO),
    +201    ]
    +202    name: Annotated[NotEmpty[str], MaxLen(128)]
    +203    """A human-friendly name of the resource description.
+204    May only contain letters, digits, underscore, minus, parentheses and spaces."""
    +205
    +206    description: Annotated[
    +207        str, MaxLen(1024), warn(MaxLen(512), "Description longer than 512 characters.")
    +208    ]
    +209    """A string containing a brief description."""
    +210
    +211    covers: Annotated[
    +212        List[CoverImageSource],
    +213        Field(
    +214            examples=[],
    +215            description=(
    +216                "Cover images. Please use an image smaller than 500KB and an aspect"
    +217                " ratio width to height of 2:1 or 1:1.\nThe supported image formats"
    +218                f" are: {VALID_COVER_IMAGE_EXTENSIONS}"
    +219            ),
    +220        ),
    +221    ] = Field(default_factory=list)
    +222    """∈📦 Cover images."""
    +223
    +224    id_emoji: Optional[Annotated[str, Len(min_length=1, max_length=2)]] = None
    +225    """UTF-8 emoji for display alongside the `id`."""
    +226
    +227    authors: NotEmpty[List[Author]]
    +228    """The authors are the creators of this resource description and the primary points of contact."""
    +229
    +230    attachments: List[FileDescr] = Field(default_factory=list)
    +231    """file attachments"""
    +232
    +233    cite: NotEmpty[List[CiteEntry]]
    +234    """citations"""
    +235
    +236    license: Annotated[
    +237        Annotated[
    +238            Union[LicenseId, DeprecatedLicenseId], Field(union_mode="left_to_right")
    +239        ],
    +240        warn(
    +241            LicenseId,
    +242            "{value} is deprecated, see https://spdx.org/licenses/{value}.html",
    +243        ),
    +244        Field(examples=["CC0-1.0", "MIT", "BSD-2-Clause"]),
    +245    ]
    +246    """A [SPDX license identifier](https://spdx.org/licenses/).
+247    We do not support custom licenses beyond the SPDX license list. If you need that, please
    +248    [open a GitHub issue](https://github.com/bioimage-io/spec-bioimage-io/issues/new/choose)
    +249    to discuss your intentions with the community."""
    +250
    +251    config: Annotated[
    +252        Dict[str, YamlValue],
    +253        Field(
    +254            examples=[
    +255                dict(
    +256                    bioimageio={
    +257                        "my_custom_key": 3837283,
    +258                        "another_key": {"nested": "value"},
    +259                    },
    +260                    imagej={"macro_dir": "path/to/macro/file"},
    +261                )
    +262            ],
    +263        ),
    +264    ] = Field(default_factory=dict)
    +265    """A field for custom configuration that can contain any keys not present in the RDF spec.
    +266    This means you should not store, for example, a GitHub repo URL in `config` since there is a `git_repo` field.
    +267    Keys in `config` may be very specific to a tool or consumer software. To avoid conflicting definitions,
    +268    it is recommended to wrap added configuration into a sub-field named with the specific domain or tool name,
    +269    for example:
    +270    ```yaml
    +271    config:
    +272        bioimageio:  # here is the domain name
    +273            my_custom_key: 3837283
    +274            another_key:
    +275                nested: value
    +276        imagej:       # config specific to ImageJ
    +277            macro_dir: path/to/macro/file
    +278    ```
    +279    If possible, please use [`snake_case`](https://en.wikipedia.org/wiki/Snake_case) for keys in `config`.
    +280    You may want to list linked files additionally under `attachments` to include them when packaging a resource.
    +281    (Packaging a resource means downloading/copying important linked files and creating a ZIP archive that contains
    +282    an altered rdf.yaml file with local references to the downloaded files.)"""
    +283
    +284    git_repo: Annotated[
    +285        Optional[HttpUrl],
    +286        Field(
    +287            examples=[
    +288                "https://github.com/bioimage-io/spec-bioimage-io/tree/main/example_descriptions/models/unet2d_nuclei_broad"
    +289            ],
    +290        ),
    +291    ] = None
    +292    """A URL to the Git repository where the resource is being developed."""
    +293
    +294    icon: Union[
    +295        Annotated[str, Len(min_length=1, max_length=2)], ImportantFileSource, None
    +296    ] = None
    +297    """An icon for illustration, e.g. on bioimage.io"""
    +298
    +299    links: Annotated[
    +300        List[str],
    +301        Field(
    +302            examples=[
    +303                (
    +304                    "ilastik/ilastik",
    +305                    "deepimagej/deepimagej",
    +306                    "zero/notebook_u-net_3d_zerocostdl4mic",
    +307                )
    +308            ],
    +309        ),
    +310    ] = Field(default_factory=list)
    +311    """IDs of other bioimage.io resources"""
    +312
    +313    uploader: Optional[Uploader] = None
    +314    """The person who uploaded the model (e.g. to bioimage.io)"""
    +315
    +316    maintainers: List[Maintainer] = Field(default_factory=list)
    +317    """Maintainers of this resource.
+318    If not specified, `authors` are maintainers and at least some of them have to specify their `github_user` name"""
    +319
    +320    @partial(as_warning, severity=ALERT)
    +321    @field_validator("maintainers", mode="after")
    +322    @classmethod
    +323    def check_maintainers_exist(
    +324        cls, maintainers: List[Maintainer], info: ValidationInfo
    +325    ) -> List[Maintainer]:
    +326        if not maintainers and "authors" in info.data:
    +327            authors: List[Author] = info.data["authors"]
    +328            if all(a.github_user is None for a in authors):
    +329                raise ValueError(
    +330                    "Missing `maintainers` or any author in `authors` with a specified"
    +331                    + " `github_user` name."
    +332                )
    +333
    +334        return maintainers
    +335
    +336    tags: Annotated[
    +337        List[str],
    +338        Field(examples=[("unet2d", "pytorch", "nucleus", "segmentation", "dsb2018")]),
    +339    ] = Field(default_factory=list)
    +340    """Associated tags"""
    +341
    +342    @as_warning
    +343    @field_validator("tags")
    +344    @classmethod
    +345    def warn_about_tag_categories(
    +346        cls, value: List[str], info: ValidationInfo
    +347    ) -> List[str]:
    +348        categories = TAG_CATEGORIES.get(info.data["type"], {})
    +349        missing_categories: List[Dict[str, Sequence[str]]] = []
    +350        for cat, entries in categories.items():
    +351            if not any(e in value for e in entries):
    +352                missing_categories.append({cat: entries})
    +353
    +354        if missing_categories:
    +355            raise ValueError(
    +356                f"Missing tags from bioimage.io categories: {missing_categories}"
    +357            )
    +358
    +359        return value
    +360
    +361    version: Optional[Version] = None
    +362    """The version of the resource following SemVer 2.0."""
    +363
    +364    @model_validator(mode="before")
    +365    def _remove_version_number(  # pyright: ignore[reportUnknownParameterType]
    +366        cls, value: Union[Any, Dict[Any, Any]]
    +367    ):
    +368        if isinstance(value, dict):
    +369            vn: Any = value.pop("version_number", None)
    +370            if vn is not None and value.get("version") is None:
    +371                value["version"] = vn
    +372
    +373        return value  # pyright: ignore[reportUnknownVariableType]
    +374
    +375
    +376class GenericDescrBase(GenericModelDescrBase):
    +377    """Base for all resource descriptions except for the model descriptions"""
    +378
    +379    format_version: Literal["0.3.0"] = "0.3.0"
    +380    """The **format** version of this resource specification"""
    +381
    +382    @model_validator(mode="before")
    +383    @classmethod
    +384    def _convert_from_older_format(
    +385        cls, data: BioimageioYamlContent, /
    +386    ) -> BioimageioYamlContent:
    +387        convert_from_older_format(data)
    +388        return data
    +389
    +390    documentation: Annotated[
    +391        Optional[DocumentationSource],
    +392        Field(
    +393            examples=[
    +394                "https://raw.githubusercontent.com/bioimage-io/spec-bioimage-io/main/example_descriptions/models/unet2d_nuclei_broad/README.md",
    +395                "README.md",
    +396            ],
    +397        ),
    +398    ] = None
    +399    """∈📦 URL or relative path to a markdown file encoded in UTF-8 with additional documentation.
    +400    The recommended documentation file name is `README.md`. An `.md` suffix is mandatory."""
    +401
    +402    badges: List[BadgeDescr] = Field(default_factory=list)
    +403    """badges associated with this resource"""
    +404
    +405
    +406ResourceDescrType = TypeVar("ResourceDescrType", bound=GenericDescrBase)
    +407
    +408
    +409class GenericDescr(
    +410    GenericDescrBase, extra="ignore", title="bioimage.io generic specification"
    +411):
    +412    """Specification of the fields used in a generic bioimage.io-compliant resource description file (RDF).
    +413
    +414    An RDF is a YAML file that describes a resource such as a model, a dataset, or a notebook.
    +415    Note that those resources are described with a type-specific RDF.
+416    Use this generic resource description if none of the known specific types matches your resource.
    +417    """
    +418
    +419    type: Annotated[str, LowerCase] = Field("generic", frozen=True)
    +420    """The resource type assigns a broad category to the resource."""
    +421
    +422    id: Optional[ResourceId] = None
    +423    """bioimage.io-wide unique resource identifier
    +424    assigned by bioimage.io; version **un**specific."""
    +425
    +426    parent: Optional[ResourceId] = None
    +427    """The description from which this one is derived"""
    +428
    +429    source: Optional[HttpUrl] = None
    +430    """The primary source of the resource"""
    +431
    +432    @field_validator("type", mode="after")
    +433    @classmethod
    +434    def check_specific_types(cls, value: str) -> str:
    +435        if value in KNOWN_SPECIFIC_RESOURCE_TYPES:
    +436            raise ValueError(
    +437                f"Use the {value} description instead of this generic description for"
    +438                + f" your '{value}' resource."
    +439            )
    +440
    +441        return value
    +442
    +443
    +444class LinkedResourceNode(Node):
    +445
    +446    @model_validator(mode="before")
    +447    def _remove_version_number(  # pyright: ignore[reportUnknownParameterType]
    +448        cls, value: Union[Any, Dict[Any, Any]]
    +449    ):
    +450        if isinstance(value, dict):
    +451            vn: Any = value.pop("version_number", None)
    +452            if vn is not None and value.get("version") is None:
    +453                value["version"] = vn
    +454
    +455        return value  # pyright: ignore[reportUnknownVariableType]
    +456
    +457    version: Optional[Version] = None
    +458    """The version of the linked resource following SemVer 2.0."""
    +
    + + +
    +
    +
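Compared to format 0.2, the 0.3 module above tightens several fields: `name` is length- and character-restricted, `authors` and `cite` must be non-empty, and `license` is required. A hedged sketch of a minimal valid description (direct construction assumed; the DOI is made up; `github_user` is omitted because it would be checked online):

```python
# Hedged sketch: a minimal generic description in format 0.3.0.
from bioimageio.spec.generic.v0_3 import Author, CiteEntry, GenericDescr

descr = GenericDescr(
    name="example resource",  # at least 5 characters, restricted character set
    description="a minimal 0.3.0 generic RDF",
    authors=[Author(name="Jane Doe")],  # non-empty list of Author nodes
    cite=[CiteEntry(text="Jane Doe et al. 2024", doi="10.1234/example.doi")],
    license="CC0-1.0",  # required in 0.3
)
print(descr.format_version)  # 0.3.0
```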
    + KNOWN_SPECIFIC_RESOURCE_TYPES = +('application', 'collection', 'dataset', 'model', 'notebook') + + +
    + + + + +
    +
    +
    + VALID_COVER_IMAGE_EXTENSIONS = +('.gif', '.jpeg', '.jpg', '.png', '.svg') + + +
    + + + + +
    +
    + +
    + + class + ResourceId(bioimageio.spec._internal.validated_string.ValidatedString): + + + +
    + +
    85class ResourceId(ValidatedString):
    +86    root_model: ClassVar[Type[RootModel[Any]]] = RootModel[
    +87        Annotated[
    +88            NotEmpty[str],
    +89            RestrictCharacters(string.ascii_lowercase + string.digits + "_-/."),
    +90            annotated_types.Predicate(
    +91                lambda s: not (s.startswith("/") or s.endswith("/"))
    +92            ),
    +93        ]
    +94    ]
    +
    + + +


    +
    + + +
    +
    + root_model: ClassVar[Type[pydantic.root_model.RootModel[Any]]] = +<class 'pydantic.root_model.RootModel[Annotated[str, MinLen, RestrictCharacters, Predicate]]'> + + +
    + + +

    the pydantic root model to validate the string

    +
    + + +
    +
    +
    +
+
+DocumentationSource = typing.Annotated[typing.Union[typing.Annotated[pathlib.Path, PathType(path_type='file'), Predicate(is_absolute)], bioimageio.spec._internal.io.RelativeFilePath, bioimageio.spec._internal.url.HttpUrl], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')]), AfterValidator(func=<function _validate_md_suffix>), PlainSerializer(func=<function _package>, return_type=PydanticUndefined, when_used='unless-none')]
+
+class Author(bioimageio.spec.generic.v0_2.Author):
    113class Author(_Author_v0_2):
    +114    name: Annotated[str, Predicate(_has_no_slash)]
    +115    github_user: Optional[str] = None
    +116
    +117    @field_validator("github_user", mode="after")
    +118    def _validate_gh_user(cls, value: Optional[str]):
    +119        if value is None:
    +120            return None
    +121        else:
    +122            return validate_gh_user(value)
    +
+
Subpart of a resource description
+
+name: Annotated[str, Predicate(func=<function _has_no_slash at 0x7f5375da8040>)]
+
+github_user: Optional[str]
+
Inherited Members
+
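A small usage sketch for the _has_no_slash constraint on name shown above; the import path is assumed from this page's module, and github_user is left out because it may be checked against GitHub when IO checks are enabled.

    from pydantic import ValidationError

    from bioimageio.spec.generic.v0_3 import Author  # import path assumed

    author = Author(name="Jane Doe", affiliation="Example Lab")
    print(author.name)

    try:
        Author(name="Jane/Doe")  # slashes in `name` are rejected by the Predicate above
    except ValidationError as error:
        print(error)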
+
+class Maintainer(bioimageio.spec.generic.v0_2.Maintainer):
    141class Maintainer(_Maintainer_v0_2):
    +142    name: Optional[Annotated[str, Predicate(_has_no_slash)]] = None
    +143    github_user: str
    +144
    +145    @field_validator("github_user", mode="after")
    +146    def validate_gh_user(cls, value: str):
    +147        return validate_gh_user(value)
    +
+
Subpart of a resource description
+
+name: Optional[Annotated[str, Predicate(func=<function _has_no_slash at 0x7f5375da8040>)]]
+
+github_user: str
+
@field_validator('github_user', mode='after')
+def validate_gh_user(cls, value: str):
    145    @field_validator("github_user", mode="after")
    +146    def validate_gh_user(cls, value: str):
    +147        return validate_gh_user(value)
+
Inherited Members
+
+class CiteEntry(bioimageio.spec._internal.node.Node):
    166class CiteEntry(Node):
    +167    text: str
    +168    """free text description"""
    +169
    +170    doi: Optional[Doi] = None
    +171    """A digital object identifier (DOI) is the prefered citation reference.
    +172    See https://www.doi.org/ for details. (alternatively specify `url`)"""
    +173
    +174    url: Optional[HttpUrl] = None
    +175    """URL to cite (preferably specify a `doi` instead)"""
    +176
    +177    @model_validator(mode="after")
    +178    def _check_doi_or_url(self):
    +179        if not self.doi and not self.url:
    +180            raise ValueError("Either 'doi' or 'url' is required")
    +181
    +182        return self
+
Subpart of a resource description
+
+text: str
free text description
+
+doi: Optional[bioimageio.spec._internal.types.Doi]
A digital object identifier (DOI) is the preferred citation reference.
+See https://www.doi.org/ for details. (alternatively specify url)
+
+url: Optional[bioimageio.spec._internal.url.HttpUrl]
URL to cite (preferably specify a doi instead)
+
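A usage sketch of the _check_doi_or_url model validator above: at least one of doi or url must be given. The import path and the placeholder DOI are assumptions for illustration.

    from pydantic import ValidationError

    from bioimageio.spec.generic.v0_3 import CiteEntry  # import path assumed

    # valid: free text plus a (placeholder) DOI; a `url` would work as well
    entry = CiteEntry(text="Example training data reference", doi="10.1234/example.doi")

    try:
        CiteEntry(text="missing reference")  # neither `doi` nor `url` given
    except ValidationError as error:
        print(error)  # ... Either 'doi' or 'url' is required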
+class LinkedResource(bioimageio.spec._internal.node.Node):
    185class LinkedResource(Node):
    +186    """Reference to a bioimage.io resource"""
    +187
    +188    id: ResourceId
    +189    """A valid resource `id` from the official bioimage.io collection."""
    +
+
Reference to a bioimage.io resource
+
+id: ResourceId
A valid resource id from the official bioimage.io collection.
+
+class GenericModelDescrBase(bioimageio.spec._internal.common_nodes.ResourceDescrBase):
    192class GenericModelDescrBase(ResourceDescrBase):
    +193    """Base for all resource descriptions including of model descriptions"""
    +194
    +195    name: Annotated[
    +196        Annotated[
    +197            str, RestrictCharacters(string.ascii_letters + string.digits + "_- ()")
    +198        ],
    +199        MinLen(5),
    +200        MaxLen(128),
    +201        warn(MaxLen(64), "Name longer than 64 characters.", INFO),
    +202    ]
    +203    name: Annotated[NotEmpty[str], MaxLen(128)]
    +204    """A human-friendly name of the resource description.
    +205    May only contains letters, digits, underscore, minus, parentheses and spaces."""
    +206
    +207    description: Annotated[
    +208        str, MaxLen(1024), warn(MaxLen(512), "Description longer than 512 characters.")
    +209    ]
    +210    """A string containing a brief description."""
    +211
    +212    covers: Annotated[
    +213        List[CoverImageSource],
    +214        Field(
    +215            examples=[],
    +216            description=(
    +217                "Cover images. Please use an image smaller than 500KB and an aspect"
    +218                " ratio width to height of 2:1 or 1:1.\nThe supported image formats"
    +219                f" are: {VALID_COVER_IMAGE_EXTENSIONS}"
    +220            ),
    +221        ),
    +222    ] = Field(default_factory=list)
    +223    """∈📦 Cover images."""
    +224
    +225    id_emoji: Optional[Annotated[str, Len(min_length=1, max_length=2)]] = None
    +226    """UTF-8 emoji for display alongside the `id`."""
    +227
    +228    authors: NotEmpty[List[Author]]
    +229    """The authors are the creators of this resource description and the primary points of contact."""
    +230
    +231    attachments: List[FileDescr] = Field(default_factory=list)
    +232    """file attachments"""
    +233
    +234    cite: NotEmpty[List[CiteEntry]]
    +235    """citations"""
    +236
    +237    license: Annotated[
    +238        Annotated[
    +239            Union[LicenseId, DeprecatedLicenseId], Field(union_mode="left_to_right")
    +240        ],
    +241        warn(
    +242            LicenseId,
    +243            "{value} is deprecated, see https://spdx.org/licenses/{value}.html",
    +244        ),
    +245        Field(examples=["CC0-1.0", "MIT", "BSD-2-Clause"]),
    +246    ]
    +247    """A [SPDX license identifier](https://spdx.org/licenses/).
    +248    We do not support custom license beyond the SPDX license list, if you need that please
    +249    [open a GitHub issue](https://github.com/bioimage-io/spec-bioimage-io/issues/new/choose)
    +250    to discuss your intentions with the community."""
    +251
    +252    config: Annotated[
    +253        Dict[str, YamlValue],
    +254        Field(
    +255            examples=[
    +256                dict(
    +257                    bioimageio={
    +258                        "my_custom_key": 3837283,
    +259                        "another_key": {"nested": "value"},
    +260                    },
    +261                    imagej={"macro_dir": "path/to/macro/file"},
    +262                )
    +263            ],
    +264        ),
    +265    ] = Field(default_factory=dict)
    +266    """A field for custom configuration that can contain any keys not present in the RDF spec.
    +267    This means you should not store, for example, a GitHub repo URL in `config` since there is a `git_repo` field.
    +268    Keys in `config` may be very specific to a tool or consumer software. To avoid conflicting definitions,
    +269    it is recommended to wrap added configuration into a sub-field named with the specific domain or tool name,
    +270    for example:
    +271    ```yaml
    +272    config:
    +273        bioimageio:  # here is the domain name
    +274            my_custom_key: 3837283
    +275            another_key:
    +276                nested: value
    +277        imagej:       # config specific to ImageJ
    +278            macro_dir: path/to/macro/file
    +279    ```
    +280    If possible, please use [`snake_case`](https://en.wikipedia.org/wiki/Snake_case) for keys in `config`.
    +281    You may want to list linked files additionally under `attachments` to include them when packaging a resource.
    +282    (Packaging a resource means downloading/copying important linked files and creating a ZIP archive that contains
    +283    an altered rdf.yaml file with local references to the downloaded files.)"""
    +284
    +285    git_repo: Annotated[
    +286        Optional[HttpUrl],
    +287        Field(
    +288            examples=[
    +289                "https://github.com/bioimage-io/spec-bioimage-io/tree/main/example_descriptions/models/unet2d_nuclei_broad"
    +290            ],
    +291        ),
    +292    ] = None
    +293    """A URL to the Git repository where the resource is being developed."""
    +294
    +295    icon: Union[
    +296        Annotated[str, Len(min_length=1, max_length=2)], ImportantFileSource, None
    +297    ] = None
    +298    """An icon for illustration, e.g. on bioimage.io"""
    +299
    +300    links: Annotated[
    +301        List[str],
    +302        Field(
    +303            examples=[
    +304                (
    +305                    "ilastik/ilastik",
    +306                    "deepimagej/deepimagej",
    +307                    "zero/notebook_u-net_3d_zerocostdl4mic",
    +308                )
    +309            ],
    +310        ),
    +311    ] = Field(default_factory=list)
    +312    """IDs of other bioimage.io resources"""
    +313
    +314    uploader: Optional[Uploader] = None
    +315    """The person who uploaded the model (e.g. to bioimage.io)"""
    +316
    +317    maintainers: List[Maintainer] = Field(default_factory=list)
    +318    """Maintainers of this resource.
    +319    If not specified, `authors` are maintainers and at least some of them has to specify their `github_user` name"""
    +320
    +321    @partial(as_warning, severity=ALERT)
    +322    @field_validator("maintainers", mode="after")
    +323    @classmethod
    +324    def check_maintainers_exist(
    +325        cls, maintainers: List[Maintainer], info: ValidationInfo
    +326    ) -> List[Maintainer]:
    +327        if not maintainers and "authors" in info.data:
    +328            authors: List[Author] = info.data["authors"]
    +329            if all(a.github_user is None for a in authors):
    +330                raise ValueError(
    +331                    "Missing `maintainers` or any author in `authors` with a specified"
    +332                    + " `github_user` name."
    +333                )
    +334
    +335        return maintainers
    +336
    +337    tags: Annotated[
    +338        List[str],
    +339        Field(examples=[("unet2d", "pytorch", "nucleus", "segmentation", "dsb2018")]),
    +340    ] = Field(default_factory=list)
    +341    """Associated tags"""
    +342
    +343    @as_warning
    +344    @field_validator("tags")
    +345    @classmethod
    +346    def warn_about_tag_categories(
    +347        cls, value: List[str], info: ValidationInfo
    +348    ) -> List[str]:
    +349        categories = TAG_CATEGORIES.get(info.data["type"], {})
    +350        missing_categories: List[Dict[str, Sequence[str]]] = []
    +351        for cat, entries in categories.items():
    +352            if not any(e in value for e in entries):
    +353                missing_categories.append({cat: entries})
    +354
    +355        if missing_categories:
    +356            raise ValueError(
    +357                f"Missing tags from bioimage.io categories: {missing_categories}"
    +358            )
    +359
    +360        return value
    +361
    +362    version: Optional[Version] = None
    +363    """The version of the resource following SemVer 2.0."""
    +364
    +365    @model_validator(mode="before")
    +366    def _remove_version_number(  # pyright: ignore[reportUnknownParameterType]
    +367        cls, value: Union[Any, Dict[Any, Any]]
    +368    ):
    +369        if isinstance(value, dict):
    +370            vn: Any = value.pop("version_number", None)
    +371            if vn is not None and value.get("version") is None:
    +372                value["version"] = vn
    +373
    +374        return value  # pyright: ignore[reportUnknownVariableType]
    +
    + + +

Base for all resource descriptions, including model descriptions

+
+name: Annotated[str, MinLen(min_length=1), MaxLen(max_length=128)]
A human-friendly name of the resource description.
+May only contain letters, digits, underscores, minus signs, parentheses, and spaces.
    +
    + + +
    +
    +
    + description: Annotated[str, MaxLen(max_length=1024), AfterWarner(func=<function as_warning.<locals>.wrapper at 0x7f537133d800>, severity=30, msg='Description longer than 512 characters.', context={'typ': Annotated[Any, MaxLen(max_length=512)]})] + + +
    + + +

    A string containing a brief description.

    +
    + + +
    +
    +
    + covers: Annotated[List[Annotated[Union[Annotated[pathlib.Path, PathType(path_type='file'), Predicate(is_absolute)], bioimageio.spec._internal.io.RelativeFilePath, bioimageio.spec._internal.url.HttpUrl], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')]), WithSuffix(suffix=('.gif', '.jpeg', '.jpg', '.png', '.svg', '.tif', '.tiff'), case_sensitive=False), PlainSerializer(func=<function _package at 0x7f538192d620>, return_type=PydanticUndefined, when_used='unless-none')]], FieldInfo(annotation=NoneType, required=True, description="Cover images. Please use an image smaller than 500KB and an aspect ratio width to height of 2:1 or 1:1.\nThe supported image formats are: ('.gif', '.jpeg', '.jpg', '.png', '.svg')", examples=[])] + + +
    + + +

    ∈📦 Cover images.

    +
    + + +
    +
    +
    + id_emoji: Optional[Annotated[str, Len(min_length=1, max_length=2)]] + + +
    + + +

    UTF-8 emoji for display alongside the id.

    +
    + + +
    +
    +
    + authors: Annotated[List[Author], MinLen(min_length=1)] + + +
    + + +

    The authors are the creators of this resource description and the primary points of contact.

    +
    + + +
    +
    +
    + attachments: List[bioimageio.spec._internal.io.FileDescr] + + +
    + + +

    file attachments

    +
    + + +
    +
    +
    + cite: Annotated[List[CiteEntry], MinLen(min_length=1)] + + +
    + + +

    citations

    +
    + + +
    +
    +
    + license: Annotated[Union[bioimageio.spec._internal.license_id.LicenseId, bioimageio.spec._internal.license_id.DeprecatedLicenseId], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')]), AfterWarner(func=<function as_warning.<locals>.wrapper at 0x7f537133e0c0>, severity=30, msg='{value} is deprecated, see https://spdx.org/licenses/{value}.html', context={'typ': <class 'bioimageio.spec._internal.license_id.LicenseId'>}), FieldInfo(annotation=NoneType, required=True, examples=['CC0-1.0', 'MIT', 'BSD-2-Clause'])] + + +
    + + +

A SPDX license identifier.
+We do not support custom licenses beyond the SPDX license list; if you need one, please
+open a GitHub issue to discuss your intentions with the community.

    +
    + + +
    +
    +
    + config: Annotated[Dict[str, YamlValue], FieldInfo(annotation=NoneType, required=True, examples=[{'bioimageio': {'my_custom_key': 3837283, 'another_key': {'nested': 'value'}}, 'imagej': {'macro_dir': 'path/to/macro/file'}}])] + + +
    + + +

    A field for custom configuration that can contain any keys not present in the RDF spec. +This means you should not store, for example, a GitHub repo URL in config since there is a git_repo field. +Keys in config may be very specific to a tool or consumer software. To avoid conflicting definitions, +it is recommended to wrap added configuration into a sub-field named with the specific domain or tool name, +for example:

    + +
    +
    config:
    +    bioimageio:  # here is the domain name
    +        my_custom_key: 3837283
    +        another_key:
    +            nested: value
    +    imagej:       # config specific to ImageJ
    +        macro_dir: path/to/macro/file
    +
    +
    + +

    If possible, please use snake_case for keys in config. +You may want to list linked files additionally under attachments to include them when packaging a resource. +(Packaging a resource means downloading/copying important linked files and creating a ZIP archive that contains +an altered rdf.yaml file with local references to the downloaded files.)

    +
    + + +
    +
    +
    + git_repo: Annotated[Optional[bioimageio.spec._internal.url.HttpUrl], FieldInfo(annotation=NoneType, required=True, examples=['https://github.com/bioimage-io/spec-bioimage-io/tree/main/example_descriptions/models/unet2d_nuclei_broad'])] + + +
    + + +

    A URL to the Git repository where the resource is being developed.

    +
    + + +
    +
    +
    + icon: Union[Annotated[str, Len(min_length=1, max_length=2)], Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')]), AfterValidator(func=<function wo_special_file_name at 0x7f538192cea0>), PlainSerializer(func=<function _package at 0x7f538192d620>, return_type=PydanticUndefined, when_used='unless-none')], NoneType] + + +
    + + +

    An icon for illustration, e.g. on bioimage.io

    +
    + + +
    + +
    +
    + uploader: Optional[bioimageio.spec.generic.v0_2.Uploader] + + +
    + + +

    The person who uploaded the model (e.g. to bioimage.io)

    +
    + + +
    +
    +
    + maintainers: List[Maintainer] + + +
    + + +

Maintainers of this resource.
+If not specified, the authors are the maintainers, and at least one of them has to specify their github_user name.

    +
    + + +
    +
+
+def check_maintainers_exist(value: Any, info: pydantic_core.core_schema.ValidationInfo) -> Any:
    75    def wrapper(value: Any, info: ValidationInfo) -> Any:
    +76        try:
    +77            call_validator_func(func, mode, value, info)
    +78        except (AssertionError, ValueError) as e:
    +79            issue_warning(
    +80                msg or ",".join(e.args),
    +81                value=value,
    +82                severity=severity,
    +83                msg_context=msg_context,
    +84            )
    +85
    +86        return value
    +
    + + + + +
    +
    +
    + tags: Annotated[List[str], FieldInfo(annotation=NoneType, required=True, examples=[('unet2d', 'pytorch', 'nucleus', 'segmentation', 'dsb2018')])] + + +
    + + +

    Associated tags

    +
    + + +
    +
+
+def warn_about_tag_categories(value: Any, info: pydantic_core.core_schema.ValidationInfo) -> Any:
    75    def wrapper(value: Any, info: ValidationInfo) -> Any:
    +76        try:
    +77            call_validator_func(func, mode, value, info)
    +78        except (AssertionError, ValueError) as e:
    +79            issue_warning(
    +80                msg or ",".join(e.args),
    +81                value=value,
    +82                severity=severity,
    +83                msg_context=msg_context,
    +84            )
    +85
    +86        return value
    +
    + + + + +
    +
    +
    + version: Optional[bioimageio.spec._internal.version_type.Version] + + +
    + + +

    The version of the resource following SemVer 2.0.

    +
    + + +
    +
+
+def model_post_init(self: pydantic.main.BaseModel, context: Any, /) -> None:
    124                    def wrapped_model_post_init(self: BaseModel, context: Any, /) -> None:
    +125                        """We need to both initialize private attributes and call the user-defined model_post_init
    +126                        method.
    +127                        """
    +128                        init_private_attributes(self, context)
    +129                        original_model_post_init(self, context)
    +
    + + +

    We need to both initialize private attributes and call the user-defined model_post_init +method.

    +
    + + +
    +
    +
+
+class GenericDescrBase(GenericModelDescrBase):
    377class GenericDescrBase(GenericModelDescrBase):
    +378    """Base for all resource descriptions except for the model descriptions"""
    +379
    +380    format_version: Literal["0.3.0"] = "0.3.0"
    +381    """The **format** version of this resource specification"""
    +382
    +383    @model_validator(mode="before")
    +384    @classmethod
    +385    def _convert_from_older_format(
    +386        cls, data: BioimageioYamlContent, /
    +387    ) -> BioimageioYamlContent:
    +388        convert_from_older_format(data)
    +389        return data
    +390
    +391    documentation: Annotated[
    +392        Optional[DocumentationSource],
    +393        Field(
    +394            examples=[
    +395                "https://raw.githubusercontent.com/bioimage-io/spec-bioimage-io/main/example_descriptions/models/unet2d_nuclei_broad/README.md",
    +396                "README.md",
    +397            ],
    +398        ),
    +399    ] = None
    +400    """∈📦 URL or relative path to a markdown file encoded in UTF-8 with additional documentation.
    +401    The recommended documentation file name is `README.md`. An `.md` suffix is mandatory."""
    +402
    +403    badges: List[BadgeDescr] = Field(default_factory=list)
    +404    """badges associated with this resource"""
    +
    + + +

    Base for all resource descriptions except for the model descriptions

    +
    + + +
    +
    + format_version: Literal['0.3.0'] + + +
    + + +

    The format version of this resource specification

    +
    + + +
    +
    +
    + documentation: Annotated[Optional[Annotated[Union[Annotated[pathlib.Path, PathType(path_type='file'), Predicate(is_absolute)], bioimageio.spec._internal.io.RelativeFilePath, bioimageio.spec._internal.url.HttpUrl], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')]), AfterValidator(func=<function _validate_md_suffix at 0x7f5380dc7e20>), PlainSerializer(func=<function _package at 0x7f538192d620>, return_type=PydanticUndefined, when_used='unless-none')]], FieldInfo(annotation=NoneType, required=True, examples=['https://raw.githubusercontent.com/bioimage-io/spec-bioimage-io/main/example_descriptions/models/unet2d_nuclei_broad/README.md', 'README.md'])] + + +
    + + +

    ∈📦 URL or relative path to a markdown file encoded in UTF-8 with additional documentation. +The recommended documentation file name is README.md. An .md suffix is mandatory.

    +
    + + +
    +
    + + + +

    badges associated with this resource

    +
    + + +
    +
    +
    + implemented_format_version: ClassVar[str] = +'0.3.0' + + +
    + + + + +
    +
    +
    + implemented_format_version_tuple: ClassVar[Tuple[int, int, int]] = +(0, 3, 0) + + +
    + + + + +
    +
+
+def model_post_init(self: pydantic.main.BaseModel, context: Any, /) -> None:
    124                    def wrapped_model_post_init(self: BaseModel, context: Any, /) -> None:
    +125                        """We need to both initialize private attributes and call the user-defined model_post_init
    +126                        method.
    +127                        """
    +128                        init_private_attributes(self, context)
    +129                        original_model_post_init(self, context)
    +
    + + +

    We need to both initialize private attributes and call the user-defined model_post_init +method.

    +
    + + +
    + +
    +
+
+class GenericDescr(GenericDescrBase):
    410class GenericDescr(
    +411    GenericDescrBase, extra="ignore", title="bioimage.io generic specification"
    +412):
    +413    """Specification of the fields used in a generic bioimage.io-compliant resource description file (RDF).
    +414
    +415    An RDF is a YAML file that describes a resource such as a model, a dataset, or a notebook.
    +416    Note that those resources are described with a type-specific RDF.
    +417    Use this generic resource description, if none of the known specific types matches your resource.
    +418    """
    +419
    +420    type: Annotated[str, LowerCase] = Field("generic", frozen=True)
    +421    """The resource type assigns a broad category to the resource."""
    +422
    +423    id: Optional[ResourceId] = None
    +424    """bioimage.io-wide unique resource identifier
    +425    assigned by bioimage.io; version **un**specific."""
    +426
    +427    parent: Optional[ResourceId] = None
    +428    """The description from which this one is derived"""
    +429
    +430    source: Optional[HttpUrl] = None
    +431    """The primary source of the resource"""
    +432
    +433    @field_validator("type", mode="after")
    +434    @classmethod
    +435    def check_specific_types(cls, value: str) -> str:
    +436        if value in KNOWN_SPECIFIC_RESOURCE_TYPES:
    +437            raise ValueError(
    +438                f"Use the {value} description instead of this generic description for"
    +439                + f" your '{value}' resource."
    +440            )
    +441
    +442        return value
    +
    + + +

    Specification of the fields used in a generic bioimage.io-compliant resource description file (RDF).

    + +

    An RDF is a YAML file that describes a resource such as a model, a dataset, or a notebook. +Note that those resources are described with a type-specific RDF. +Use this generic resource description, if none of the known specific types matches your resource.

    +
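A sketch of constructing this generic description in Python; the ValidationContext usage is assumed from the bioimageio.spec public API referenced elsewhere in these docs, and all field values are illustrative placeholders.

    from bioimageio.spec import ValidationContext  # assumed to be exported by bioimageio.spec
    from bioimageio.spec.generic.v0_3 import Author, CiteEntry, GenericDescr

    # skip download/existence checks so this sketch stays offline
    with ValidationContext(perform_io_checks=False):
        descr = GenericDescr(
            name="example scripts",
            description="A small collection of example processing scripts.",
            authors=[Author(name="Jane Doe")],
            cite=[CiteEntry(text="please cite this example", url="https://example.com/scripts")],
            license="MIT",
        )

    print(descr.type, descr.format_version)  # generic 0.3.0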
    + + +
    +
    + type: Annotated[str, Annotated[~_StrType, Predicate(str.islower)]] + + +
    + + +

    The resource type assigns a broad category to the resource.

    +
    + + +
    +
    +
    + id: Optional[ResourceId] + + +
    + + +

    bioimage.io-wide unique resource identifier +assigned by bioimage.io; version unspecific.

    +
    + + +
    +
    +
    + parent: Optional[ResourceId] + + +
    + + +

    The description from which this one is derived

    +
    + + +
    +
    +
    + source: Optional[bioimageio.spec._internal.url.HttpUrl] + + +
    + + +

    The primary source of the resource

    +
    + + +
    +
    + +
    +
@field_validator('type', mode='after')
+@classmethod
+def check_specific_types(cls, value: str) -> str:
    + +
    433    @field_validator("type", mode="after")
    +434    @classmethod
    +435    def check_specific_types(cls, value: str) -> str:
    +436        if value in KNOWN_SPECIFIC_RESOURCE_TYPES:
    +437            raise ValueError(
    +438                f"Use the {value} description instead of this generic description for"
    +439                + f" your '{value}' resource."
    +440            )
    +441
    +442        return value
    +
    + + + + +
    +
    +
    + implemented_format_version: ClassVar[str] = +'0.3.0' + + +
    + + + + +
    +
    +
    + implemented_format_version_tuple: ClassVar[Tuple[int, int, int]] = +(0, 3, 0) + + +
    + + + + +
    +
+
+def model_post_init(self: pydantic.main.BaseModel, context: Any, /) -> None:
    124                    def wrapped_model_post_init(self: BaseModel, context: Any, /) -> None:
    +125                        """We need to both initialize private attributes and call the user-defined model_post_init
    +126                        method.
    +127                        """
    +128                        init_private_attributes(self, context)
    +129                        original_model_post_init(self, context)
    +
    + + +

    We need to both initialize private attributes and call the user-defined model_post_init +method.

    +
    + + +
    + +
    +
+
+class LinkedResourceNode(bioimageio.spec._internal.node.Node):
    445class LinkedResourceNode(Node):
    +446
    +447    @model_validator(mode="before")
    +448    def _remove_version_number(  # pyright: ignore[reportUnknownParameterType]
    +449        cls, value: Union[Any, Dict[Any, Any]]
    +450    ):
    +451        if isinstance(value, dict):
    +452            vn: Any = value.pop("version_number", None)
    +453            if vn is not None and value.get("version") is None:
    +454                value["version"] = vn
    +455
    +456        return value  # pyright: ignore[reportUnknownVariableType]
    +457
    +458    version: Optional[Version] = None
    +459    """The version of the linked resource following SemVer 2.0."""
    +
    + + +

    Subpart of a resource description

    +
    + + +
    +
    + version: Optional[bioimageio.spec._internal.version_type.Version] + + +
    + + +

    The version of the linked resource following SemVer 2.0.

    +
    + + +
    +
    +
\ No newline at end of file
diff --git a/bioimageio/spec/model.html b/bioimageio/spec/model.html
new file mode 100644
index 00000000..90f99b4f
--- /dev/null
+++ b/bioimageio/spec/model.html
@@ -0,0 +1,1662 @@
+bioimageio.spec.model API documentation
+
+bioimageio.spec.model
+
implementations of all released minor versions are available in submodules:
+- model v0_4: bioimageio.spec.model.v0_4.ModelDescr
+- model v0_5: bioimageio.spec.model.v0_5.ModelDescr
+
     1# autogen: start
    + 2"""
    + 3implementaions of all released minor versions are available in submodules:
    + 4- model v0_4: `bioimageio.spec.model.v0_4.ModelDescr`
    + 5- model v0_5: `bioimageio.spec.model.v0_5.ModelDescr`
    + 6"""
    + 7
    + 8from typing import Union
    + 9
    +10from pydantic import Discriminator
    +11from typing_extensions import Annotated
    +12
    +13from . import v0_4, v0_5
    +14
    +15ModelDescr = v0_5.ModelDescr
    +16ModelDescr_v0_4 = v0_4.ModelDescr
    +17ModelDescr_v0_5 = v0_5.ModelDescr
    +18
    +19AnyModelDescr = Annotated[
    +20    Union[ModelDescr_v0_4, ModelDescr_v0_5], Discriminator("format_version")
    +21]
    +22"""Union of any released model desription"""
    +23# autogen: stop
    +
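A brief sketch of how this discriminated union can be used; the TypeAdapter-based dispatch shown here is an assumption for illustration rather than the package's prescribed entry point (bioimageio.spec.load_description is the usual high-level way to obtain a description).

    from pydantic import TypeAdapter

    from bioimageio.spec.model import AnyModelDescr, ModelDescr, ModelDescr_v0_4

    # AnyModelDescr is discriminated on `format_version`, so a TypeAdapter can
    # dispatch a raw RDF dict to the matching description class:
    adapter = TypeAdapter(AnyModelDescr)
    # descr = adapter.validate_python(rdf_dict)  # -> ModelDescr_v0_4 or ModelDescr (v0_5)

    # the unversioned name points at the latest released minor version
    assert ModelDescr is not ModelDescr_v0_4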
+
+class ModelDescr(bioimageio.spec.generic.v0_3.GenericModelDescrBase):
    2062class ModelDescr(GenericModelDescrBase, title="bioimage.io model specification"):
    +2063    """Specification of the fields used in a bioimage.io-compliant RDF to describe AI models with pretrained weights.
    +2064    These fields are typically stored in a YAML file which we call a model resource description file (model RDF).
    +2065    """
    +2066
    +2067    format_version: Literal["0.5.3"] = "0.5.3"
    +2068    """Version of the bioimage.io model description specification used.
    +2069    When creating a new model always use the latest micro/patch version described here.
    +2070    The `format_version` is important for any consumer software to understand how to parse the fields.
    +2071    """
    +2072
    +2073    type: Literal["model"] = "model"
    +2074    """Specialized resource type 'model'"""
    +2075
    +2076    id: Optional[ModelId] = None
    +2077    """bioimage.io-wide unique resource identifier
    +2078    assigned by bioimage.io; version **un**specific."""
    +2079
    +2080    authors: NotEmpty[List[Author]]
    +2081    """The authors are the creators of the model RDF and the primary points of contact."""
    +2082
    +2083    documentation: Annotated[
    +2084        DocumentationSource,
    +2085        Field(
    +2086            examples=[
    +2087                "https://raw.githubusercontent.com/bioimage-io/spec-bioimage-io/main/example_descriptions/models/unet2d_nuclei_broad/README.md",
    +2088                "README.md",
    +2089            ],
    +2090        ),
    +2091    ]
    +2092    """∈📦 URL or relative path to a markdown file with additional documentation.
    +2093    The recommended documentation file name is `README.md`. An `.md` suffix is mandatory.
    +2094    The documentation should include a '#[#] Validation' (sub)section
    +2095    with details on how to quantitatively validate the model on unseen data."""
    +2096
    +2097    @field_validator("documentation", mode="after")
    +2098    @classmethod
    +2099    def _validate_documentation(cls, value: DocumentationSource) -> DocumentationSource:
    +2100        if not validation_context_var.get().perform_io_checks:
    +2101            return value
    +2102
    +2103        doc_path = download(value).path
    +2104        doc_content = doc_path.read_text(encoding="utf-8")
    +2105        assert isinstance(doc_content, str)
    +2106        if not re.match("#.*[vV]alidation", doc_content):
    +2107            issue_warning(
    +2108                "No '# Validation' (sub)section found in {value}.",
    +2109                value=value,
    +2110                field="documentation",
    +2111            )
    +2112
    +2113        return value
    +2114
    +2115    inputs: NotEmpty[Sequence[InputTensorDescr]]
    +2116    """Describes the input tensors expected by this model."""
    +2117
    +2118    @field_validator("inputs", mode="after")
    +2119    @classmethod
    +2120    def _validate_input_axes(
    +2121        cls, inputs: Sequence[InputTensorDescr]
    +2122    ) -> Sequence[InputTensorDescr]:
    +2123        input_size_refs = cls._get_axes_with_independent_size(inputs)
    +2124
    +2125        for i, ipt in enumerate(inputs):
    +2126            valid_independent_refs: Dict[
    +2127                Tuple[TensorId, AxisId],
    +2128                Tuple[TensorDescr, AnyAxis, Union[int, ParameterizedSize]],
    +2129            ] = {
    +2130                **{
    +2131                    (ipt.id, a.id): (ipt, a, a.size)
    +2132                    for a in ipt.axes
    +2133                    if not isinstance(a, BatchAxis)
    +2134                    and isinstance(a.size, (int, ParameterizedSize))
    +2135                },
    +2136                **input_size_refs,
    +2137            }
    +2138            for a, ax in enumerate(ipt.axes):
    +2139                cls._validate_axis(
    +2140                    "inputs",
    +2141                    i=i,
    +2142                    tensor_id=ipt.id,
    +2143                    a=a,
    +2144                    axis=ax,
    +2145                    valid_independent_refs=valid_independent_refs,
    +2146                )
    +2147        return inputs
    +2148
    +2149    @staticmethod
    +2150    def _validate_axis(
    +2151        field_name: str,
    +2152        i: int,
    +2153        tensor_id: TensorId,
    +2154        a: int,
    +2155        axis: AnyAxis,
    +2156        valid_independent_refs: Dict[
    +2157            Tuple[TensorId, AxisId],
    +2158            Tuple[TensorDescr, AnyAxis, Union[int, ParameterizedSize]],
    +2159        ],
    +2160    ):
    +2161        if isinstance(axis, BatchAxis) or isinstance(
    +2162            axis.size, (int, ParameterizedSize, DataDependentSize)
    +2163        ):
    +2164            return
    +2165        elif not isinstance(axis.size, SizeReference):
    +2166            assert_never(axis.size)
    +2167
    +2168        # validate axis.size SizeReference
    +2169        ref = (axis.size.tensor_id, axis.size.axis_id)
    +2170        if ref not in valid_independent_refs:
    +2171            raise ValueError(
    +2172                "Invalid tensor axis reference at"
    +2173                + f" {field_name}[{i}].axes[{a}].size: {axis.size}."
    +2174            )
    +2175        if ref == (tensor_id, axis.id):
    +2176            raise ValueError(
    +2177                "Self-referencing not allowed for"
    +2178                + f" {field_name}[{i}].axes[{a}].size: {axis.size}"
    +2179            )
    +2180        if axis.type == "channel":
    +2181            if valid_independent_refs[ref][1].type != "channel":
    +2182                raise ValueError(
    +2183                    "A channel axis' size may only reference another fixed size"
    +2184                    + " channel axis."
    +2185                )
    +2186            if isinstance(axis.channel_names, str) and "{i}" in axis.channel_names:
    +2187                ref_size = valid_independent_refs[ref][2]
    +2188                assert isinstance(ref_size, int), (
    +2189                    "channel axis ref (another channel axis) has to specify fixed"
    +2190                    + " size"
    +2191                )
    +2192                generated_channel_names = [
    +2193                    Identifier(axis.channel_names.format(i=i))
    +2194                    for i in range(1, ref_size + 1)
    +2195                ]
    +2196                axis.channel_names = generated_channel_names
    +2197
    +2198        if (ax_unit := getattr(axis, "unit", None)) != (
    +2199            ref_unit := getattr(valid_independent_refs[ref][1], "unit", None)
    +2200        ):
    +2201            raise ValueError(
    +2202                "The units of an axis and its reference axis need to match, but"
    +2203                + f" '{ax_unit}' != '{ref_unit}'."
    +2204            )
    +2205        ref_axis = valid_independent_refs[ref][1]
    +2206        if isinstance(ref_axis, BatchAxis):
    +2207            raise ValueError(
    +2208                f"Invalid reference axis '{ref_axis.id}' for {tensor_id}.{axis.id}"
    +2209                + " (a batch axis is not allowed as reference)."
    +2210            )
    +2211
    +2212        if isinstance(axis, WithHalo):
    +2213            min_size = axis.size.get_size(axis, ref_axis, n=0)
    +2214            if (min_size - 2 * axis.halo) < 1:
    +2215                raise ValueError(
    +2216                    f"axis {axis.id} with minimum size {min_size} is too small for halo"
    +2217                    + f" {axis.halo}."
    +2218                )
    +2219
    +2220            input_halo = axis.halo * axis.scale / ref_axis.scale
    +2221            if input_halo != int(input_halo) or input_halo % 2 == 1:
    +2222                raise ValueError(
    +2223                    f"input_halo {input_halo} (output_halo {axis.halo} *"
    +2224                    + f" output_scale {axis.scale} / input_scale {ref_axis.scale})"
    +2225                    + f" is not an even integer for {tensor_id}.{axis.id}."
    +2226                )
    +2227
    +2228    @model_validator(mode="after")
    +2229    def _validate_test_tensors(self) -> Self:
    +2230        if not validation_context_var.get().perform_io_checks:
    +2231            return self
    +2232
    +2233        test_arrays = [
    +2234            load_array(descr.test_tensor.download().path)
    +2235            for descr in chain(self.inputs, self.outputs)
    +2236        ]
    +2237        tensors = {
    +2238            descr.id: (descr, array)
    +2239            for descr, array in zip(chain(self.inputs, self.outputs), test_arrays)
    +2240        }
    +2241        validate_tensors(tensors, tensor_origin="test_tensor")
    +2242        return self
    +2243
    +2244    @model_validator(mode="after")
    +2245    def _validate_tensor_references_in_proc_kwargs(self, info: ValidationInfo) -> Self:
    +2246        ipt_refs = {t.id for t in self.inputs}
    +2247        out_refs = {t.id for t in self.outputs}
    +2248        for ipt in self.inputs:
    +2249            for p in ipt.preprocessing:
    +2250                ref = p.kwargs.get("reference_tensor")
    +2251                if ref is None:
    +2252                    continue
    +2253                if ref not in ipt_refs:
    +2254                    raise ValueError(
    +2255                        f"`reference_tensor` '{ref}' not found. Valid input tensor"
    +2256                        + f" references are: {ipt_refs}."
    +2257                    )
    +2258
    +2259        for out in self.outputs:
    +2260            for p in out.postprocessing:
    +2261                ref = p.kwargs.get("reference_tensor")
    +2262                if ref is None:
    +2263                    continue
    +2264
    +2265                if ref not in ipt_refs and ref not in out_refs:
    +2266                    raise ValueError(
    +2267                        f"`reference_tensor` '{ref}' not found. Valid tensor references"
    +2268                        + f" are: {ipt_refs | out_refs}."
    +2269                    )
    +2270
    +2271        return self
    +2272
    +2273    # TODO: use validate funcs in validate_test_tensors
    +2274    # def validate_inputs(self, input_tensors: Mapping[TensorId, NDArray[Any]]) -> Mapping[TensorId, NDArray[Any]]:
    +2275
    +2276    name: Annotated[
    +2277        Annotated[
    +2278            str, RestrictCharacters(string.ascii_letters + string.digits + "_- ()")
    +2279        ],
    +2280        MinLen(5),
    +2281        MaxLen(128),
    +2282        warn(MaxLen(64), "Name longer than 64 characters.", INFO),
    +2283    ]
    +2284    """A human-readable name of this model.
    +2285    It should be no longer than 64 characters
    +2286    and may only contain letter, number, underscore, minus, parentheses and spaces.
    +2287    We recommend to chose a name that refers to the model's task and image modality.
    +2288    """
    +2289
    +2290    outputs: NotEmpty[Sequence[OutputTensorDescr]]
    +2291    """Describes the output tensors."""
    +2292
    +2293    @field_validator("outputs", mode="after")
    +2294    @classmethod
    +2295    def _validate_tensor_ids(
    +2296        cls, outputs: Sequence[OutputTensorDescr], info: ValidationInfo
    +2297    ) -> Sequence[OutputTensorDescr]:
    +2298        tensor_ids = [
    +2299            t.id for t in info.data.get("inputs", []) + info.data.get("outputs", [])
    +2300        ]
    +2301        duplicate_tensor_ids: List[str] = []
    +2302        seen: Set[str] = set()
    +2303        for t in tensor_ids:
    +2304            if t in seen:
    +2305                duplicate_tensor_ids.append(t)
    +2306
    +2307            seen.add(t)
    +2308
    +2309        if duplicate_tensor_ids:
    +2310            raise ValueError(f"Duplicate tensor ids: {duplicate_tensor_ids}")
    +2311
    +2312        return outputs
    +2313
    +2314    @staticmethod
    +2315    def _get_axes_with_parameterized_size(
    +2316        io: Union[Sequence[InputTensorDescr], Sequence[OutputTensorDescr]],
    +2317    ):
    +2318        return {
    +2319            f"{t.id}.{a.id}": (t, a, a.size)
    +2320            for t in io
    +2321            for a in t.axes
    +2322            if not isinstance(a, BatchAxis) and isinstance(a.size, ParameterizedSize)
    +2323        }
    +2324
    +2325    @staticmethod
    +2326    def _get_axes_with_independent_size(
    +2327        io: Union[Sequence[InputTensorDescr], Sequence[OutputTensorDescr]],
    +2328    ):
    +2329        return {
    +2330            (t.id, a.id): (t, a, a.size)
    +2331            for t in io
    +2332            for a in t.axes
    +2333            if not isinstance(a, BatchAxis)
    +2334            and isinstance(a.size, (int, ParameterizedSize))
    +2335        }
    +2336
    +2337    @field_validator("outputs", mode="after")
    +2338    @classmethod
    +2339    def _validate_output_axes(
    +2340        cls, outputs: List[OutputTensorDescr], info: ValidationInfo
    +2341    ) -> List[OutputTensorDescr]:
    +2342        input_size_refs = cls._get_axes_with_independent_size(
    +2343            info.data.get("inputs", [])
    +2344        )
    +2345        output_size_refs = cls._get_axes_with_independent_size(outputs)
    +2346
    +2347        for i, out in enumerate(outputs):
    +2348            valid_independent_refs: Dict[
    +2349                Tuple[TensorId, AxisId],
    +2350                Tuple[TensorDescr, AnyAxis, Union[int, ParameterizedSize]],
    +2351            ] = {
    +2352                **{
    +2353                    (out.id, a.id): (out, a, a.size)
    +2354                    for a in out.axes
    +2355                    if not isinstance(a, BatchAxis)
    +2356                    and isinstance(a.size, (int, ParameterizedSize))
    +2357                },
    +2358                **input_size_refs,
    +2359                **output_size_refs,
    +2360            }
    +2361            for a, ax in enumerate(out.axes):
    +2362                cls._validate_axis(
    +2363                    "outputs",
    +2364                    i,
    +2365                    out.id,
    +2366                    a,
    +2367                    ax,
    +2368                    valid_independent_refs=valid_independent_refs,
    +2369                )
    +2370
    +2371        return outputs
    +2372
    +2373    packaged_by: List[Author] = Field(default_factory=list)
    +2374    """The persons that have packaged and uploaded this model.
    +2375    Only required if those persons differ from the `authors`."""
    +2376
    +2377    parent: Optional[LinkedModel] = None
    +2378    """The model from which this model is derived, e.g. by fine-tuning the weights."""
    +2379
    +2380    # todo: add parent self check once we have `id`
    +2381    # @model_validator(mode="after")
    +2382    # def validate_parent_is_not_self(self) -> Self:
    +2383    #     if self.parent is not None and self.parent == self.id:
    +2384    #         raise ValueError("The model may not reference itself as parent model")
    +2385
    +2386    #     return self
    +2387
    +2388    run_mode: Annotated[
    +2389        Optional[RunMode],
    +2390        warn(None, "Run mode '{value}' has limited support across consumer softwares."),
    +2391    ] = None
    +2392    """Custom run mode for this model: for more complex prediction procedures like test time
    +2393    data augmentation that currently cannot be expressed in the specification.
    +2394    No standard run modes are defined yet."""
    +2395
    +2396    timestamp: Datetime = Datetime(datetime.now())
    +2397    """Timestamp in [ISO 8601](#https://en.wikipedia.org/wiki/ISO_8601) format
    +2398    with a few restrictions listed [here](https://docs.python.org/3/library/datetime.html#datetime.datetime.fromisoformat).
    +2399    (In Python a datetime object is valid, too)."""
    +2400
    +2401    training_data: Annotated[
    +2402        Union[None, LinkedDataset, DatasetDescr, DatasetDescr02],
    +2403        Field(union_mode="left_to_right"),
    +2404    ] = None
    +2405    """The dataset used to train this model"""
    +2406
    +2407    weights: Annotated[WeightsDescr, WrapSerializer(package_weights)]
    +2408    """The weights for this model.
    +2409    Weights can be given for different formats, but should otherwise be equivalent.
    +2410    The available weight formats determine which consumers can use this model."""
    +2411
    +2412    @model_validator(mode="after")
    +2413    def _add_default_cover(self) -> Self:
    +2414        if not validation_context_var.get().perform_io_checks or self.covers:
    +2415            return self
    +2416
    +2417        try:
    +2418            generated_covers = generate_covers(
    +2419                [(t, load_array(t.test_tensor.download().path)) for t in self.inputs],
    +2420                [(t, load_array(t.test_tensor.download().path)) for t in self.outputs],
    +2421            )
    +2422        except Exception as e:
    +2423            issue_warning(
    +2424                "Failed to generate cover image(s): {e}",
    +2425                value=self.covers,
    +2426                msg_context=dict(e=e),
    +2427                field="covers",
    +2428            )
    +2429        else:
    +2430            self.covers.extend(generated_covers)
    +2431
    +2432        return self
    +2433
    +2434    def get_input_test_arrays(self) -> List[NDArray[Any]]:
    +2435        data = [load_array(ipt.test_tensor.download().path) for ipt in self.inputs]
    +2436        assert all(isinstance(d, np.ndarray) for d in data)
    +2437        return data
    +2438
    +2439    def get_output_test_arrays(self) -> List[NDArray[Any]]:
    +2440        data = [load_array(out.test_tensor.download().path) for out in self.outputs]
    +2441        assert all(isinstance(d, np.ndarray) for d in data)
    +2442        return data
    +2443
    +2444    @staticmethod
    +2445    def get_batch_size(tensor_sizes: Mapping[TensorId, Mapping[AxisId, int]]) -> int:
    +2446        batch_size = 1
    +2447        tensor_with_batchsize: Optional[TensorId] = None
    +2448        for tid in tensor_sizes:
    +2449            for aid, s in tensor_sizes[tid].items():
    +2450                if aid != BATCH_AXIS_ID or s == 1 or s == batch_size:
    +2451                    continue
    +2452
    +2453                if batch_size != 1:
    +2454                    assert tensor_with_batchsize is not None
    +2455                    raise ValueError(
    +2456                        f"batch size mismatch for tensors '{tensor_with_batchsize}' ({batch_size}) and '{tid}' ({s})"
    +2457                    )
    +2458
    +2459                batch_size = s
    +2460                tensor_with_batchsize = tid
    +2461
    +2462        return batch_size
    +2463
    +2464    def get_output_tensor_sizes(
    +2465        self, input_sizes: Mapping[TensorId, Mapping[AxisId, int]]
    +2466    ) -> Dict[TensorId, Dict[AxisId, Union[int, _DataDepSize]]]:
    +2467        """Returns the tensor output sizes for given **input_sizes**.
    +2468        Only if **input_sizes** has a valid input shape, the tensor output size is exact.
    +2469        Otherwise it might be larger than the actual (valid) output"""
    +2470        batch_size = self.get_batch_size(input_sizes)
    +2471        ns = self.get_ns(input_sizes)
    +2472
    +2473        tensor_sizes = self.get_tensor_sizes(ns, batch_size=batch_size)
    +2474        return tensor_sizes.outputs
    +2475
    +2476    def get_ns(self, input_sizes: Mapping[TensorId, Mapping[AxisId, int]]):
    +2477        """get parameter `n` for each parameterized axis
    +2478        such that the valid input size is >= the given input size"""
    +2479        ret: Dict[Tuple[TensorId, AxisId], ParameterizedSize_N] = {}
    +2480        axes = {t.id: {a.id: a for a in t.axes} for t in self.inputs}
    +2481        for tid in input_sizes:
    +2482            for aid, s in input_sizes[tid].items():
    +2483                size_descr = axes[tid][aid].size
    +2484                if isinstance(size_descr, ParameterizedSize):
    +2485                    ret[(tid, aid)] = size_descr.get_n(s)
    +2486                elif size_descr is None or isinstance(size_descr, (int, SizeReference)):
    +2487                    pass
    +2488                else:
    +2489                    assert_never(size_descr)
    +2490
    +2491        return ret
    +2492
    +2493    def get_tensor_sizes(
    +2494        self, ns: Mapping[Tuple[TensorId, AxisId], ParameterizedSize_N], batch_size: int
    +2495    ) -> _TensorSizes:
    +2496        axis_sizes = self.get_axis_sizes(ns, batch_size=batch_size)
    +2497        return _TensorSizes(
    +2498            {
    +2499                t: {
    +2500                    aa: axis_sizes.inputs[(tt, aa)]
    +2501                    for tt, aa in axis_sizes.inputs
    +2502                    if tt == t
    +2503                }
    +2504                for t in {tt for tt, _ in axis_sizes.inputs}
    +2505            },
    +2506            {
    +2507                t: {
    +2508                    aa: axis_sizes.outputs[(tt, aa)]
    +2509                    for tt, aa in axis_sizes.outputs
    +2510                    if tt == t
    +2511                }
    +2512                for t in {tt for tt, _ in axis_sizes.outputs}
    +2513            },
    +2514        )
    +2515
    +2516    def get_axis_sizes(
    +2517        self,
    +2518        ns: Mapping[Tuple[TensorId, AxisId], ParameterizedSize_N],
    +2519        batch_size: Optional[int] = None,
    +2520        *,
    +2521        max_input_shape: Optional[Mapping[Tuple[TensorId, AxisId], int]] = None,
    +2522    ) -> _AxisSizes:
    +2523        """Determine input and output block shape for scale factors **ns**
    +2524        of parameterized input sizes.
    +2525
    +2526        Args:
    +2527            ns: Scale factor `n` for each axis (keyed by (tensor_id, axis_id))
    +2528                that is parameterized as `size = min + n * step`.
    +2529            batch_size: The desired size of the batch dimension.
    +2530                If given **batch_size** overwrites any batch size present in
    +2531                **max_input_shape**. Default 1.
    +2532            max_input_shape: Limits the derived block shapes.
    +2533                Each axis for which the input size, parameterized by `n`, is larger
    +2534                than **max_input_shape** is set to the minimal value `n_min` for which
    +2535                this is still true.
    +2536                Use this for small input samples or large values of **ns**.
    +2537                Or simply whenever you know the full input shape.
    +2538
    +2539        Returns:
    +2540            Resolved axis sizes for model inputs and outputs.
    +2541        """
    +2542        max_input_shape = max_input_shape or {}
    +2543        if batch_size is None:
    +2544            for (_t_id, a_id), s in max_input_shape.items():
    +2545                if a_id == BATCH_AXIS_ID:
    +2546                    batch_size = s
    +2547                    break
    +2548            else:
    +2549                batch_size = 1
    +2550
    +2551        all_axes = {
    +2552            t.id: {a.id: a for a in t.axes} for t in chain(self.inputs, self.outputs)
    +2553        }
    +2554
    +2555        inputs: Dict[Tuple[TensorId, AxisId], int] = {}
    +2556        outputs: Dict[Tuple[TensorId, AxisId], Union[int, _DataDepSize]] = {}
    +2557
    +2558        def get_axis_size(a: Union[InputAxis, OutputAxis]):
    +2559            if isinstance(a, BatchAxis):
    +2560                if (t_descr.id, a.id) in ns:
    +2561                    logger.warning(
    +2562                        "Ignoring unexpected size increment factor (n) for batch axis"
    +2563                        + " of tensor '{}'.",
    +2564                        t_descr.id,
    +2565                    )
    +2566                return batch_size
    +2567            elif isinstance(a.size, int):
    +2568                if (t_descr.id, a.id) in ns:
    +2569                    logger.warning(
    +2570                        "Ignoring unexpected size increment factor (n) for fixed size"
    +2571                        + " axis '{}' of tensor '{}'.",
    +2572                        a.id,
    +2573                        t_descr.id,
    +2574                    )
    +2575                return a.size
    +2576            elif isinstance(a.size, ParameterizedSize):
    +2577                if (t_descr.id, a.id) not in ns:
    +2578                    raise ValueError(
    +2579                        "Size increment factor (n) missing for parametrized axis"
    +2580                        + f" '{a.id}' of tensor '{t_descr.id}'."
    +2581                    )
    +2582                n = ns[(t_descr.id, a.id)]
    +2583                s_max = max_input_shape.get((t_descr.id, a.id))
    +2584                if s_max is not None:
    +2585                    n = min(n, a.size.get_n(s_max))
    +2586
    +2587                return a.size.get_size(n)
    +2588
    +2589            elif isinstance(a.size, SizeReference):
    +2590                if (t_descr.id, a.id) in ns:
    +2591                    logger.warning(
    +2592                        "Ignoring unexpected size increment factor (n) for axis '{}'"
    +2593                        + " of tensor '{}' with size reference.",
    +2594                        a.id,
    +2595                        t_descr.id,
    +2596                    )
    +2597                assert not isinstance(a, BatchAxis)
    +2598                ref_axis = all_axes[a.size.tensor_id][a.size.axis_id]
    +2599                assert not isinstance(ref_axis, BatchAxis)
    +2600                ref_key = (a.size.tensor_id, a.size.axis_id)
    +2601                ref_size = inputs.get(ref_key, outputs.get(ref_key))
    +2602                assert ref_size is not None, ref_key
    +2603                assert not isinstance(ref_size, _DataDepSize), ref_key
    +2604                return a.size.get_size(
    +2605                    axis=a,
    +2606                    ref_axis=ref_axis,
    +2607                    ref_size=ref_size,
    +2608                )
    +2609            elif isinstance(a.size, DataDependentSize):
    +2610                if (t_descr.id, a.id) in ns:
    +2611                    logger.warning(
    +2612                        "Ignoring unexpected increment factor (n) for data dependent"
    +2613                        + " size axis '{}' of tensor '{}'.",
    +2614                        a.id,
    +2615                        t_descr.id,
    +2616                    )
    +2617                return _DataDepSize(a.size.min, a.size.max)
    +2618            else:
    +2619                assert_never(a.size)
    +2620
    +2621        # first resolve all but the `SizeReference` input sizes
    +2622        for t_descr in self.inputs:
    +2623            for a in t_descr.axes:
    +2624                if not isinstance(a.size, SizeReference):
    +2625                    s = get_axis_size(a)
    +2626                    assert not isinstance(s, _DataDepSize)
    +2627                    inputs[t_descr.id, a.id] = s
    +2628
    +2629        # resolve all other input axis sizes
    +2630        for t_descr in self.inputs:
    +2631            for a in t_descr.axes:
    +2632                if isinstance(a.size, SizeReference):
    +2633                    s = get_axis_size(a)
    +2634                    assert not isinstance(s, _DataDepSize)
    +2635                    inputs[t_descr.id, a.id] = s
    +2636
    +2637        # resolve all output axis sizes
    +2638        for t_descr in self.outputs:
    +2639            for a in t_descr.axes:
    +2640                assert not isinstance(a.size, ParameterizedSize)
    +2641                s = get_axis_size(a)
    +2642                outputs[t_descr.id, a.id] = s
    +2643
    +2644        return _AxisSizes(inputs=inputs, outputs=outputs)
    +2645
    +2646    @model_validator(mode="before")
    +2647    @classmethod
    +2648    def _convert(cls, data: Dict[str, Any]) -> Dict[str, Any]:
    +2649        if (
    +2650            data.get("type") == "model"
    +2651            and isinstance(fv := data.get("format_version"), str)
    +2652            and fv.count(".") == 2
    +2653        ):
    +2654            fv_parts = fv.split(".")
    +2655            if any(not p.isdigit() for p in fv_parts):
    +2656                return data
    +2657
    +2658            fv_tuple = tuple(map(int, fv_parts))
    +2659
    +2660            assert cls.implemented_format_version_tuple[0:2] == (0, 5)
    +2661            if fv_tuple[:2] in ((0, 3), (0, 4)):
    +2662                m04 = _ModelDescr_v0_4.load(data)
    +2663                if not isinstance(m04, InvalidDescr):
    +2664                    return _model_conv.convert_as_dict(m04)
    +2665            elif fv_tuple[:2] == (0, 5):
    +2666                # bump patch version
    +2667                data["format_version"] = cls.implemented_format_version
    +2668
    +2669        return data
    +
    + + +

    Specification of the fields used in a bioimage.io-compliant RDF to describe AI models with pretrained weights. +These fields are typically stored in a YAML file which we call a model resource description file (model RDF).
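As a rough sketch (the file path is hypothetical; `load_description` is the generic loader from `bioimageio.spec`), such a YAML file can be loaded into this class like so:

```python
from bioimageio.spec import load_description
from bioimageio.spec.model.v0_5 import ModelDescr

descr = load_description("my_model/rdf.yaml")  # hypothetical local model RDF
if isinstance(descr, ModelDescr):  # older RDFs load as v0_4.ModelDescr instead
    print(descr.name, [t.id for t in descr.inputs])
```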

    +
    + + +
    +
    + format_version: Literal['0.5.3'] + + +
    + + +

    Version of the bioimage.io model description specification used. +When creating a new model always use the latest micro/patch version described here. +The format_version is important for any consumer software to understand how to parse the fields.

    +
    + + +
    +
    +
    + type: Literal['model'] + + +
    + + +

    Specialized resource type 'model'

    +
    + + +
    +
    +
    + id: Optional[bioimageio.spec.model.v0_5.ModelId] + + +
    + + +

    bioimage.io-wide unique resource identifier +assigned by bioimage.io; version unspecific.

    +
    + + +
    +
    +
    + authors: Annotated[List[bioimageio.spec.generic.v0_3.Author], MinLen(min_length=1)] + + +
    + + +

    The authors are the creators of the model RDF and the primary points of contact.

    +
    + + +
    +
    +
    + documentation: Annotated[Union[Annotated[pathlib.Path, PathType(path_type='file'), Predicate(is_absolute)], bioimageio.spec._internal.io.RelativeFilePath, bioimageio.spec._internal.url.HttpUrl], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')]), AfterValidator(func=<function _validate_md_suffix at 0x7f5380dc7e20>), PlainSerializer(func=<function _package at 0x7f538192d620>, return_type=PydanticUndefined, when_used='unless-none'), FieldInfo(annotation=NoneType, required=True, examples=['https://raw.githubusercontent.com/bioimage-io/spec-bioimage-io/main/example_descriptions/models/unet2d_nuclei_broad/README.md', 'README.md'])] + + +
    + + +

    ∈📦 URL or relative path to a markdown file with additional documentation. +The recommended documentation file name is README.md. An .md suffix is mandatory. +The documentation should include a '#[#] Validation' (sub)section +with details on how to quantitatively validate the model on unseen data.

    +
    + + +
    +
    +
    + inputs: Annotated[Sequence[bioimageio.spec.model.v0_5.InputTensorDescr], MinLen(min_length=1)] + + +
    + + +

    Describes the input tensors expected by this model.

    +
    + + +
    +
    +
    + name: Annotated[str, RestrictCharacters(alphabet='abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_- ()'), MinLen(min_length=5), MaxLen(max_length=128), AfterWarner(func=<function as_warning.<locals>.wrapper at 0x7f536fa91c60>, severity=20, msg='Name longer than 64 characters.', context={'typ': Annotated[Any, MaxLen(max_length=64)]})] + + +
    + + +

A human-readable name of this model. +It should be no longer than 64 characters +and may only contain letters, numbers, underscores, minus signs, parentheses, and spaces. +We recommend choosing a name that refers to the model's task and image modality.

    +
    + + +
    +
    +
    + outputs: Annotated[Sequence[bioimageio.spec.model.v0_5.OutputTensorDescr], MinLen(min_length=1)] + + +
    + + +

    Describes the output tensors.

    +
    + + +
    +
    +
    + packaged_by: List[bioimageio.spec.generic.v0_3.Author] + + +
    + + +

    The persons that have packaged and uploaded this model. +Only required if those persons differ from the authors.

    +
    + + +
    +
    +
    + parent: Optional[bioimageio.spec.model.v0_5.LinkedModel] + + +
    + + +

    The model from which this model is derived, e.g. by fine-tuning the weights.

    +
    + + +
    +
    +
    + run_mode: Annotated[Optional[bioimageio.spec.model.v0_4.RunMode], AfterWarner(func=<function as_warning.<locals>.wrapper at 0x7f536fa90e00>, severity=30, msg="Run mode '{value}' has limited support across consumer softwares.", context={'typ': None})] + + +
    + + +

    Custom run mode for this model: for more complex prediction procedures like test time +data augmentation that currently cannot be expressed in the specification. +No standard run modes are defined yet.

    +
    + + +
    +
    +
    + timestamp: bioimageio.spec._internal.types.Datetime + + +
    + + +

    Timestamp in ISO 8601 format +with a few restrictions listed here. +(In Python a datetime object is valid, too).

    +
    + + +
    +
    +
    + training_data: Annotated[Union[NoneType, bioimageio.spec.dataset.v0_3.LinkedDataset, bioimageio.spec.DatasetDescr, bioimageio.spec.dataset.v0_2.DatasetDescr], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])] + + +
    + + +

    The dataset used to train this model

    +
    + + +
    +
    +
    + weights: Annotated[bioimageio.spec.model.v0_5.WeightsDescr, WrapSerializer(func=<function package_weights at 0x7f537f154360>, return_type=PydanticUndefined, when_used='always')] + + +
    + + +

    The weights for this model. +Weights can be given for different formats, but should otherwise be equivalent. +The available weight formats determine which consumers can use this model.
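A small sketch (assuming `descr` is a loaded `ModelDescr`; the attribute names follow the `WeightsDescr` fields) of listing which weight formats are available:

```python
# Sketch: list which weight entries are present on a loaded `descr` (assumed).
weight_formats = [
    fmt
    for fmt in (
        "keras_hdf5",
        "onnx",
        "pytorch_state_dict",
        "tensorflow_js",
        "tensorflow_saved_model_bundle",
        "torchscript",
    )
    if getattr(descr.weights, fmt, None) is not None
]
print(weight_formats)
```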

    +
    + + +
    +
    + +
    + + def + get_input_test_arrays(self) -> List[numpy.ndarray[Any, numpy.dtype[Any]]]: + + + +
    + +
    2434    def get_input_test_arrays(self) -> List[NDArray[Any]]:
    +2435        data = [load_array(ipt.test_tensor.download().path) for ipt in self.inputs]
    +2436        assert all(isinstance(d, np.ndarray) for d in data)
    +2437        return data
    +
    + + + + +
    +
    + +
    + + def + get_output_test_arrays(self) -> List[numpy.ndarray[Any, numpy.dtype[Any]]]: + + + +
    + +
    2439    def get_output_test_arrays(self) -> List[NDArray[Any]]:
    +2440        data = [load_array(out.test_tensor.download().path) for out in self.outputs]
    +2441        assert all(isinstance(d, np.ndarray) for d in data)
    +2442        return data
    +
    + + + + +
    +
    + +
    +
    @staticmethod
    + + def + get_batch_size( tensor_sizes: Mapping[bioimageio.spec.model.v0_5.TensorId, Mapping[bioimageio.spec.model.v0_5.AxisId, int]]) -> int: + + + +
    + +
    2444    @staticmethod
    +2445    def get_batch_size(tensor_sizes: Mapping[TensorId, Mapping[AxisId, int]]) -> int:
    +2446        batch_size = 1
    +2447        tensor_with_batchsize: Optional[TensorId] = None
    +2448        for tid in tensor_sizes:
    +2449            for aid, s in tensor_sizes[tid].items():
    +2450                if aid != BATCH_AXIS_ID or s == 1 or s == batch_size:
    +2451                    continue
    +2452
    +2453                if batch_size != 1:
    +2454                    assert tensor_with_batchsize is not None
    +2455                    raise ValueError(
    +2456                        f"batch size mismatch for tensors '{tensor_with_batchsize}' ({batch_size}) and '{tid}' ({s})"
    +2457                    )
    +2458
    +2459                batch_size = s
    +2460                tensor_with_batchsize = tid
    +2461
    +2462        return batch_size
    +
    + + + + +
    +
    + +
    + + def + get_output_tensor_sizes( self, input_sizes: Mapping[bioimageio.spec.model.v0_5.TensorId, Mapping[bioimageio.spec.model.v0_5.AxisId, int]]) -> Dict[bioimageio.spec.model.v0_5.TensorId, Dict[bioimageio.spec.model.v0_5.AxisId, Union[int, bioimageio.spec.model.v0_5._DataDepSize]]]: + + + +
    + +
    2464    def get_output_tensor_sizes(
    +2465        self, input_sizes: Mapping[TensorId, Mapping[AxisId, int]]
    +2466    ) -> Dict[TensorId, Dict[AxisId, Union[int, _DataDepSize]]]:
    +2467        """Returns the tensor output sizes for given **input_sizes**.
    +2468        The output sizes are exact only if **input_sizes** is a valid input shape.
    +2469        Otherwise they may be larger than the actual (valid) output sizes."""
    +2470        batch_size = self.get_batch_size(input_sizes)
    +2471        ns = self.get_ns(input_sizes)
    +2472
    +2473        tensor_sizes = self.get_tensor_sizes(ns, batch_size=batch_size)
    +2474        return tensor_sizes.outputs
    +
    + + +

Returns the tensor output sizes for given input_sizes. +The output sizes are exact only if input_sizes is a valid input shape. +Otherwise they may be larger than the actual (valid) output sizes.
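A minimal sketch of calling this method, assuming `model` is a loaded `ModelDescr` and using hypothetical tensor/axis ids:

```python
from bioimageio.spec.model.v0_5 import AxisId, TensorId

# hypothetical ids; use the ids declared in model.inputs
input_sizes = {
    TensorId("input0"): {AxisId("batch"): 1, AxisId("y"): 512, AxisId("x"): 512}
}
output_sizes = model.get_output_tensor_sizes(input_sizes)
# e.g. {TensorId("output0"): {AxisId("batch"): 1, AxisId("y"): 512, AxisId("x"): 512}}
```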

    +
    + + +
    +
    + +
    + + def + get_ns( self, input_sizes: Mapping[bioimageio.spec.model.v0_5.TensorId, Mapping[bioimageio.spec.model.v0_5.AxisId, int]]): + + + +
    + +
    2476    def get_ns(self, input_sizes: Mapping[TensorId, Mapping[AxisId, int]]):
    +2477        """get parameter `n` for each parameterized axis
    +2478        such that the valid input size is >= the given input size"""
    +2479        ret: Dict[Tuple[TensorId, AxisId], ParameterizedSize_N] = {}
    +2480        axes = {t.id: {a.id: a for a in t.axes} for t in self.inputs}
    +2481        for tid in input_sizes:
    +2482            for aid, s in input_sizes[tid].items():
    +2483                size_descr = axes[tid][aid].size
    +2484                if isinstance(size_descr, ParameterizedSize):
    +2485                    ret[(tid, aid)] = size_descr.get_n(s)
    +2486                elif size_descr is None or isinstance(size_descr, (int, SizeReference)):
    +2487                    pass
    +2488                else:
    +2489                    assert_never(size_descr)
    +2490
    +2491        return ret
    +
    + + +

    get parameter n for each parameterized axis +such that the valid input size is >= the given input size
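For example (hypothetical ids; suppose both spatial axes are parameterized with `min=64, step=16`), a rough sketch of what `get_ns` computes:

```python
from bioimageio.spec.model.v0_5 import AxisId, TensorId

ns = model.get_ns({TensorId("input0"): {AxisId("y"): 500, AxisId("x"): 500}})
# -> {(TensorId("input0"), AxisId("y")): 28, (TensorId("input0"), AxisId("x")): 28}
# because 64 + 28 * 16 = 512 is the smallest valid size >= 500;
# fixed-size axes and axes with a SizeReference are skipped.
```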

    +
    + + +
    +
    + +
    + + def + get_tensor_sizes( self, ns: Mapping[Tuple[bioimageio.spec.model.v0_5.TensorId, bioimageio.spec.model.v0_5.AxisId], int], batch_size: int) -> bioimageio.spec.model.v0_5._TensorSizes: + + + +
    + +
    2493    def get_tensor_sizes(
    +2494        self, ns: Mapping[Tuple[TensorId, AxisId], ParameterizedSize_N], batch_size: int
    +2495    ) -> _TensorSizes:
    +2496        axis_sizes = self.get_axis_sizes(ns, batch_size=batch_size)
    +2497        return _TensorSizes(
    +2498            {
    +2499                t: {
    +2500                    aa: axis_sizes.inputs[(tt, aa)]
    +2501                    for tt, aa in axis_sizes.inputs
    +2502                    if tt == t
    +2503                }
    +2504                for t in {tt for tt, _ in axis_sizes.inputs}
    +2505            },
    +2506            {
    +2507                t: {
    +2508                    aa: axis_sizes.outputs[(tt, aa)]
    +2509                    for tt, aa in axis_sizes.outputs
    +2510                    if tt == t
    +2511                }
    +2512                for t in {tt for tt, _ in axis_sizes.outputs}
    +2513            },
    +2514        )
    +
    + + + + +
    +
    + +
    + + def + get_axis_sizes( self, ns: Mapping[Tuple[bioimageio.spec.model.v0_5.TensorId, bioimageio.spec.model.v0_5.AxisId], int], batch_size: Optional[int] = None, *, max_input_shape: Optional[Mapping[Tuple[bioimageio.spec.model.v0_5.TensorId, bioimageio.spec.model.v0_5.AxisId], int]] = None) -> bioimageio.spec.model.v0_5._AxisSizes: + + + +
    + +
    2516    def get_axis_sizes(
    +2517        self,
    +2518        ns: Mapping[Tuple[TensorId, AxisId], ParameterizedSize_N],
    +2519        batch_size: Optional[int] = None,
    +2520        *,
    +2521        max_input_shape: Optional[Mapping[Tuple[TensorId, AxisId], int]] = None,
    +2522    ) -> _AxisSizes:
    +2523        """Determine input and output block shape for scale factors **ns**
    +2524        of parameterized input sizes.
    +2525
    +2526        Args:
    +2527            ns: Scale factor `n` for each axis (keyed by (tensor_id, axis_id))
    +2528                that is parameterized as `size = min + n * step`.
    +2529            batch_size: The desired size of the batch dimension.
    +2530                If given **batch_size** overwrites any batch size present in
    +2531                **max_input_shape**. Default 1.
    +2532            max_input_shape: Limits the derived block shapes.
    +2533                Each axis for which the input size, parameterized by `n`, is larger
    +2534                than **max_input_shape** is set to the minimal value `n_min` for which
    +2535                this is still true.
    +2536                Use this for small input samples or large values of **ns**.
    +2537                Or simply whenever you know the full input shape.
    +2538
    +2539        Returns:
    +2540            Resolved axis sizes for model inputs and outputs.
    +2541        """
    +2542        max_input_shape = max_input_shape or {}
    +2543        if batch_size is None:
    +2544            for (_t_id, a_id), s in max_input_shape.items():
    +2545                if a_id == BATCH_AXIS_ID:
    +2546                    batch_size = s
    +2547                    break
    +2548            else:
    +2549                batch_size = 1
    +2550
    +2551        all_axes = {
    +2552            t.id: {a.id: a for a in t.axes} for t in chain(self.inputs, self.outputs)
    +2553        }
    +2554
    +2555        inputs: Dict[Tuple[TensorId, AxisId], int] = {}
    +2556        outputs: Dict[Tuple[TensorId, AxisId], Union[int, _DataDepSize]] = {}
    +2557
    +2558        def get_axis_size(a: Union[InputAxis, OutputAxis]):
    +2559            if isinstance(a, BatchAxis):
    +2560                if (t_descr.id, a.id) in ns:
    +2561                    logger.warning(
    +2562                        "Ignoring unexpected size increment factor (n) for batch axis"
    +2563                        + " of tensor '{}'.",
    +2564                        t_descr.id,
    +2565                    )
    +2566                return batch_size
    +2567            elif isinstance(a.size, int):
    +2568                if (t_descr.id, a.id) in ns:
    +2569                    logger.warning(
    +2570                        "Ignoring unexpected size increment factor (n) for fixed size"
    +2571                        + " axis '{}' of tensor '{}'.",
    +2572                        a.id,
    +2573                        t_descr.id,
    +2574                    )
    +2575                return a.size
    +2576            elif isinstance(a.size, ParameterizedSize):
    +2577                if (t_descr.id, a.id) not in ns:
    +2578                    raise ValueError(
    +2579                        "Size increment factor (n) missing for parametrized axis"
    +2580                        + f" '{a.id}' of tensor '{t_descr.id}'."
    +2581                    )
    +2582                n = ns[(t_descr.id, a.id)]
    +2583                s_max = max_input_shape.get((t_descr.id, a.id))
    +2584                if s_max is not None:
    +2585                    n = min(n, a.size.get_n(s_max))
    +2586
    +2587                return a.size.get_size(n)
    +2588
    +2589            elif isinstance(a.size, SizeReference):
    +2590                if (t_descr.id, a.id) in ns:
    +2591                    logger.warning(
    +2592                        "Ignoring unexpected size increment factor (n) for axis '{}'"
    +2593                        + " of tensor '{}' with size reference.",
    +2594                        a.id,
    +2595                        t_descr.id,
    +2596                    )
    +2597                assert not isinstance(a, BatchAxis)
    +2598                ref_axis = all_axes[a.size.tensor_id][a.size.axis_id]
    +2599                assert not isinstance(ref_axis, BatchAxis)
    +2600                ref_key = (a.size.tensor_id, a.size.axis_id)
    +2601                ref_size = inputs.get(ref_key, outputs.get(ref_key))
    +2602                assert ref_size is not None, ref_key
    +2603                assert not isinstance(ref_size, _DataDepSize), ref_key
    +2604                return a.size.get_size(
    +2605                    axis=a,
    +2606                    ref_axis=ref_axis,
    +2607                    ref_size=ref_size,
    +2608                )
    +2609            elif isinstance(a.size, DataDependentSize):
    +2610                if (t_descr.id, a.id) in ns:
    +2611                    logger.warning(
    +2612                        "Ignoring unexpected increment factor (n) for data dependent"
    +2613                        + " size axis '{}' of tensor '{}'.",
    +2614                        a.id,
    +2615                        t_descr.id,
    +2616                    )
    +2617                return _DataDepSize(a.size.min, a.size.max)
    +2618            else:
    +2619                assert_never(a.size)
    +2620
    +2621        # first resolve all but the `SizeReference` input sizes
    +2622        for t_descr in self.inputs:
    +2623            for a in t_descr.axes:
    +2624                if not isinstance(a.size, SizeReference):
    +2625                    s = get_axis_size(a)
    +2626                    assert not isinstance(s, _DataDepSize)
    +2627                    inputs[t_descr.id, a.id] = s
    +2628
    +2629        # resolve all other input axis sizes
    +2630        for t_descr in self.inputs:
    +2631            for a in t_descr.axes:
    +2632                if isinstance(a.size, SizeReference):
    +2633                    s = get_axis_size(a)
    +2634                    assert not isinstance(s, _DataDepSize)
    +2635                    inputs[t_descr.id, a.id] = s
    +2636
    +2637        # resolve all output axis sizes
    +2638        for t_descr in self.outputs:
    +2639            for a in t_descr.axes:
    +2640                assert not isinstance(a.size, ParameterizedSize)
    +2641                s = get_axis_size(a)
    +2642                outputs[t_descr.id, a.id] = s
    +2643
    +2644        return _AxisSizes(inputs=inputs, outputs=outputs)
    +
    + + +

    Determine input and output block shape for scale factors ns +of parameterized input sizes.

    + +
    Arguments:
    + +
      +
    • ns: Scale factor n for each axis (keyed by (tensor_id, axis_id)) +that is parameterized as size = min + n * step.
    • +
    • batch_size: The desired size of the batch dimension. +If given batch_size overwrites any batch size present in +max_input_shape. Default 1.
    • +
    • max_input_shape: Limits the derived block shapes. +Each axis for which the input size, parameterized by n, is larger +than max_input_shape is set to the minimal value n_min for which +this is still true. +Use this for small input samples or large values of ns. +Or simply whenever you know the full input shape.
    • +
    + +
    Returns:
    + +
    +

    Resolved axis sizes for model inputs and outputs.
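A rough usage sketch (hypothetical ids and values; `model` is assumed to be a loaded `ModelDescr`):

```python
from bioimageio.spec.model.v0_5 import AxisId, TensorId

axis_sizes = model.get_axis_sizes(
    ns={
        (TensorId("input0"), AxisId("y")): 28,
        (TensorId("input0"), AxisId("x")): 28,
    },
    batch_size=2,
)
# axis_sizes.inputs and axis_sizes.outputs map (tensor_id, axis_id) to the
# resolved size; output entries may also be a _DataDepSize(min, max) range.
```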

    +
    +
    + + +
    +
    +
    + implemented_format_version: ClassVar[str] = +'0.5.3' + + +
    + + + + +
    +
    +
    + implemented_format_version_tuple: ClassVar[Tuple[int, int, int]] = +(0, 5, 3) + + +
    + + + + +
    +
    + +
    + + def + model_post_init(self: pydantic.main.BaseModel, context: Any, /) -> None: + + + +
    + +
    124                    def wrapped_model_post_init(self: BaseModel, context: Any, /) -> None:
    +125                        """We need to both initialize private attributes and call the user-defined model_post_init
    +126                        method.
    +127                        """
    +128                        init_private_attributes(self, context)
    +129                        original_model_post_init(self, context)
    +
    + + +

    We need to both initialize private attributes and call the user-defined model_post_init +method.

    +
    + + +
    + +
    +
    +
    + ModelDescr_v0_4 = +<class 'bioimageio.spec.model.v0_4.ModelDescr'> + + +
    + + + + +
    +
    +
    + ModelDescr_v0_5 = +<class 'ModelDescr'> + + +
    + + + + +
    +
    +
    + AnyModelDescr = + + typing.Annotated[typing.Union[bioimageio.spec.model.v0_4.ModelDescr, ModelDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)] + + +
    + + +

Union of any released model description

    +
    + + +
    +
    + + \ No newline at end of file diff --git a/bioimageio/spec/model/_v0_3_converter.html b/bioimageio/spec/model/_v0_3_converter.html new file mode 100644 index 00000000..c4069f7c --- /dev/null +++ b/bioimageio/spec/model/_v0_3_converter.html @@ -0,0 +1,438 @@ + + + + + + + bioimageio.spec.model._v0_3_converter API documentation + + + + + + + + + + +
    +
    +

    +bioimageio.spec.model._v0_3_converter

    + + + + + + +
      1# type: ignore
    +  2from typing import Any, Dict
    +  3
    +  4
    +  5def convert_model_from_v0_3_to_0_4_0(data: Dict[str, Any]) -> None:
    +  6    """auto converts model 'data' to newest format"""
    +  7
    +  8    if "format_version" not in data:
    +  9        return
    + 10
    + 11    if data["format_version"] == "0.3.0":
    + 12        # no breaking change, bump to 0.3.1
    + 13        data["format_version"] = "0.3.1"
    + 14
    + 15    if data["format_version"] == "0.3.1":
    + 16        data = _convert_model_v0_3_1_to_v0_3_2(data)
    + 17
    + 18    if data["format_version"] == "0.3.2":
    + 19        data = _convert_model_v0_3_2_to_v0_3_3(data)
    + 20
    + 21    if data["format_version"] in ("0.3.3", "0.3.4", "0.3.5"):
    + 22        data["format_version"] = "0.3.6"
    + 23
    + 24    if data["format_version"] != "0.3.6":
    + 25        return
    + 26
    + 27    # remove 'future' from config if no other than the used future entries exist
    + 28    config = data.get("config", {})
    + 29    if config.get("future") == {}:
    + 30        del config["future"]
    + 31
    + 32    # remove 'config' if now empty
    + 33    if data.get("config") == {}:
    + 34        del data["config"]
    + 35
    + 36    data.pop("language", None)
    + 37    data.pop("framework", None)
    + 38
    + 39    architecture = data.pop("source", None)
    + 40    architecture_sha256 = data.pop("sha256", None)
    + 41    kwargs = data.pop("kwargs", None)
    + 42    pytorch_state_dict_weights_entry = data.get("weights", {}).get("pytorch_state_dict")
    + 43    if pytorch_state_dict_weights_entry is not None:
    + 44        if architecture is not None:
    + 45            pytorch_state_dict_weights_entry["architecture"] = architecture
    + 46
    + 47        if architecture_sha256 is not None:
    + 48            pytorch_state_dict_weights_entry["architecture_sha256"] = (
    + 49                architecture_sha256
    + 50            )
    + 51
    + 52        if kwargs is not None:
    + 53            pytorch_state_dict_weights_entry["kwargs"] = kwargs
    + 54
    + 55    torchscript_weights_entry = data.get("weights", {}).pop("pytorch_script", None)
    + 56    if torchscript_weights_entry is not None:
    + 57        data.setdefault("weights", {})["torchscript"] = torchscript_weights_entry
    + 58
    + 59    data["format_version"] = "0.4.0"
    + 60
    + 61
    + 62def _convert_model_v0_3_1_to_v0_3_2(data: Dict[str, Any]) -> Dict[str, Any]:
    + 63    data["type"] = "model"
    + 64    data["format_version"] = "0.3.2"
    + 65    future = data.get("config", {}).get("future", {}).pop("0.3.2", {})
    + 66
    + 67    authors = data.get("authors")
    + 68    if isinstance(authors, list):
    + 69        data["authors"] = [{"name": name} for name in authors]
    + 70        authors_update = future.get("authors")
    + 71        if authors_update is not None:
    + 72            for a, u in zip(data["authors"], authors_update):
    + 73                a.update(u)
    + 74
    + 75    # packaged_by
    + 76    packaged_by = data.get("packaged_by")
    + 77    if packaged_by is not None:
    + 78        data["packaged_by"] = [{"name": name} for name in data["packaged_by"]]
    + 79        packaged_by_update = future.get("packaged_by")
    + 80        if packaged_by_update is not None:
    + 81            for a, u in zip(data["packaged_by"], packaged_by_update):
    + 82                a.update(u)
    + 83
    + 84    # authors of weights
    + 85    weights = data.get("weights")
    + 86    if isinstance(weights, dict):
    + 87        for weights_format, weights_entry in weights.items():
    + 88            if "authors" not in weights_entry:
    + 89                continue
    + 90
    + 91            weights_entry["authors"] = [
    + 92                {"name": name} for name in weights_entry["authors"]
    + 93            ]
    + 94            authors_update = (
    + 95                future.get("weights", {}).get(weights_format, {}).get("authors")
    + 96            )
    + 97            if authors_update is not None:
    + 98                for a, u in zip(weights_entry["authors"], authors_update):
    + 99                    a.update(u)
    +100
    +101    # model version
    +102    if "version" in future:
    +103        data["version"] = future.pop("version")
    +104
    +105    return data
    +106
    +107
    +108def _convert_model_v0_3_2_to_v0_3_3(data: Dict[str, Any]) -> Dict[str, Any]:
    +109    data["format_version"] = "0.3.3"
    +110    if "outputs" in data:
    +111        for out in data["outputs"]:
    +112            if "shape" in out:
    +113                shape = out["shape"]
    +114                if isinstance(shape, dict) and "reference_input" in shape:
    +115                    shape["reference_tensor"] = shape.pop("reference_input")
    +116
    +117    return data
    +
    + + +
    +
    + +
    + + def + convert_model_from_v0_3_to_0_4_0(data: Dict[str, Any]) -> None: + + + +
    + +
     6def convert_model_from_v0_3_to_0_4_0(data: Dict[str, Any]) -> None:
    + 7    """auto converts model 'data' to newest format"""
    + 8
    + 9    if "format_version" not in data:
    +10        return
    +11
    +12    if data["format_version"] == "0.3.0":
    +13        # no breaking change, bump to 0.3.1
    +14        data["format_version"] = "0.3.1"
    +15
    +16    if data["format_version"] == "0.3.1":
    +17        data = _convert_model_v0_3_1_to_v0_3_2(data)
    +18
    +19    if data["format_version"] == "0.3.2":
    +20        data = _convert_model_v0_3_2_to_v0_3_3(data)
    +21
    +22    if data["format_version"] in ("0.3.3", "0.3.4", "0.3.5"):
    +23        data["format_version"] = "0.3.6"
    +24
    +25    if data["format_version"] != "0.3.6":
    +26        return
    +27
    +28    # remove 'future' from config if no other than the used future entries exist
    +29    config = data.get("config", {})
    +30    if config.get("future") == {}:
    +31        del config["future"]
    +32
    +33    # remove 'config' if now empty
    +34    if data.get("config") == {}:
    +35        del data["config"]
    +36
    +37    data.pop("language", None)
    +38    data.pop("framework", None)
    +39
    +40    architecture = data.pop("source", None)
    +41    architecture_sha256 = data.pop("sha256", None)
    +42    kwargs = data.pop("kwargs", None)
    +43    pytorch_state_dict_weights_entry = data.get("weights", {}).get("pytorch_state_dict")
    +44    if pytorch_state_dict_weights_entry is not None:
    +45        if architecture is not None:
    +46            pytorch_state_dict_weights_entry["architecture"] = architecture
    +47
    +48        if architecture_sha256 is not None:
    +49            pytorch_state_dict_weights_entry["architecture_sha256"] = (
    +50                architecture_sha256
    +51            )
    +52
    +53        if kwargs is not None:
    +54            pytorch_state_dict_weights_entry["kwargs"] = kwargs
    +55
    +56    torchscript_weights_entry = data.get("weights", {}).pop("pytorch_script", None)
    +57    if torchscript_weights_entry is not None:
    +58        data.setdefault("weights", {})["torchscript"] = torchscript_weights_entry
    +59
    +60    data["format_version"] = "0.4.0"
    +
    + + +

    auto converts model 'data' to newest format
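For illustration, a minimal sketch on a deliberately incomplete, hypothetical 0.3.x dict (a real model RDF has many more required fields); the function mutates `data` in place and returns `None`:

```python
data = {
    "format_version": "0.3.6",
    "weights": {"pytorch_script": {"source": "weights.pt"}},  # hypothetical entry
}
convert_model_from_v0_3_to_0_4_0(data)
assert data["format_version"] == "0.4.0"
assert "torchscript" in data["weights"]  # 'pytorch_script' entry was renamed
```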

    +
    + + +
    +
    + + \ No newline at end of file diff --git a/bioimageio/spec/model/_v0_4_converter.html b/bioimageio/spec/model/_v0_4_converter.html new file mode 100644 index 00000000..e4d17306 --- /dev/null +++ b/bioimageio/spec/model/_v0_4_converter.html @@ -0,0 +1,387 @@ + + + + + + + bioimageio.spec.model._v0_4_converter API documentation + + + + + + + + + + +
    +
    +

    +bioimageio.spec.model._v0_4_converter

    + + + + + + +
     1import collections.abc
    + 2
    + 3from .._internal.io import BioimageioYamlContent
    + 4from ..generic._v0_2_converter import (
    + 5    remove_doi_prefix,
    + 6    remove_gh_prefix,
    + 7    remove_slashes_from_names,
    + 8)
    + 9from ._v0_3_converter import convert_model_from_v0_3_to_0_4_0
    +10
    +11
    +12def convert_from_older_format(data: BioimageioYamlContent) -> None:
    +13    fv = data.get("format_version")
    +14    if not isinstance(fv, str):
    +15        return
    +16
    +17    major_minor = tuple(map(int, fv.split(".")[:2]))
    +18    if major_minor < (0, 4):
    +19        convert_model_from_v0_3_to_0_4_0(data)
    +20    elif major_minor > (0, 4):
    +21        return
    +22
    +23    if data["format_version"] == "0.4.0":
    +24        _convert_model_from_v0_4_0_to_0_4_1(data)
    +25
    +26    if data["format_version"] in ("0.4.1", "0.4.2", "0.4.3", "0.4.4"):
    +27        _convert_model_from_v0_4_4_to_0_4_5(data)
    +28
    +29    if data["format_version"] in ("0.4.5", "0.4.6"):
    +30        remove_slashes_from_names(data)
    +31        data["format_version"] = "0.4.7"
    +32
    +33    if data["format_version"] in ("0.4.7", "0.4.8"):
    +34        data["format_version"] = "0.4.9"
    +35
    +36    if data["format_version"] == "0.4.9":
    +37        if isinstance(config := data.get("config"), dict) and isinstance(
    +38            bconfig := config.get("bioimageio"), dict
    +39        ):
    +40            if (nickname := bconfig.get("nickname")) is not None:
    +41                data["id"] = nickname
    +42
    +43            if (nickname_icon := bconfig.get("nickname_icon")) is not None:
    +44                data["id_emoji"] = nickname_icon
    +45
    +46        data["format_version"] = "0.4.10"
    +47
    +48    remove_doi_prefix(data)
    +49    remove_gh_prefix(data)
    +50    # remove 'future' from config if no other than the used future entries exist
    +51    config = data.get("config", {})
    +52    if isinstance(config, dict) and config.get("future") == {}:
    +53        del config["future"]
    +54
    +55    # remove 'config' if now empty
    +56    if data.get("config") == {}:
    +57        del data["config"]
    +58
    +59
    +60def _convert_model_from_v0_4_0_to_0_4_1(data: BioimageioYamlContent):
    +61    # move dependencies from root to pytorch_state_dict weights entry
    +62    deps = data.pop("dependencies", None)
    +63    weights = data.get("weights", {})
    +64    if deps and weights and isinstance(weights, dict):
    +65        entry = weights.get("pytorch_state_dict")
    +66        if entry and isinstance(entry, dict):
    +67            entry["dependencies"] = deps
    +68
    +69    data["format_version"] = "0.4.1"
    +70
    +71
    +72def _convert_model_from_v0_4_4_to_0_4_5(data: BioimageioYamlContent) -> None:
    +73    parent = data.pop("parent", None)
    +74    if isinstance(parent, collections.abc.Mapping) and "uri" in parent:
    +75        data["parent"] = parent["uri"]
    +76
    +77    data["format_version"] = "0.4.5"
    +
    + + +
    +
    + +
    + + def + convert_from_older_format(data: Dict[str, YamlValue]) -> None: + + + +
    + +
    13def convert_from_older_format(data: BioimageioYamlContent) -> None:
    +14    fv = data.get("format_version")
    +15    if not isinstance(fv, str):
    +16        return
    +17
    +18    major_minor = tuple(map(int, fv.split(".")[:2]))
    +19    if major_minor < (0, 4):
    +20        convert_model_from_v0_3_to_0_4_0(data)
    +21    elif major_minor > (0, 4):
    +22        return
    +23
    +24    if data["format_version"] == "0.4.0":
    +25        _convert_model_from_v0_4_0_to_0_4_1(data)
    +26
    +27    if data["format_version"] in ("0.4.1", "0.4.2", "0.4.3", "0.4.4"):
    +28        _convert_model_from_v0_4_4_to_0_4_5(data)
    +29
    +30    if data["format_version"] in ("0.4.5", "0.4.6"):
    +31        remove_slashes_from_names(data)
    +32        data["format_version"] = "0.4.7"
    +33
    +34    if data["format_version"] in ("0.4.7", "0.4.8"):
    +35        data["format_version"] = "0.4.9"
    +36
    +37    if data["format_version"] == "0.4.9":
    +38        if isinstance(config := data.get("config"), dict) and isinstance(
    +39            bconfig := config.get("bioimageio"), dict
    +40        ):
    +41            if (nickname := bconfig.get("nickname")) is not None:
    +42                data["id"] = nickname
    +43
    +44            if (nickname_icon := bconfig.get("nickname_icon")) is not None:
    +45                data["id_emoji"] = nickname_icon
    +46
    +47        data["format_version"] = "0.4.10"
    +48
    +49    remove_doi_prefix(data)
    +50    remove_gh_prefix(data)
    +51    # remove 'future' from config if no other than the used future entries exist
    +52    config = data.get("config", {})
    +53    if isinstance(config, dict) and config.get("future") == {}:
    +54        del config["future"]
    +55
    +56    # remove 'config' if now empty
    +57    if data.get("config") == {}:
    +58        del data["config"]
    +
    + + + + +
    +
    + + \ No newline at end of file diff --git a/bioimageio/spec/model/v0_4.html b/bioimageio/spec/model/v0_4.html new file mode 100644 index 00000000..8ccb1c94 --- /dev/null +++ b/bioimageio/spec/model/v0_4.html @@ -0,0 +1,6432 @@ + + + + + + + bioimageio.spec.model.v0_4 API documentation + + + + + + + + + + +
    +
    +

    +bioimageio.spec.model.v0_4

    + + + + + + +
       1from __future__ import annotations
    +   2
    +   3import collections.abc
    +   4from typing import (
    +   5    Any,
    +   6    ClassVar,
    +   7    Dict,
    +   8    FrozenSet,
    +   9    List,
    +  10    Literal,
    +  11    Optional,
    +  12    Sequence,
    +  13    Tuple,
    +  14    Union,
    +  15)
    +  16
    +  17import numpy as np
    +  18from annotated_types import Ge, Interval, MaxLen, MinLen, MultipleOf
    +  19from numpy.typing import NDArray
    +  20from pydantic import (
    +  21    AllowInfNan,
    +  22    Discriminator,
    +  23    Field,
    +  24    SerializationInfo,
    +  25    SerializerFunctionWrapHandler,
    +  26    TypeAdapter,
    +  27    ValidationInfo,
    +  28    WrapSerializer,
    +  29    field_validator,
    +  30    model_validator,
    +  31)
    +  32from typing_extensions import Annotated, LiteralString, Self, assert_never
    +  33
    +  34from .._internal.common_nodes import (
    +  35    KwargsNode,
    +  36    Node,
    +  37    NodeWithExplicitlySetFields,
    +  38    StringNode,
    +  39)
    +  40from .._internal.constants import SHA256_HINT
    +  41from .._internal.field_validation import validate_unique_entries
    +  42from .._internal.field_warning import issue_warning, warn
    +  43from .._internal.io import (
    +  44    BioimageioYamlContent,
    +  45    WithSuffix,
    +  46    download,
    +  47    include_in_package_serializer,
    +  48)
    +  49from .._internal.io import FileDescr as FileDescr
    +  50from .._internal.io_basics import AbsoluteFilePath as AbsoluteFilePath
    +  51from .._internal.io_basics import Sha256 as Sha256
    +  52from .._internal.io_utils import load_array
    +  53from .._internal.packaging_context import packaging_context_var
    +  54from .._internal.types import Datetime as Datetime
    +  55from .._internal.types import Identifier as Identifier
    +  56from .._internal.types import ImportantFileSource, LowerCaseIdentifier
    +  57from .._internal.types import LicenseId as LicenseId
    +  58from .._internal.types import NotEmpty as NotEmpty
    +  59from .._internal.url import HttpUrl as HttpUrl
    +  60from .._internal.validator_annotations import AfterValidator, RestrictCharacters
    +  61from .._internal.version_type import Version as Version
    +  62from .._internal.warning_levels import ALERT, INFO
    +  63from ..dataset.v0_2 import VALID_COVER_IMAGE_EXTENSIONS as VALID_COVER_IMAGE_EXTENSIONS
    +  64from ..dataset.v0_2 import DatasetDescr as DatasetDescr
    +  65from ..dataset.v0_2 import LinkedDataset as LinkedDataset
    +  66from ..generic.v0_2 import AttachmentsDescr as AttachmentsDescr
    +  67from ..generic.v0_2 import Author as Author
    +  68from ..generic.v0_2 import BadgeDescr as BadgeDescr
    +  69from ..generic.v0_2 import CiteEntry as CiteEntry
    +  70from ..generic.v0_2 import Doi as Doi
    +  71from ..generic.v0_2 import GenericModelDescrBase
    +  72from ..generic.v0_2 import LinkedResource as LinkedResource
    +  73from ..generic.v0_2 import Maintainer as Maintainer
    +  74from ..generic.v0_2 import OrcidId as OrcidId
    +  75from ..generic.v0_2 import RelativeFilePath as RelativeFilePath
    +  76from ..generic.v0_2 import ResourceId as ResourceId
    +  77from ..generic.v0_2 import Uploader as Uploader
    +  78from ._v0_4_converter import convert_from_older_format
    +  79
    +  80
    +  81class ModelId(ResourceId):
    +  82    pass
    +  83
    +  84
    +  85AxesStr = Annotated[
    +  86    str, RestrictCharacters("bitczyx"), AfterValidator(validate_unique_entries)
    +  87]
    +  88AxesInCZYX = Annotated[
    +  89    str, RestrictCharacters("czyx"), AfterValidator(validate_unique_entries)
    +  90]
    +  91
    +  92PostprocessingName = Literal[
    +  93    "binarize",
    +  94    "clip",
    +  95    "scale_linear",
    +  96    "sigmoid",
    +  97    "zero_mean_unit_variance",
    +  98    "scale_range",
    +  99    "scale_mean_variance",
    + 100]
    + 101PreprocessingName = Literal[
    + 102    "binarize",
    + 103    "clip",
    + 104    "scale_linear",
    + 105    "sigmoid",
    + 106    "zero_mean_unit_variance",
    + 107    "scale_range",
    + 108]
    + 109
    + 110
    + 111class TensorName(LowerCaseIdentifier):
    + 112    pass
    + 113
    + 114
    + 115class CallableFromDepencency(StringNode):
    + 116    _pattern = r"^.+\..+$"
    + 117    _submodule_adapter = TypeAdapter(Identifier)
    + 118
    + 119    module_name: str
    + 120
    + 121    @field_validator("module_name", mode="after")
    + 122    def check_submodules(cls, module_name: str) -> str:
    + 123        for submod in module_name.split("."):
    + 124            _ = cls._submodule_adapter.validate_python(submod)
    + 125
    + 126        return module_name
    + 127
    + 128    callable_name: Identifier
    + 129
    + 130    @classmethod
    + 131    def _get_data(cls, valid_string_data: str):
    + 132        *mods, callname = valid_string_data.split(".")
    + 133        return dict(module_name=".".join(mods), callable_name=callname)
    + 134
    + 135
    + 136class CallableFromFile(StringNode):
    + 137    _pattern = r"^.+:.+$"
    + 138    source_file: Annotated[
    + 139        Union[RelativeFilePath, HttpUrl],
    + 140        Field(union_mode="left_to_right"),
    + 141        include_in_package_serializer,
    + 142    ]
    + 143    """∈📦 Python module that implements `callable_name`"""
    + 144    callable_name: Identifier
    + 145    """The Python identifier of  """
    + 146
    + 147    @classmethod
    + 148    def _get_data(cls, valid_string_data: str):
    + 149        *file_parts, callname = valid_string_data.split(":")
    + 150        return dict(source_file=":".join(file_parts), callable_name=callname)
    + 151
    + 152
    + 153CustomCallable = Annotated[
    + 154    Union[CallableFromFile, CallableFromDepencency], Field(union_mode="left_to_right")
    + 155]
    + 156
    + 157
    + 158class Dependencies(StringNode):
    + 159    _pattern = r"^.+:.+$"
    + 160    manager: Annotated[NotEmpty[str], Field(examples=["conda", "maven", "pip"])]
    + 161    """Dependency manager"""
    + 162
    + 163    file: Annotated[
    + 164        ImportantFileSource,
    + 165        Field(examples=["environment.yaml", "pom.xml", "requirements.txt"]),
    + 166    ]
    + 167    """∈📦 Dependency file"""
    + 168
    + 169    @classmethod
    + 170    def _get_data(cls, valid_string_data: str):
    + 171        manager, *file_parts = valid_string_data.split(":")
    + 172        return dict(manager=manager, file=":".join(file_parts))
    + 173
    + 174
    + 175WeightsFormat = Literal[
    + 176    "keras_hdf5",
    + 177    "onnx",
    + 178    "pytorch_state_dict",
    + 179    "tensorflow_js",
    + 180    "tensorflow_saved_model_bundle",
    + 181    "torchscript",
    + 182]
    + 183
    + 184
    + 185class WeightsDescr(Node):
    + 186    keras_hdf5: Optional[KerasHdf5WeightsDescr] = None
    + 187    onnx: Optional[OnnxWeightsDescr] = None
    + 188    pytorch_state_dict: Optional[PytorchStateDictWeightsDescr] = None
    + 189    tensorflow_js: Optional[TensorflowJsWeightsDescr] = None
    + 190    tensorflow_saved_model_bundle: Optional[TensorflowSavedModelBundleWeightsDescr] = (
    + 191        None
    + 192    )
    + 193    torchscript: Optional[TorchscriptWeightsDescr] = None
    + 194
    + 195    @model_validator(mode="after")
    + 196    def check_one_entry(self) -> Self:
    + 197        if all(
    + 198            entry is None
    + 199            for entry in [
    + 200                self.keras_hdf5,
    + 201                self.onnx,
    + 202                self.pytorch_state_dict,
    + 203                self.tensorflow_js,
    + 204                self.tensorflow_saved_model_bundle,
    + 205                self.torchscript,
    + 206            ]
    + 207        ):
    + 208            raise ValueError("Missing weights entry")
    + 209
    + 210        return self
    + 211
    + 212
    + 213class WeightsEntryDescrBase(FileDescr):
    + 214    type: ClassVar[WeightsFormat]
    + 215    weights_format_name: ClassVar[str]  # human readable
    + 216
    + 217    source: ImportantFileSource
    + 218    """∈📦 The weights file."""
    + 219
    + 220    attachments: Annotated[
    + 221        Union[AttachmentsDescr, None],
    + 222        warn(None, "Weights entry depends on additional attachments.", ALERT),
    + 223    ] = None
    + 224    """Attachments that are specific to this weights entry."""
    + 225
    + 226    authors: Union[List[Author], None] = None
    + 227    """Authors
    + 228    Either the person(s) that have trained this model resulting in the original weights file.
    + 229        (If this is the initial weights entry, i.e. it does not have a `parent`)
    + 230    Or the person(s) who have converted the weights to this weights format.
    + 231        (If this is a child weight, i.e. it has a `parent` field)
    + 232    """
    + 233
    + 234    dependencies: Annotated[
    + 235        Optional[Dependencies],
    + 236        warn(
    + 237            None,
    + 238            "Custom dependencies ({value}) specified. Avoid this whenever possible "
    + 239            + "to allow execution in a wider range of software environments.",
    + 240        ),
    + 241        Field(
    + 242            examples=[
    + 243                "conda:environment.yaml",
    + 244                "maven:./pom.xml",
    + 245                "pip:./requirements.txt",
    + 246            ]
    + 247        ),
    + 248    ] = None
    + 249    """Dependency manager and dependency file, specified as `<dependency manager>:<relative file path>`."""
    + 250
    + 251    parent: Annotated[
    + 252        Optional[WeightsFormat], Field(examples=["pytorch_state_dict"])
    + 253    ] = None
    + 254    """The source weights these weights were converted from.
    + 255    For example, if a model's weights were converted from the `pytorch_state_dict` format to `torchscript`,
    + 256    The `pytorch_state_dict` weights entry has no `parent` and is the parent of the `torchscript` weights.
    + 257    All weight entries except one (the initial set of weights resulting from training the model),
    + 258    need to have this field."""
    + 259
    + 260    @model_validator(mode="after")
    + 261    def check_parent_is_not_self(self) -> Self:
    + 262        if self.type == self.parent:
    + 263            raise ValueError("Weights entry can't be it's own parent.")
    + 264
    + 265        return self
    + 266
    + 267
    + 268class KerasHdf5WeightsDescr(WeightsEntryDescrBase):
    + 269    type = "keras_hdf5"
    + 270    weights_format_name: ClassVar[str] = "Keras HDF5"
    + 271    tensorflow_version: Optional[Version] = None
    + 272    """TensorFlow version used to create these weights"""
    + 273
    + 274    @field_validator("tensorflow_version", mode="after")
    + 275    @classmethod
    + 276    def _tfv(cls, value: Any):
    + 277        if value is None:
    + 278            issue_warning(
    + 279                "missing. Please specify the TensorFlow version"
    + 280                + " these weights were created with.",
    + 281                value=value,
    + 282                severity=ALERT,
    + 283                field="tensorflow_version",
    + 284            )
    + 285        return value
    + 286
    + 287
    + 288class OnnxWeightsDescr(WeightsEntryDescrBase):
    + 289    type = "onnx"
    + 290    weights_format_name: ClassVar[str] = "ONNX"
    + 291    opset_version: Optional[Annotated[int, Ge(7)]] = None
    + 292    """ONNX opset version"""
    + 293
    + 294    @field_validator("opset_version", mode="after")
    + 295    @classmethod
    + 296    def _ov(cls, value: Any):
    + 297        if value is None:
    + 298            issue_warning(
    + 299                "Missing ONNX opset version (aka ONNX opset number). "
    + 300                + "Please specify the ONNX opset version these weights were created"
    + 301                + " with.",
    + 302                value=value,
    + 303                severity=ALERT,
    + 304                field="opset_version",
    + 305            )
    + 306        return value
    + 307
    + 308
    + 309class PytorchStateDictWeightsDescr(WeightsEntryDescrBase):
    + 310    type = "pytorch_state_dict"
    + 311    weights_format_name: ClassVar[str] = "Pytorch State Dict"
    + 312    architecture: CustomCallable = Field(
    + 313        examples=["my_function.py:MyNetworkClass", "my_module.submodule.get_my_model"]
    + 314    )
    + 315    """callable returning a torch.nn.Module instance.
    + 316    Local implementation: `<relative path to file>:<identifier of implementation within the file>`.
    + 317    Implementation in a dependency: `<dependency-package>.<[dependency-module]>.<identifier>`."""
    + 318
    + 319    architecture_sha256: Annotated[
    + 320        Optional[Sha256],
    + 321        Field(
    + 322            description=(
    + 323                "The SHA256 of the architecture source file, if the architecture is not"
    + 324                " defined in a module listed in `dependencies`\n"
    + 325            )
    + 326            + SHA256_HINT,
    + 327        ),
    + 328    ] = None
    + 329    """The SHA256 of the architecture source file,
    + 330    if the architecture is not defined in a module listed in `dependencies`"""
    + 331
    + 332    @model_validator(mode="after")
    + 333    def check_architecture_sha256(self) -> Self:
    + 334        if isinstance(self.architecture, CallableFromFile):
    + 335            if self.architecture_sha256 is None:
    + 336                raise ValueError(
    + 337                    "Missing required `architecture_sha256` for `architecture` with"
    + 338                    + " source file."
    + 339                )
    + 340        elif self.architecture_sha256 is not None:
    + 341            raise ValueError(
    + 342                "Got `architecture_sha256` for architecture that does not have a source"
    + 343                + " file."
    + 344            )
    + 345
    + 346        return self
    + 347
    + 348    kwargs: Dict[str, Any] = Field(default_factory=dict)
    + 349    """key word arguments for the `architecture` callable"""
    + 350
    + 351    pytorch_version: Optional[Version] = None
    + 352    """Version of the PyTorch library used.
    + 353    If `dependencies` is specified it should include pytorch and the version has to match.
    + 354    (`dependencies` overrules `pytorch_version`)"""
    + 355
    + 356    @field_validator("pytorch_version", mode="after")
    + 357    @classmethod
    + 358    def _ptv(cls, value: Any):
    + 359        if value is None:
    + 360            issue_warning(
    + 361                "missing. Please specify the PyTorch version these"
    + 362                + " PyTorch state dict weights were created with.",
    + 363                value=value,
    + 364                severity=ALERT,
    + 365                field="pytorch_version",
    + 366            )
    + 367        return value
    + 368
    + 369
    + 370class TorchscriptWeightsDescr(WeightsEntryDescrBase):
    + 371    type = "torchscript"
    + 372    weights_format_name: ClassVar[str] = "TorchScript"
    + 373    pytorch_version: Optional[Version] = None
    + 374    """Version of the PyTorch library used."""
    + 375
    + 376    @field_validator("pytorch_version", mode="after")
    + 377    @classmethod
    + 378    def _ptv(cls, value: Any):
    + 379        if value is None:
    + 380            issue_warning(
    + 381                "missing. Please specify the PyTorch version these"
    + 382                + " Torchscript weights were created with.",
    + 383                value=value,
    + 384                severity=ALERT,
    + 385                field="pytorch_version",
    + 386            )
    + 387        return value
    + 388
    + 389
    + 390class TensorflowJsWeightsDescr(WeightsEntryDescrBase):
    + 391    type = "tensorflow_js"
    + 392    weights_format_name: ClassVar[str] = "Tensorflow.js"
    + 393    tensorflow_version: Optional[Version] = None
    + 394    """Version of the TensorFlow library used."""
    + 395
    + 396    @field_validator("tensorflow_version", mode="after")
    + 397    @classmethod
    + 398    def _tfv(cls, value: Any):
    + 399        if value is None:
    + 400            issue_warning(
    + 401                "missing. Please specify the TensorFlow version"
    + 402                + " these TensorflowJs weights were created with.",
    + 403                value=value,
    + 404                severity=ALERT,
    + 405                field="tensorflow_version",
    + 406            )
    + 407        return value
    + 408
    + 409    source: ImportantFileSource
    + 410    """∈📦 The multi-file weights.
    + 411    All required files/folders should be a zip archive."""
    + 412
    + 413
    + 414class TensorflowSavedModelBundleWeightsDescr(WeightsEntryDescrBase):
    + 415    type = "tensorflow_saved_model_bundle"
    + 416    weights_format_name: ClassVar[str] = "Tensorflow Saved Model"
    + 417    tensorflow_version: Optional[Version] = None
    + 418    """Version of the TensorFlow library used."""
    + 419
    + 420    @field_validator("tensorflow_version", mode="after")
    + 421    @classmethod
    + 422    def _tfv(cls, value: Any):
    + 423        if value is None:
    + 424            issue_warning(
    + 425                "missing. Please specify the TensorFlow version"
    + 426                + " these Tensorflow saved model bundle weights were created with.",
    + 427                value=value,
    + 428                severity=ALERT,
    + 429                field="tensorflow_version",
    + 430            )
    + 431        return value
    + 432
    + 433
    + 434class ParameterizedInputShape(Node):
    + 435    """A sequence of valid shapes given by `shape_k = min + k * step for k in {0, 1, ...}`."""
    + 436
    + 437    min: NotEmpty[List[int]]
    + 438    """The minimum input shape"""
    + 439
    + 440    step: NotEmpty[List[int]]
    + 441    """The minimum shape change"""
    + 442
    + 443    def __len__(self) -> int:
    + 444        return len(self.min)
    + 445
    + 446    @model_validator(mode="after")
    + 447    def matching_lengths(self) -> Self:
    + 448        if len(self.min) != len(self.step):
    + 449            raise ValueError("`min` and `step` required to have the same length")
    + 450
    + 451        return self
    + 452
    + 453
    + 454class ImplicitOutputShape(Node):
    + 455    """Output tensor shape depending on an input tensor shape.
    + 456    `shape(output_tensor) = shape(input_tensor) * scale + 2 * offset`"""
    + 457
    + 458    reference_tensor: TensorName
    + 459    """Name of the reference tensor."""
    + 460
    + 461    scale: NotEmpty[List[Optional[float]]]
    + 462    """output_pix/input_pix for each dimension.
    + 463    'null' values indicate new dimensions, whose length is defined by 2*`offset`"""
    + 464
    + 465    offset: NotEmpty[List[Union[int, Annotated[float, MultipleOf(0.5)]]]]
    + 466    """Position of origin wrt to input."""
    + 467
    + 468    def __len__(self) -> int:
    + 469        return len(self.scale)
    + 470
    + 471    @model_validator(mode="after")
    + 472    def matching_lengths(self) -> Self:
    + 473        if len(self.scale) != len(self.offset):
    + 474            raise ValueError(
    + 475                f"scale {self.scale} has to have same length as offset {self.offset}!"
    + 476            )
    + 477        # if we have an expanded dimension, make sure that its offset is not zero
    + 478        for sc, off in zip(self.scale, self.offset):
    + 479            if sc is None and not off:
    + 480                raise ValueError("`offset` must not be zero if `scale` is none/zero")
    + 481
    + 482        return self
    + 483
    + 484
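The formula `shape(output_tensor) = shape(input_tensor) * scale + 2 * offset` can be evaluated by hand; here is a minimal, illustrative sketch (not the bioimageio.core implementation) that also treats `None` scale entries as expanded axes of length `2 * offset`, mirroring the note above.

```python
from typing import List, Optional, Sequence

def implicit_output_shape(
    input_shape: Sequence[int],
    scale: Sequence[Optional[float]],
    offset: Sequence[float],
) -> List[int]:
    """Sketch of `shape(output) = shape(input) * scale + 2 * offset`.

    A `None` scale marks a new (expanded) axis whose length is `2 * offset`;
    the corresponding input axis is treated as absent.
    """
    out: List[int] = []
    in_axes = iter(input_shape)
    for sc, off in zip(scale, offset):
        if sc is None:
            out.append(int(2 * off))  # expanded axis
        else:
            out.append(int(next(in_axes) * sc + 2 * off))
    return out

# reference tensor of shape (1, 512, 512, 1), same-size output with one new axis:
print(implicit_output_shape((1, 512, 512, 1), [1.0, None, 1.0, 1.0, 1.0], [0, 0.5, 0, 0, 0]))
# -> [1, 1, 512, 512, 1]
```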
    + 485class TensorDescrBase(Node):
    + 486    name: TensorName
    + 487    """Tensor name. No duplicates are allowed."""
    + 488
    + 489    description: str = ""
    + 490
    + 491    axes: AxesStr
    + 492    """Axes identifying characters. Same length and order as the axes in `shape`.
    + 493    | axis | description |
    + 494    | --- | --- |
    + 495    |  b  |  batch (groups multiple samples) |
    + 496    |  i  |  instance/index/element |
    + 497    |  t  |  time |
    + 498    |  c  |  channel |
    + 499    |  z  |  spatial dimension z |
    + 500    |  y  |  spatial dimension y |
    + 501    |  x  |  spatial dimension x |
    + 502    """
    + 503
    + 504    data_range: Optional[
    + 505        Tuple[Annotated[float, AllowInfNan(True)], Annotated[float, AllowInfNan(True)]]
    + 506    ] = None
    + 507    """Tuple `(minimum, maximum)` specifying the allowed range of the data in this tensor.
    + 508    If not specified, the full data range that can be expressed in `data_type` is allowed."""
    + 509
    + 510
    + 511class ProcessingKwargs(KwargsNode):
    + 512    """base class for pre-/postprocessing key word arguments"""
    + 513
    + 514
    + 515class ProcessingDescrBase(NodeWithExplicitlySetFields):
    + 516    """processing base class"""
    + 517
    + 518    # name: Literal[PreprocessingName, PostprocessingName]  # todo: make abstract field
    + 519    fields_to_set_explicitly: ClassVar[FrozenSet[LiteralString]] = frozenset({"name"})
    + 520
    + 521
    + 522class BinarizeKwargs(ProcessingKwargs):
    + 523    """key word arguments for `BinarizeDescr`"""
    + 524
    + 525    threshold: float
    + 526    """The fixed threshold"""
    + 527
    + 528
    + 529class BinarizeDescr(ProcessingDescrBase):
    + 530    """Binarize the tensor with a fixed `BinarizeKwargs.threshold`.
    + 531    Values above the threshold will be set to one, values below the threshold to zero.
    + 532    """
    + 533
    + 534    name: Literal["binarize"] = "binarize"
    + 535    kwargs: BinarizeKwargs
    + 536
    + 537
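A minimal numpy sketch of the binarize step described above (illustrative only; how values exactly equal to the threshold are handled is an assumption here):

```python
import numpy as np

def binarize(tensor: np.ndarray, threshold: float) -> np.ndarray:
    # values above the threshold -> 1, values below -> 0
    # (ties are not specified above; `>` is assumed in this sketch)
    return (tensor > threshold).astype(tensor.dtype)

print(binarize(np.array([0.1, 0.5, 0.9], dtype=np.float32), threshold=0.5))  # [0. 0. 1.]
```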
    + 538class ClipKwargs(ProcessingKwargs):
    + 539    """key word arguments for `ClipDescr`"""
    + 540
    + 541    min: float
    + 542    """minimum value for clipping"""
    + 543    max: float
    + 544    """maximum value for clipping"""
    + 545
    + 546
    + 547class ClipDescr(ProcessingDescrBase):
    + 548    """Clip tensor values to a range.
    + 549
    + 550    Set tensor values below `ClipKwargs.min` to `ClipKwargs.min`
    + 551    and above `ClipKwargs.max` to `ClipKwargs.max`.
    + 552    """
    + 553
    + 554    name: Literal["clip"] = "clip"
    + 555
    + 556    kwargs: ClipKwargs
    + 557
    + 558
    + 559class ScaleLinearKwargs(ProcessingKwargs):
    + 560    """key word arguments for `ScaleLinearDescr`"""
    + 561
    + 562    axes: Annotated[Optional[AxesInCZYX], Field(examples=["xy"])] = None
    + 563    """The subset of axes to scale jointly.
    + 564    For example xy to scale the two image axes for 2d data jointly."""
    + 565
    + 566    gain: Union[float, List[float]] = 1.0
    + 567    """multiplicative factor"""
    + 568
    + 569    offset: Union[float, List[float]] = 0.0
    + 570    """additive term"""
    + 571
    + 572    @model_validator(mode="after")
    + 573    def either_gain_or_offset(self) -> Self:
    + 574        if (
    + 575            self.gain == 1.0
    + 576            or isinstance(self.gain, list)
    + 577            and all(g == 1.0 for g in self.gain)
    + 578        ) and (
    + 579            self.offset == 0.0
    + 580            or isinstance(self.offset, list)
    + 581            and all(off == 0.0 for off in self.offset)
    + 582        ):
    + 583            raise ValueError(
    + 584                "Redundant linear scaling not allowed. Set `gain` != 1.0 and/or `offset` !="
    + 585                + " 0.0."
    + 586            )
    + 587
    + 588        return self
    + 589
    + 590
    + 591class ScaleLinearDescr(ProcessingDescrBase):
    + 592    """Fixed linear scaling."""
    + 593
    + 594    name: Literal["scale_linear"] = "scale_linear"
    + 595    kwargs: ScaleLinearKwargs
    + 596
    + 597
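A plain-numpy sketch of `scale_linear` (`out = gain * tensor + offset`); broadcasting per-channel gains over a channel-last array is an assumption made for this example, not something the spec prescribes:

```python
import numpy as np

def scale_linear(tensor: np.ndarray, gain, offset) -> np.ndarray:
    # fixed linear scaling: out = gain * tensor + offset
    # lists of gains/offsets are interpreted per channel (channel-last here, by assumption)
    return np.asarray(gain) * tensor + np.asarray(offset)

x = np.ones((2, 2, 3), dtype=np.float32)  # xyc image with 3 channels
print(scale_linear(x, gain=[1.0, 2.0, 3.0], offset=0.5)[0, 0])  # -> [1.5 2.5 3.5]
```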
    + 598class SigmoidDescr(ProcessingDescrBase):
    + 599    """The logistic sigmoid function, a.k.a. expit function."""
    + 600
    + 601    name: Literal["sigmoid"] = "sigmoid"
    + 602
    + 603    @property
    + 604    def kwargs(self) -> ProcessingKwargs:
    + 605        """empty kwargs"""
    + 606        return ProcessingKwargs()
    + 607
    + 608
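For completeness, the sigmoid step in plain numpy (illustrative only):

```python
import numpy as np

def sigmoid(tensor: np.ndarray) -> np.ndarray:
    # logistic sigmoid / expit: 1 / (1 + exp(-x))
    return 1.0 / (1.0 + np.exp(-tensor))

print(sigmoid(np.array([-2.0, 0.0, 2.0])))  # ~[0.119, 0.5, 0.881]
```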
    + 609class ZeroMeanUnitVarianceKwargs(ProcessingKwargs):
    + 610    """key word arguments for `ZeroMeanUnitVarianceDescr`"""
    + 611
    + 612    mode: Literal["fixed", "per_dataset", "per_sample"] = "fixed"
    + 613    """Mode for computing mean and variance.
    + 614    |     mode    |             description              |
    + 615    | ----------- | ------------------------------------ |
    + 616    |   fixed     | Fixed values for mean and variance   |
    + 617    | per_dataset | Compute for the entire dataset       |
    + 618    | per_sample  | Compute for each sample individually |
    + 619    """
    + 620    axes: Annotated[AxesInCZYX, Field(examples=["xy"])]
    + 621    """The subset of axes to normalize jointly.
    + 622    For example `xy` to normalize the two image axes for 2d data jointly."""
    + 623
    + 624    mean: Annotated[
    + 625        Union[float, NotEmpty[List[float]], None], Field(examples=[(1.1, 2.2, 3.3)])
    + 626    ] = None
    + 627    """The mean value(s) to use for `mode: fixed`.
    + 628    For example `[1.1, 2.2, 3.3]` in the case of a 3 channel image with `axes: xy`."""
    + 629    # todo: check if means match input axes (for mode 'fixed')
    + 630
    + 631    std: Annotated[
    + 632        Union[float, NotEmpty[List[float]], None], Field(examples=[(0.1, 0.2, 0.3)])
    + 633    ] = None
    + 634    """The standard deviation values to use for `mode: fixed`. Analogous to mean."""
    + 635
    + 636    eps: Annotated[float, Interval(gt=0, le=0.1)] = 1e-6
    + 637    """epsilon for numeric stability: `out = (tensor - mean) / (std + eps)`."""
    + 638
    + 639    @model_validator(mode="after")
    + 640    def mean_and_std_match_mode(self) -> Self:
    + 641        if self.mode == "fixed" and (self.mean is None or self.std is None):
    + 642            raise ValueError("`mean` and `std` are required for `mode: fixed`.")
    + 643        elif self.mode != "fixed" and (self.mean is not None or self.std is not None):
    + 644            raise ValueError(f"`mean` and `std` not allowed for `mode: {self.mode}`")
    + 645
    + 646        return self
    + 647
    + 648
    + 649class ZeroMeanUnitVarianceDescr(ProcessingDescrBase):
    + 650    """Subtract mean and divide by variance."""
    + 651
    + 652    name: Literal["zero_mean_unit_variance"] = "zero_mean_unit_variance"
    + 653    kwargs: ZeroMeanUnitVarianceKwargs
    + 654
    + 655
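A plain-numpy sketch of the normalization `out = (tensor - mean) / (std + eps)`; note that the spec field `axes` uses axis characters such as `xy`, while the sketch below takes numpy axis indices (an assumption for brevity):

```python
import numpy as np

def zero_mean_unit_variance(
    tensor: np.ndarray, axes=None, mean=None, std=None, eps: float = 1e-6
) -> np.ndarray:
    # `mode: fixed` -> mean/std are given; otherwise compute them over the normalization axes
    if mean is None:
        mean = tensor.mean(axis=axes, keepdims=True)
    if std is None:
        std = tensor.std(axis=axes, keepdims=True)
    return (tensor - np.asarray(mean)) / (np.asarray(std) + eps)

x = np.random.rand(1, 64, 64).astype(np.float32)  # byx
out = zero_mean_unit_variance(x, axes=(1, 2))      # normalize the yx axes jointly, per sample
print(float(out.mean()), float(out.std()))         # ~0.0, ~1.0
```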
    + 656class ScaleRangeKwargs(ProcessingKwargs):
    + 657    """key word arguments for `ScaleRangeDescr`
    + 658
    + 659    For `min_percentile`=0.0 (the default) and `max_percentile`=100 (the default)
    + 660    this processing step normalizes data to the [0, 1] interval.
    + 661    For other percentiles the normalized values will partially be outside the [0, 1]
    + 662    interval. Use `ScaleRangeDescr` followed by `ClipDescr` if you want to limit the
    + 663    normalized values to a range.
    + 664    """
    + 665
    + 666    mode: Literal["per_dataset", "per_sample"]
    + 667    """Mode for computing percentiles.
    + 668    |     mode    |             description              |
    + 669    | ----------- | ------------------------------------ |
    + 670    | per_dataset | compute for the entire dataset       |
    + 671    | per_sample  | compute for each sample individually |
    + 672    """
    + 673    axes: Annotated[AxesInCZYX, Field(examples=["xy"])]
    + 674    """The subset of axes to normalize jointly.
    + 675    For example xy to normalize the two image axes for 2d data jointly."""
    + 676
    + 677    min_percentile: Annotated[Union[int, float], Interval(ge=0, lt=100)] = 0.0
    + 678    """The lower percentile used to determine the value to align with zero."""
    + 679
    + 680    max_percentile: Annotated[Union[int, float], Interval(gt=1, le=100)] = 100.0
    + 681    """The upper percentile used to determine the value to align with one.
    + 682    Has to be bigger than `min_percentile`.
    + 683    The range is 1 to 100 instead of 0 to 100 to avoid mistakenly
    + 684    accepting percentiles specified in the range 0.0 to 1.0."""
    + 685
    + 686    @model_validator(mode="after")
    + 687    def min_smaller_max(self, info: ValidationInfo) -> Self:
    + 688        if self.min_percentile >= self.max_percentile:
    + 689            raise ValueError(
    + 690                f"min_percentile {self.min_percentile} >= max_percentile"
    + 691                + f" {self.max_percentile}"
    + 692            )
    + 693
    + 694        return self
    + 695
    + 696    eps: Annotated[float, Interval(gt=0, le=0.1)] = 1e-6
    + 697    """Epsilon for numeric stability.
    + 698    `out = (tensor - v_lower) / (v_upper - v_lower + eps)`;
    + 699    with `v_lower,v_upper` values at the respective percentiles."""
    + 700
    + 701    reference_tensor: Optional[TensorName] = None
    + 702    """Tensor name to compute the percentiles from. Default: The tensor itself.
    + 703    For any tensor in `inputs` only input tensor references are allowed.
    + 704    For a tensor in `outputs` only input tensor references are allowed if `mode: per_dataset`"""
    + 705
    + 706
    + 707class ScaleRangeDescr(ProcessingDescrBase):
    + 708    """Scale with percentiles."""
    + 709
    + 710    name: Literal["scale_range"] = "scale_range"
    + 711    kwargs: ScaleRangeKwargs
    + 712
    + 713
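A plain-numpy sketch of the percentile normalization `out = (tensor - v_lower) / (v_upper - v_lower + eps)` for `mode: per_sample`; as above, numpy axis indices stand in for the spec's axis characters:

```python
import numpy as np

def scale_range(
    tensor: np.ndarray,
    axes=None,
    min_percentile: float = 0.0,
    max_percentile: float = 100.0,
    eps: float = 1e-6,
) -> np.ndarray:
    # per-sample percentile normalization:
    # out = (tensor - v_lower) / (v_upper - v_lower + eps)
    v_lower = np.percentile(tensor, min_percentile, axis=axes, keepdims=True)
    v_upper = np.percentile(tensor, max_percentile, axis=axes, keepdims=True)
    return (tensor - v_lower) / (v_upper - v_lower + eps)

x = np.random.rand(1, 128, 128).astype(np.float32)
out = scale_range(x, axes=(1, 2), min_percentile=1.0, max_percentile=99.8)
print(float(out.min()), float(out.max()))  # roughly [0, 1], with slight over/undershoot
```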
    + 714class ScaleMeanVarianceKwargs(ProcessingKwargs):
    + 715    """key word arguments for `ScaleMeanVarianceDescr`"""
    + 716
    + 717    mode: Literal["per_dataset", "per_sample"]
    + 718    """Mode for computing mean and variance.
    + 719    |     mode    |             description              |
    + 720    | ----------- | ------------------------------------ |
    + 721    | per_dataset | Compute for the entire dataset       |
    + 722    | per_sample  | Compute for each sample individually |
    + 723    """
    + 724
    + 725    reference_tensor: TensorName
    + 726    """Name of tensor to match."""
    + 727
    + 728    axes: Annotated[Optional[AxesInCZYX], Field(examples=["xy"])] = None
    + 729    """The subset of axes to scale jointly.
    + 730    For example xy to normalize the two image axes for 2d data jointly.
    + 731    Default: scale all non-batch axes jointly."""
    + 732
    + 733    eps: Annotated[float, Interval(gt=0, le=0.1)] = 1e-6
    + 734    """Epsilon for numeric stability:
    + 735    `out = (tensor - mean) / (std + eps) * (ref_std + eps) + ref_mean`."""
    + 736
    + 737
    + 738class ScaleMeanVarianceDescr(ProcessingDescrBase):
    + 739    """Scale the tensor s.t. its mean and variance match a reference tensor."""
    + 740
    + 741    name: Literal["scale_mean_variance"] = "scale_mean_variance"
    + 742    kwargs: ScaleMeanVarianceKwargs
    + 743
    + 744
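A plain-numpy sketch of `out = (tensor - mean) / (std + eps) * (ref_std + eps) + ref_mean`, matching a tensor's statistics to a reference tensor (illustrative; per-sample over all axes jointly):

```python
import numpy as np

def scale_mean_variance(tensor: np.ndarray, reference: np.ndarray, eps: float = 1e-6) -> np.ndarray:
    # match mean/variance of `tensor` to a reference tensor:
    # out = (tensor - mean) / (std + eps) * (ref_std + eps) + ref_mean
    return (tensor - tensor.mean()) / (tensor.std() + eps) * (reference.std() + eps) + reference.mean()

t = np.random.rand(64, 64)
ref = 5.0 + 3.0 * np.random.rand(64, 64)
out = scale_mean_variance(t, ref)
print(float(out.mean()), float(ref.mean()))  # approximately equal
print(float(out.std()), float(ref.std()))    # approximately equal
```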
    + 745PreprocessingDescr = Annotated[
    + 746    Union[
    + 747        BinarizeDescr,
    + 748        ClipDescr,
    + 749        ScaleLinearDescr,
    + 750        SigmoidDescr,
    + 751        ZeroMeanUnitVarianceDescr,
    + 752        ScaleRangeDescr,
    + 753    ],
    + 754    Discriminator("name"),
    + 755]
    + 756PostprocessingDescr = Annotated[
    + 757    Union[
    + 758        BinarizeDescr,
    + 759        ClipDescr,
    + 760        ScaleLinearDescr,
    + 761        SigmoidDescr,
    + 762        ZeroMeanUnitVarianceDescr,
    + 763        ScaleRangeDescr,
    + 764        ScaleMeanVarianceDescr,
    + 765    ],
    + 766    Discriminator("name"),
    + 767]
    + 768
    + 769
    + 770class InputTensorDescr(TensorDescrBase):
    + 771    data_type: Literal["float32", "uint8", "uint16"]
    + 772    """For now an input tensor is expected to be given as `float32`.
    + 773    The data flow in bioimage.io models is explained
    + 774    [in this diagram.](https://docs.google.com/drawings/d/1FTw8-Rn6a6nXdkZ_SkMumtcjvur9mtIhRqLwnKqZNHM/edit)."""
    + 775
    + 776    shape: Annotated[
    + 777        Union[Sequence[int], ParameterizedInputShape],
    + 778        Field(
    + 779            examples=[(1, 512, 512, 1), dict(min=(1, 64, 64, 1), step=(0, 32, 32, 0))]
    + 780        ),
    + 781    ]
    + 782    """Specification of input tensor shape."""
    + 783
    + 784    preprocessing: List[PreprocessingDescr] = Field(default_factory=list)
    + 785    """Description of how this input should be preprocessed."""
    + 786
    + 787    @model_validator(mode="after")
    + 788    def zero_batch_step_and_one_batch_size(self) -> Self:
    + 789        bidx = self.axes.find("b")
    + 790        if bidx == -1:
    + 791            return self
    + 792
    + 793        if isinstance(self.shape, ParameterizedInputShape):
    + 794            step = self.shape.step
    + 795            shape = self.shape.min
    + 796            if step[bidx] != 0:
    + 797                raise ValueError(
    + 798                    "Input shape step has to be zero in the batch dimension (the batch"
    + 799                    + " dimension can always be increased, but `step` should specify how"
    + 800                    + " to increase the minimal shape to find the largest single batch"
    + 801                    + " shape)"
    + 802                )
    + 803        else:
    + 804            shape = self.shape
    + 805
    + 806        if shape[bidx] != 1:
    + 807            raise ValueError("Input shape has to be 1 in the batch dimension b.")
    + 808
    + 809        return self
    + 810
    + 811    @model_validator(mode="after")
    + 812    def validate_preprocessing_kwargs(self) -> Self:
    + 813        for p in self.preprocessing:
    + 814            kwargs_axes = p.kwargs.get("axes", "")
    + 815            if not isinstance(kwargs_axes, str):
    + 816                raise ValueError(
    + 817                    f"Expected an `axes` string, but got {type(kwargs_axes)}"
    + 818                )
    + 819
    + 820            if any(a not in self.axes for a in kwargs_axes):
    + 821                raise ValueError("`kwargs.axes` needs to be subset of `axes`")
    + 822
    + 823        return self
    + 824
    + 825
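The two batch-dimension rules enforced by `zero_batch_step_and_one_batch_size` above can be summarized as a tiny standalone check (illustrative only; `check_batch_rules` is a made-up helper, not spec API):

```python
from typing import Optional, Sequence

def check_batch_rules(axes: str, min_shape: Sequence[int], step: Optional[Sequence[int]] = None) -> None:
    bidx = axes.find("b")
    if bidx == -1:
        return  # no batch axis, nothing to check
    if step is not None and step[bidx] != 0:
        raise ValueError("step must be 0 in the batch dimension")
    if min_shape[bidx] != 1:
        raise ValueError("(minimal) shape must be 1 in the batch dimension")

check_batch_rules("byxc", (1, 64, 64, 1), (0, 32, 32, 0))  # ok
try:
    check_batch_rules("byxc", (2, 64, 64, 1))
except ValueError as e:
    print("rejected:", e)
```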
    + 826class OutputTensorDescr(TensorDescrBase):
    + 827    data_type: Literal[
    + 828        "float32",
    + 829        "float64",
    + 830        "uint8",
    + 831        "int8",
    + 832        "uint16",
    + 833        "int16",
    + 834        "uint32",
    + 835        "int32",
    + 836        "uint64",
    + 837        "int64",
    + 838        "bool",
    + 839    ]
    + 840    """Data type.
    + 841    The data flow in bioimage.io models is explained
    + 842    [in this diagram.](https://docs.google.com/drawings/d/1FTw8-Rn6a6nXdkZ_SkMumtcjvur9mtIhRqLwnKqZNHM/edit)."""
    + 843
    + 844    shape: Union[Sequence[int], ImplicitOutputShape]
    + 845    """Output tensor shape."""
    + 846
    + 847    halo: Optional[Sequence[int]] = None
    + 848    """The `halo` that should be cropped from the output tensor to avoid boundary effects.
    + 849    The `halo` is to be cropped from both sides, i.e. `shape_after_crop = shape - 2 * halo`.
    + 850    To document a `halo` that is already cropped by the model `shape.offset` has to be used instead."""
    + 851
    + 852    postprocessing: List[PostprocessingDescr] = Field(default_factory=list)
    + 853    """Description of how this output should be postprocessed."""
    + 854
    + 855    @model_validator(mode="after")
    + 856    def matching_halo_length(self) -> Self:
    + 857        if self.halo and len(self.halo) != len(self.shape):
    + 858            raise ValueError(
    + 859                f"halo {self.halo} has to have same length as shape {self.shape}!"
    + 860            )
    + 861
    + 862        return self
    + 863
    + 864    @model_validator(mode="after")
    + 865    def validate_postprocessing_kwargs(self) -> Self:
    + 866        for p in self.postprocessing:
    + 867            kwargs_axes = p.kwargs.get("axes", "")
    + 868            if not isinstance(kwargs_axes, str):
    + 869                raise ValueError(f"Expected {kwargs_axes} to be a string")
    + 870
    + 871            if any(a not in self.axes for a in kwargs_axes):
    + 872                raise ValueError("`kwargs.axes` needs to be subset of axes")
    + 873
    + 874        return self
    + 875
    + 876
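A small numpy sketch of cropping a `halo` from both sides of every axis (`shape_after_crop = shape - 2 * halo`); illustrative only, not the bioimageio.core implementation:

```python
import numpy as np

def crop_halo(tensor: np.ndarray, halo) -> np.ndarray:
    # crop `halo` voxels from both sides of every axis:
    # shape_after_crop = shape - 2 * halo
    slices = tuple(slice(h, s - h if h else None) for h, s in zip(halo, tensor.shape))
    return tensor[slices]

out = np.zeros((1, 256, 256, 1), dtype=np.float32)  # byxc output
print(crop_halo(out, halo=(0, 16, 16, 0)).shape)     # -> (1, 224, 224, 1)
```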
    + 877KnownRunMode = Literal["deepimagej"]
    + 878
    + 879
    + 880class RunMode(Node):
    + 881    name: Annotated[
    + 882        Union[KnownRunMode, str], warn(KnownRunMode, "Unknown run mode '{value}'.")
    + 883    ]
    + 884    """Run mode name"""
    + 885
    + 886    kwargs: Dict[str, Any] = Field(default_factory=dict)
    + 887    """Run mode specific key word arguments"""
    + 888
    + 889
    + 890class LinkedModel(Node):
    + 891    """Reference to a bioimage.io model."""
    + 892
    + 893    id: ModelId
    + 894    """A valid model `id` from the bioimage.io collection."""
    + 895
    + 896    version_number: Optional[int] = None
    + 897    """version number (n-th published version, not the semantic version) of linked model"""
    + 898
    + 899
    + 900def package_weights(
    + 901    value: Node,  # Union[v0_4.WeightsDescr, v0_5.WeightsDescr]
    + 902    handler: SerializerFunctionWrapHandler,
    + 903    info: SerializationInfo,
    + 904):
    + 905    ctxt = packaging_context_var.get()
    + 906    if ctxt is not None and ctxt.weights_priority_order is not None:
    + 907        for wf in ctxt.weights_priority_order:
    + 908            w = getattr(value, wf, None)
    + 909            if w is not None:
    + 910                break
    + 911        else:
    + 912            raise ValueError(
    + 913                "None of the weight formats in `weights_priority_order`"
    + 914                + f" ({ctxt.weights_priority_order}) is present in the given model."
    + 915            )
    + 916
    + 917        assert isinstance(w, Node), type(w)
    + 918        # construct WeightsDescr with new single weight format entry
    + 919        new_w = w.model_construct(**{k: v for k, v in w if k != "parent"})
    + 920        value = value.model_construct(None, **{wf: new_w})
    + 921
    + 922    return handler(
    + 923        value, info  # pyright: ignore[reportArgumentType]  # taken from pydantic docs
    + 924    )
    + 925
    + 926
    + 927class ModelDescr(GenericModelDescrBase, title="bioimage.io model specification"):
    + 928    """Specification of the fields used in a bioimage.io-compliant RDF that describes AI models with pretrained weights.
    + 929
    + 930    These fields are typically stored in a YAML file which we call a model resource description file (model RDF).
    + 931    """
    + 932
    + 933    format_version: Literal["0.4.10",] = "0.4.10"
    + 934    """Version of the bioimage.io model description specification used.
    + 935    When creating a new model always use the latest micro/patch version described here.
    + 936    The `format_version` is important for any consumer software to understand how to parse the fields.
    + 937    """
    + 938
    + 939    type: Literal["model"] = "model"
    + 940    """Specialized resource type 'model'"""
    + 941
    + 942    id: Optional[ModelId] = None
    + 943    """bioimage.io-wide unique resource identifier
    + 944    assigned by bioimage.io; version **un**specific."""
    + 945
    + 946    authors: NotEmpty[  # pyright: ignore[reportGeneralTypeIssues]  # make mandatory
    + 947        List[Author]
    + 948    ]
    + 949    """The authors are the creators of the model RDF and the primary points of contact."""
    + 950
    + 951    documentation: Annotated[
    + 952        ImportantFileSource,
    + 953        Field(
    + 954            examples=[
    + 955                "https://raw.githubusercontent.com/bioimage-io/spec-bioimage-io/main/example_descriptions/models/unet2d_nuclei_broad/README.md",
    + 956                "README.md",
    + 957            ],
    + 958        ),
    + 959    ]
    + 960    """∈📦 URL or relative path to a markdown file with additional documentation.
    + 961    The recommended documentation file name is `README.md`. An `.md` suffix is mandatory.
    + 962    The documentation should include a '[#[#]]# Validation' (sub)section
    + 963    with details on how to quantitatively validate the model on unseen data."""
    + 964
    + 965    inputs: NotEmpty[List[InputTensorDescr]]
    + 966    """Describes the input tensors expected by this model."""
    + 967
    + 968    license: Annotated[
    + 969        Union[LicenseId, str],
    + 970        warn(LicenseId, "Unknown license id '{value}'."),
    + 971        Field(examples=["CC0-1.0", "MIT", "BSD-2-Clause"]),
    + 972    ]
    + 973    """A [SPDX license identifier](https://spdx.org/licenses/).
    + 974    We do not support custom licenses beyond the SPDX license list; if you need that, please
    + 975    [open a GitHub issue](https://github.com/bioimage-io/spec-bioimage-io/issues/new/choose
    + 976    ) to discuss your intentions with the community."""
    + 977
    + 978    name: Annotated[
    + 979        str,
    + 980        MinLen(1),
    + 981        warn(MinLen(5), "Name shorter than 5 characters.", INFO),
    + 982        warn(MaxLen(64), "Name longer than 64 characters.", INFO),
    + 983    ]
    + 984    """A human-readable name of this model.
    + 985    It should be no longer than 64 characters and only contain letter, number, underscore, minus or space characters."""
    + 986
    + 987    outputs: NotEmpty[List[OutputTensorDescr]]
    + 988    """Describes the output tensors."""
    + 989
    + 990    @field_validator("inputs", "outputs")
    + 991    @classmethod
    + 992    def unique_tensor_descr_names(
    + 993        cls, value: Sequence[Union[InputTensorDescr, OutputTensorDescr]]
    + 994    ) -> Sequence[Union[InputTensorDescr, OutputTensorDescr]]:
    + 995        unique_names = {str(v.name) for v in value}
    + 996        if len(unique_names) != len(value):
    + 997            raise ValueError("Duplicate tensor descriptor names")
    + 998
    + 999        return value
    +1000
    +1001    @model_validator(mode="after")
    +1002    def unique_io_names(self) -> Self:
    +1003        unique_names = {str(ss.name) for s in (self.inputs, self.outputs) for ss in s}
    +1004        if len(unique_names) != (len(self.inputs) + len(self.outputs)):
    +1005            raise ValueError("Duplicate tensor descriptor names across inputs/outputs")
    +1006
    +1007        return self
    +1008
    +1009    @model_validator(mode="after")
    +1010    def minimum_shape2valid_output(self) -> Self:
    +1011        tensors_by_name: Dict[
    +1012            TensorName, Union[InputTensorDescr, OutputTensorDescr]
    +1013        ] = {t.name: t for t in self.inputs + self.outputs}
    +1014
    +1015        for out in self.outputs:
    +1016            if isinstance(out.shape, ImplicitOutputShape):
    +1017                ndim_ref = len(tensors_by_name[out.shape.reference_tensor].shape)
    +1018                ndim_out_ref = len(
    +1019                    [scale for scale in out.shape.scale if scale is not None]
    +1020                )
    +1021                if ndim_ref != ndim_out_ref:
    +1022                    expanded_dim_note = (
    +1023                        " Note that expanded dimensions (`scale`: null) are not"
    +1024                        + f" counted for {out.name}'s dimensionality here."
    +1025                        if None in out.shape.scale
    +1026                        else ""
    +1027                    )
    +1028                    raise ValueError(
    +1029                        f"Referenced tensor '{out.shape.reference_tensor}' with"
    +1030                        + f" {ndim_ref} dimensions does not match output tensor"
    +1031                        + f" '{out.name}' with"
    +1032                        + f" {ndim_out_ref} dimensions.{expanded_dim_note}"
    +1033                    )
    +1034
    +1035            min_out_shape = self._get_min_shape(out, tensors_by_name)
    +1036            if out.halo:
    +1037                halo = out.halo
    +1038                halo_msg = f" for halo {out.halo}"
    +1039            else:
    +1040                halo = [0] * len(min_out_shape)
    +1041                halo_msg = ""
    +1042
    +1043            if any([s - 2 * h < 1 for s, h in zip(min_out_shape, halo)]):
    +1044                raise ValueError(
    +1045                    f"Minimal shape {min_out_shape} of output {out.name} is too"
    +1046                    + f" small{halo_msg}."
    +1047                )
    +1048
    +1049        return self
    +1050
    +1051    @classmethod
    +1052    def _get_min_shape(
    +1053        cls,
    +1054        t: Union[InputTensorDescr, OutputTensorDescr],
    +1055        tensors_by_name: Dict[TensorName, Union[InputTensorDescr, OutputTensorDescr]],
    +1056    ) -> Sequence[int]:
    +1057        """output with subtracted halo has to result in meaningful output even for the minimal input
    +1058        see https://github.com/bioimage-io/spec-bioimage-io/issues/392
    +1059        """
    +1060        if isinstance(t.shape, collections.abc.Sequence):
    +1061            return t.shape
    +1062        elif isinstance(t.shape, ParameterizedInputShape):
    +1063            return t.shape.min
    +1064        elif isinstance(t.shape, ImplicitOutputShape):
    +1065            pass
    +1066        else:
    +1067            assert_never(t.shape)
    +1068
    +1069        ref_shape = cls._get_min_shape(
    +1070            tensors_by_name[t.shape.reference_tensor], tensors_by_name
    +1071        )
    +1072
    +1073        if None not in t.shape.scale:
    +1074            scale: Sequence[float, ...] = t.shape.scale  # type: ignore
    +1075        else:
    +1076            expanded_dims = [idx for idx, sc in enumerate(t.shape.scale) if sc is None]
    +1077            new_ref_shape: List[int] = []
    +1078            for idx in range(len(t.shape.scale)):
    +1079                ref_idx = idx - sum(int(exp < idx) for exp in expanded_dims)
    +1080                new_ref_shape.append(1 if idx in expanded_dims else ref_shape[ref_idx])
    +1081
    +1082            ref_shape = new_ref_shape
    +1083            assert len(ref_shape) == len(t.shape.scale)
    +1084            scale = [0.0 if sc is None else sc for sc in t.shape.scale]
    +1085
    +1086        offset = t.shape.offset
    +1087        assert len(offset) == len(scale)
    +1088        return [int(rs * s + 2 * off) for rs, s, off in zip(ref_shape, scale, offset)]
    +1089
    +1090    @model_validator(mode="after")
    +1091    def validate_tensor_references_in_inputs(self) -> Self:
    +1092        for t in self.inputs:
    +1093            for proc in t.preprocessing:
    +1094                if "reference_tensor" not in proc.kwargs:
    +1095                    continue
    +1096
    +1097                ref_tensor = proc.kwargs["reference_tensor"]
    +1098                if ref_tensor is not None and str(ref_tensor) not in {
    +1099                    str(t.name) for t in self.inputs
    +1100                }:
    +1101                    raise ValueError(f"'{ref_tensor}' not found in inputs")
    +1102
    +1103                if ref_tensor == t.name:
    +1104                    raise ValueError(
    +1105                        f"invalid self reference for preprocessing of tensor {t.name}"
    +1106                    )
    +1107
    +1108        return self
    +1109
    +1110    @model_validator(mode="after")
    +1111    def validate_tensor_references_in_outputs(self) -> Self:
    +1112        for t in self.outputs:
    +1113            for proc in t.postprocessing:
    +1114                if "reference_tensor" not in proc.kwargs:
    +1115                    continue
    +1116                ref_tensor = proc.kwargs["reference_tensor"]
    +1117                if ref_tensor is not None and str(ref_tensor) not in {
    +1118                    str(t.name) for t in self.inputs
    +1119                }:
    +1120                    raise ValueError(f"{ref_tensor} not found in inputs")
    +1121
    +1122        return self
    +1123
    +1124    packaged_by: List[Author] = Field(default_factory=list)
    +1125    """The persons that have packaged and uploaded this model.
    +1126    Only required if those persons differ from the `authors`."""
    +1127
    +1128    parent: Optional[LinkedModel] = None
    +1129    """The model from which this model is derived, e.g. by fine-tuning the weights."""
    +1130
    +1131    @field_validator("parent", mode="before")
    +1132    @classmethod
    +1133    def ignore_url_parent(cls, parent: Any):
    +1134        if isinstance(parent, dict):
    +1135            return None
    +1136
    +1137        else:
    +1138            return parent
    +1139
    +1140    run_mode: Optional[RunMode] = None
    +1141    """Custom run mode for this model: for more complex prediction procedures like test time
    +1142    data augmentation that currently cannot be expressed in the specification.
    +1143    No standard run modes are defined yet."""
    +1144
    +1145    sample_inputs: List[ImportantFileSource] = Field(default_factory=list)
    +1146    """∈📦 URLs/relative paths to sample inputs to illustrate possible inputs for the model,
    +1147    for example stored as PNG or TIFF images.
    +1148    The sample files primarily serve to inform a human user about an example use case."""
    +1149
    +1150    sample_outputs: List[ImportantFileSource] = Field(default_factory=list)
    +1151    """∈📦 URLs/relative paths to sample outputs corresponding to the `sample_inputs`."""
    +1152
    +1153    test_inputs: NotEmpty[
    +1154        List[Annotated[ImportantFileSource, WithSuffix(".npy", case_sensitive=True)]]
    +1155    ]
    +1156    """∈📦 Test input tensors compatible with the `inputs` description for a **single test case**.
    +1157    This means if your model has more than one input, you should provide one URL/relative path for each input.
    +1158    Each test input should be a file with an ndarray in
    +1159    [numpy.lib file format](https://numpy.org/doc/stable/reference/generated/numpy.lib.format.html#module-numpy.lib.format).
    +1160    The extension must be '.npy'."""
    +1161
    +1162    test_outputs: NotEmpty[
    +1163        List[Annotated[ImportantFileSource, WithSuffix(".npy", case_sensitive=True)]]
    +1164    ]
    +1165    """∈📦 Analogous to `test_inputs`."""
    +1166
    +1167    timestamp: Datetime
    +1168    """Timestamp in [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601) format
    +1169    with a few restrictions listed [here](https://docs.python.org/3/library/datetime.html#datetime.datetime.fromisoformat)."""
    +1170
    +1171    training_data: Union[LinkedDataset, DatasetDescr, None] = None
    +1172    """The dataset used to train this model"""
    +1173
    +1174    weights: Annotated[WeightsDescr, WrapSerializer(package_weights)]
    +1175    """The weights for this model.
    +1176    Weights can be given for different formats, but should otherwise be equivalent.
    +1177    The available weight formats determine which consumers can use this model."""
    +1178
    +1179    @model_validator(mode="before")
    +1180    @classmethod
    +1181    def _convert_from_older_format(
    +1182        cls, data: BioimageioYamlContent, /
    +1183    ) -> BioimageioYamlContent:
    +1184        convert_from_older_format(data)
    +1185        return data
    +1186
    +1187    def get_input_test_arrays(self) -> List[NDArray[Any]]:
    +1188        data = [load_array(download(ipt).path) for ipt in self.test_inputs]
    +1189        assert all(isinstance(d, np.ndarray) for d in data)
    +1190        return data
    +1191
    +1192    def get_output_test_arrays(self) -> List[NDArray[Any]]:
    +1193        data = [load_array(download(out).path) for out in self.test_outputs]
    +1194        assert all(isinstance(d, np.ndarray) for d in data)
    +1195        return data
    +
    + + +
    +
    + +
    + + class + ModelId(bioimageio.spec.generic.v0_2.ResourceId): + + + +
    + +
    82class ModelId(ResourceId):
    +83    pass
    +
    + + +

    str(object='') -> str +str(bytes_or_buffer[, encoding[, errors]]) -> str

    + +

    Create a new string object from the given object. If encoding or +errors is specified, then the object must expose a data buffer +that will be decoded using the given encoding and error handler. +Otherwise, returns the result of object.__str__() (if defined) +or repr(object). +encoding defaults to sys.getdefaultencoding(). +errors defaults to 'strict'.

    +
    + + +
    +
    Inherited Members
    +
    + +
    +
    +
    +
    +
    + AxesStr = + + typing.Annotated[str, RestrictCharacters(alphabet='bitczyx'), AfterValidator(func=<function validate_unique_entries>)] + + +
    + + + + +
    +
    +
    + AxesInCZYX = + + typing.Annotated[str, RestrictCharacters(alphabet='czyx'), AfterValidator(func=<function validate_unique_entries>)] + + +
    + + + + +
    +
    +
    + PostprocessingName = + + typing.Literal['binarize', 'clip', 'scale_linear', 'sigmoid', 'zero_mean_unit_variance', 'scale_range', 'scale_mean_variance'] + + +
    + + + + +
    +
    +
    + PreprocessingName = + + typing.Literal['binarize', 'clip', 'scale_linear', 'sigmoid', 'zero_mean_unit_variance', 'scale_range'] + + +
    + + + + +
    +
    + +
    + + class + TensorName(bioimageio.spec._internal.types.LowerCaseIdentifier): + + + +
    + +
    112class TensorName(LowerCaseIdentifier):
    +113    pass
    +
    + + +

    str(object='') -> str +str(bytes_or_buffer[, encoding[, errors]]) -> str

    + +

    Create a new string object from the given object. If encoding or +errors is specified, then the object must expose a data buffer +that will be decoded using the given encoding and error handler. +Otherwise, returns the result of object.__str__() (if defined) +or repr(object). +encoding defaults to sys.getdefaultencoding(). +errors defaults to 'strict'.

    +
    + + +
    +
    + +
    + + class + CallableFromDepencency(bioimageio.spec._internal.common_nodes.StringNode): + + + +
    + +
    116class CallableFromDepencency(StringNode):
    +117    _pattern = r"^.+\..+$"
    +118    _submodule_adapter = TypeAdapter(Identifier)
    +119
    +120    module_name: str
    +121
    +122    @field_validator("module_name", mode="after")
    +123    def check_submodules(cls, module_name: str) -> str:
    +124        for submod in module_name.split("."):
    +125            _ = cls._submodule_adapter.validate_python(submod)
    +126
    +127        return module_name
    +128
    +129    callable_name: Identifier
    +130
    +131    @classmethod
    +132    def _get_data(cls, valid_string_data: str):
    +133        *mods, callname = valid_string_data.split(".")
    +134        return dict(module_name=".".join(mods), callable_name=callname)
    +
    + + +

    deprecated! don't use for new spec fields!

    +
    + + +
    +
    + module_name: str + + +
    + + + + +
    +
    + +
    +
    @field_validator('module_name', mode='after')
    + + def + check_submodules(cls, module_name: str) -> str: + + + +
    + +
    122    @field_validator("module_name", mode="after")
    +123    def check_submodules(cls, module_name: str) -> str:
    +124        for submod in module_name.split("."):
    +125            _ = cls._submodule_adapter.validate_python(submod)
    +126
    +127        return module_name
    +
    + + + + +
    +
    +
    + callable_name: bioimageio.spec._internal.types.Identifier + + +
    + + + + +
    +
    +
    + +
    + + class + CallableFromFile(bioimageio.spec._internal.common_nodes.StringNode): + + + +
    + +
    137class CallableFromFile(StringNode):
    +138    _pattern = r"^.+:.+$"
    +139    source_file: Annotated[
    +140        Union[RelativeFilePath, HttpUrl],
    +141        Field(union_mode="left_to_right"),
    +142        include_in_package_serializer,
    +143    ]
    +144    """∈📦 Python module that implements `callable_name`"""
    +145    callable_name: Identifier
    +146    """The Python identifier of the callable implemented in `source_file`."""
    +147
    +148    @classmethod
    +149    def _get_data(cls, valid_string_data: str):
    +150        *file_parts, callname = valid_string_data.split(":")
    +151        return dict(source_file=":".join(file_parts), callable_name=callname)
    +
    + + +

    deprecated! don't use for new spec fields!

    +
    + + +
    +
    + source_file: Annotated[Union[bioimageio.spec._internal.io.RelativeFilePath, bioimageio.spec._internal.url.HttpUrl], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')]), PlainSerializer(func=<function _package at 0x7f538192d620>, return_type=PydanticUndefined, when_used='unless-none')] + + +
    + + +

    ∈📦 Python module that implements callable_name

    +
    + + +
    +
    +
    + callable_name: bioimageio.spec._internal.types.Identifier + + +
    + + +

    The Python identifier of the callable implemented in source_file.

    +
    + + +
    +
    +
    +
    + CustomCallable = + + typing.Annotated[typing.Union[CallableFromFile, CallableFromDepencency], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])] + + +
    + + + + +
    +
    + +
    + + class + Dependencies(bioimageio.spec._internal.common_nodes.StringNode): + + + +
    + +
    159class Dependencies(StringNode):
    +160    _pattern = r"^.+:.+$"
    +161    manager: Annotated[NotEmpty[str], Field(examples=["conda", "maven", "pip"])]
    +162    """Dependency manager"""
    +163
    +164    file: Annotated[
    +165        ImportantFileSource,
    +166        Field(examples=["environment.yaml", "pom.xml", "requirements.txt"]),
    +167    ]
    +168    """∈📦 Dependency file"""
    +169
    +170    @classmethod
    +171    def _get_data(cls, valid_string_data: str):
    +172        manager, *file_parts = valid_string_data.split(":")
    +173        return dict(manager=manager, file=":".join(file_parts))
    +
    + + +

    deprecated! don't use for new spec fields!

    +
    + + +
    +
    + manager: Annotated[str, MinLen(min_length=1), FieldInfo(annotation=NoneType, required=True, examples=['conda', 'maven', 'pip'])] + + +
    + + +

    Dependency manager

    +
    + + +
    +
    +
    + file: Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')]), AfterValidator(func=<function wo_special_file_name at 0x7f538192cea0>), PlainSerializer(func=<function _package at 0x7f538192d620>, return_type=PydanticUndefined, when_used='unless-none'), FieldInfo(annotation=NoneType, required=True, examples=['environment.yaml', 'pom.xml', 'requirements.txt'])] + + +
    + + +

    ∈📦 Dependency file

    +
    + + +
    +
    +
    +
    + WeightsFormat = + + typing.Literal['keras_hdf5', 'onnx', 'pytorch_state_dict', 'tensorflow_js', 'tensorflow_saved_model_bundle', 'torchscript'] + + +
    + + + + +
    +
    + +
    + + class + WeightsDescr(bioimageio.spec._internal.node.Node): + + + +
    + +
    186class WeightsDescr(Node):
    +187    keras_hdf5: Optional[KerasHdf5WeightsDescr] = None
    +188    onnx: Optional[OnnxWeightsDescr] = None
    +189    pytorch_state_dict: Optional[PytorchStateDictWeightsDescr] = None
    +190    tensorflow_js: Optional[TensorflowJsWeightsDescr] = None
    +191    tensorflow_saved_model_bundle: Optional[TensorflowSavedModelBundleWeightsDescr] = (
    +192        None
    +193    )
    +194    torchscript: Optional[TorchscriptWeightsDescr] = None
    +195
    +196    @model_validator(mode="after")
    +197    def check_one_entry(self) -> Self:
    +198        if all(
    +199            entry is None
    +200            for entry in [
    +201                self.keras_hdf5,
    +202                self.onnx,
    +203                self.pytorch_state_dict,
    +204                self.tensorflow_js,
    +205                self.tensorflow_saved_model_bundle,
    +206                self.torchscript,
    +207            ]
    +208        ):
    +209            raise ValueError("Missing weights entry")
    +210
    +211        return self
    +
    + + +

    Subpart of a resource description

    +
    + + +
    +
    + keras_hdf5: Optional[KerasHdf5WeightsDescr] + + +
    + + + + +
    +
    +
    + onnx: Optional[OnnxWeightsDescr] + + +
    + + + + +
    +
    +
    + pytorch_state_dict: Optional[PytorchStateDictWeightsDescr] + + +
    + + + + +
    +
    +
    + tensorflow_js: Optional[TensorflowJsWeightsDescr] + + +
    + + + + +
    +
    +
    + tensorflow_saved_model_bundle: Optional[TensorflowSavedModelBundleWeightsDescr] + + +
    + + + + +
    +
    +
    + torchscript: Optional[TorchscriptWeightsDescr] + + +
    + + + + +
    +
    + +
    +
    @model_validator(mode='after')
    + + def + check_one_entry(self) -> Self: + + + +
    + +
    196    @model_validator(mode="after")
    +197    def check_one_entry(self) -> Self:
    +198        if all(
    +199            entry is None
    +200            for entry in [
    +201                self.keras_hdf5,
    +202                self.onnx,
    +203                self.pytorch_state_dict,
    +204                self.tensorflow_js,
    +205                self.tensorflow_saved_model_bundle,
    +206                self.torchscript,
    +207            ]
    +208        ):
    +209            raise ValueError("Missing weights entry")
    +210
    +211        return self
    +
    + + + + +
    +
    +
    + +
    + + class + WeightsEntryDescrBase(bioimageio.spec._internal.io.FileDescr): + + + +
    + +
    214class WeightsEntryDescrBase(FileDescr):
    +215    type: ClassVar[WeightsFormat]
    +216    weights_format_name: ClassVar[str]  # human readable
    +217
    +218    source: ImportantFileSource
    +219    """∈📦 The weights file."""
    +220
    +221    attachments: Annotated[
    +222        Union[AttachmentsDescr, None],
    +223        warn(None, "Weights entry depends on additional attachments.", ALERT),
    +224    ] = None
    +225    """Attachments that are specific to this weights entry."""
    +226
    +227    authors: Union[List[Author], None] = None
    +228    """Authors
    +229    Either the person(s) that have trained this model resulting in the original weights file.
    +230        (If this is the initial weights entry, i.e. it does not have a `parent`)
    +231    Or the person(s) who have converted the weights to this weights format.
    +232        (If this is a child weight, i.e. it has a `parent` field)
    +233    """
    +234
    +235    dependencies: Annotated[
    +236        Optional[Dependencies],
    +237        warn(
    +238            None,
    +239            "Custom dependencies ({value}) specified. Avoid this whenever possible "
    +240            + "to allow execution in a wider range of software environments.",
    +241        ),
    +242        Field(
    +243            examples=[
    +244                "conda:environment.yaml",
    +245                "maven:./pom.xml",
    +246                "pip:./requirements.txt",
    +247            ]
    +248        ),
    +249    ] = None
    +250    """Dependency manager and dependency file, specified as `<dependency manager>:<relative file path>`."""
    +251
    +252    parent: Annotated[
    +253        Optional[WeightsFormat], Field(examples=["pytorch_state_dict"])
    +254    ] = None
    +255    """The source weights these weights were converted from.
    +256    For example, if a model's weights were converted from the `pytorch_state_dict` format to `torchscript`,
    +257    the `pytorch_state_dict` weights entry has no `parent` and is the parent of the `torchscript` weights.
    +258    All weight entries except one (the initial set of weights resulting from training the model)
    +259    need to have this field.
    +260
    +261    @model_validator(mode="after")
    +262    def check_parent_is_not_self(self) -> Self:
    +263        if self.type == self.parent:
    +264            raise ValueError("Weights entry can't be its own parent.")
    +265
    +266        return self
    +
    + + +

    Subpart of a resource description

    +
    + + +
    +
    + type: ClassVar[Literal['keras_hdf5', 'onnx', 'pytorch_state_dict', 'tensorflow_js', 'tensorflow_saved_model_bundle', 'torchscript']] + + +
    + + + + +
    +
    +
    + weights_format_name: ClassVar[str] + + +
    + + + + +
    +
    +
    + source: Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')]), AfterValidator(func=<function wo_special_file_name at 0x7f538192cea0>), PlainSerializer(func=<function _package at 0x7f538192d620>, return_type=PydanticUndefined, when_used='unless-none')] + + +
    + + +

    ∈📦 The weights file.

    +
    + + +
    +
    +
    + attachments: Annotated[Optional[bioimageio.spec.generic.v0_2.AttachmentsDescr], AfterWarner(func=<function as_warning.<locals>.wrapper at 0x7f5370628360>, severity=35, msg='Weights entry depends on additional attachments.', context={'typ': None})] + + +
    + + +

    Attachments that are specific to this weights entry.

    +
    + + +
    +
    +
    + authors: Optional[List[bioimageio.spec.generic.v0_2.Author]] + + +
    + + +

    Authors +Either the person(s) that have trained this model resulting in the original weights file. + (If this is the initial weights entry, i.e. it does not have a parent) +Or the person(s) who have converted the weights to this weights format. + (If this is a child weight, i.e. it has a parent field)

    +
    + + +
    +
    +
    + dependencies: Annotated[Optional[Dependencies], AfterWarner(func=<function as_warning.<locals>.wrapper at 0x7f53706289a0>, severity=30, msg='Custom dependencies ({value}) specified. Avoid this whenever possible to allow execution in a wider range of software environments.', context={'typ': None}), FieldInfo(annotation=NoneType, required=True, examples=['conda:environment.yaml', 'maven:./pom.xml', 'pip:./requirements.txt'])] + + +
    + + +

    Dependency manager and dependency file, specified as <dependency manager>:<relative file path>.

    +
    + + +
    +
    +
    + parent: Annotated[Optional[Literal['keras_hdf5', 'onnx', 'pytorch_state_dict', 'tensorflow_js', 'tensorflow_saved_model_bundle', 'torchscript']], FieldInfo(annotation=NoneType, required=True, examples=['pytorch_state_dict'])] + + +
    + + +

    The source weights these weights were converted from. +For example, if a model's weights were converted from the pytorch_state_dict format to torchscript, +the pytorch_state_dict weights entry has no parent and is the parent of the torchscript weights. +All weight entries except one (the initial set of weights resulting from training the model) +need to have this field.

    +
    + + +
    +
    + +
    +
    @model_validator(mode='after')
    + + def + check_parent_is_not_self(self) -> Self: + + + +
    + +
    261    @model_validator(mode="after")
    +262    def check_parent_is_not_self(self) -> Self:
    +263        if self.type == self.parent:
    +264            raise ValueError("Weights entry can't be its own parent.")
    +265
    +266        return self
    +
    + + + + +
    +
    +
    + +
    + + class + KerasHdf5WeightsDescr(WeightsEntryDescrBase): + + + +
    + +
    269class KerasHdf5WeightsDescr(WeightsEntryDescrBase):
    +270    type = "keras_hdf5"
    +271    weights_format_name: ClassVar[str] = "Keras HDF5"
    +272    tensorflow_version: Optional[Version] = None
    +273    """TensorFlow version used to create these weights"""
    +274
    +275    @field_validator("tensorflow_version", mode="after")
    +276    @classmethod
    +277    def _tfv(cls, value: Any):
    +278        if value is None:
    +279            issue_warning(
    +280                "missing. Please specify the TensorFlow version"
    +281                + " these weights were created with.",
    +282                value=value,
    +283                severity=ALERT,
    +284                field="tensorflow_version",
    +285            )
    +286        return value
    +
    + + +

    Subpart of a resource description

    +
    + + +
    +
    + type = +'keras_hdf5' + + +
    + + + + +
    +
    +
    + weights_format_name: ClassVar[str] = +'Keras HDF5' + + +
    + + + + +
    +
    +
    + tensorflow_version: Optional[bioimageio.spec._internal.version_type.Version] + + +
    + + +

    TensorFlow version used to create these weights

    +
    + + +
    + +
    +
    + +
    + + class + OnnxWeightsDescr(WeightsEntryDescrBase): + + + +
    + +
    289class OnnxWeightsDescr(WeightsEntryDescrBase):
    +290    type = "onnx"
    +291    weights_format_name: ClassVar[str] = "ONNX"
    +292    opset_version: Optional[Annotated[int, Ge(7)]] = None
    +293    """ONNX opset version"""
    +294
    +295    @field_validator("opset_version", mode="after")
    +296    @classmethod
    +297    def _ov(cls, value: Any):
    +298        if value is None:
    +299            issue_warning(
    +300                "Missing ONNX opset version (aka ONNX opset number). "
    +301                + "Please specify the ONNX opset version these weights were created"
    +302                + " with.",
    +303                value=value,
    +304                severity=ALERT,
    +305                field="opset_version",
    +306            )
    +307        return value
    +
    + + +

    Subpart of a resource description

    +
    + + +
    +
    + type = +'onnx' + + +
    + + + + +
    +
    +
    + weights_format_name: ClassVar[str] = +'ONNX' + + +
    + + + + +
    +
    +
    + opset_version: Optional[Annotated[int, Ge(ge=7)]] + + +
    + + +

    ONNX opset version

    +
    + + +
    + +
    +
    + +
    + + class + PytorchStateDictWeightsDescr(WeightsEntryDescrBase): + + + +
    + +
    310class PytorchStateDictWeightsDescr(WeightsEntryDescrBase):
    +311    type = "pytorch_state_dict"
    +312    weights_format_name: ClassVar[str] = "Pytorch State Dict"
    +313    architecture: CustomCallable = Field(
    +314        examples=["my_function.py:MyNetworkClass", "my_module.submodule.get_my_model"]
    +315    )
    +316    """callable returning a torch.nn.Module instance.
    +317    Local implementation: `<relative path to file>:<identifier of implementation within the file>`.
    +318    Implementation in a dependency: `<dependency-package>.<[dependency-module]>.<identifier>`."""
    +319
    +320    architecture_sha256: Annotated[
    +321        Optional[Sha256],
    +322        Field(
    +323            description=(
    +324                "The SHA256 of the architecture source file, if the architecture is not"
    +325                " defined in a module listed in `dependencies`\n"
    +326            )
    +327            + SHA256_HINT,
    +328        ),
    +329    ] = None
    +330    """The SHA256 of the architecture source file,
    +331    if the architecture is not defined in a module listed in `dependencies`"""
    +332
    +333    @model_validator(mode="after")
    +334    def check_architecture_sha256(self) -> Self:
    +335        if isinstance(self.architecture, CallableFromFile):
    +336            if self.architecture_sha256 is None:
    +337                raise ValueError(
    +338                    "Missing required `architecture_sha256` for `architecture` with"
    +339                    + " source file."
    +340                )
    +341        elif self.architecture_sha256 is not None:
    +342            raise ValueError(
    +343                "Got `architecture_sha256` for architecture that does not have a source"
    +344                + " file."
    +345            )
    +346
    +347        return self
    +348
    +349    kwargs: Dict[str, Any] = Field(default_factory=dict)
    +350    """key word arguments for the `architecture` callable"""
    +351
    +352    pytorch_version: Optional[Version] = None
    +353    """Version of the PyTorch library used.
    +354    If `dependencies` is specified it should include pytorch and the version has to match.
    +355    (`dependencies` overrules `pytorch_version`)"""
    +356
    +357    @field_validator("pytorch_version", mode="after")
    +358    @classmethod
    +359    def _ptv(cls, value: Any):
    +360        if value is None:
    +361            issue_warning(
    +362                "missing. Please specify the PyTorch version these"
    +363                + " PyTorch state dict weights were created with.",
    +364                value=value,
    +365                severity=ALERT,
    +366                field="pytorch_version",
    +367            )
    +368        return value
    +
    + + +

    Subpart of a resource description

    +
    + + +
    +
    + type = +'pytorch_state_dict' + + +
    + + + + +
    +
    +
    + weights_format_name: ClassVar[str] = +'Pytorch State Dict' + + +
    + + + + +
    +
    +
    + architecture: Annotated[Union[CallableFromFile, CallableFromDepencency], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])] + + +
    + + +

    callable returning a torch.nn.Module instance. +Local implementation: <relative path to file>:<identifier of implementation within the file>. +Implementation in a dependency: <dependency-package>.<[dependency-module]>.<identifier>.

    +
    + + +
    +
    +
    + architecture_sha256: Annotated[Optional[bioimageio.spec._internal.io_basics.Sha256], FieldInfo(annotation=NoneType, required=True, description="The SHA256 of the architecture source file, if the architecture is not defined in a module listed in `dependencies`\nYou can drag and drop your file to this\n[online tool](http://emn178.github.io/online-tools/sha256_checksum.html) to generate a SHA256 in your browser.\nOr you can generate a SHA256 checksum with Python's `hashlib`,\n[here is a codesnippet](https://gist.github.com/FynnBe/e64460463df89439cff218bbf59c1100).")] + + +
    + + +

    The SHA256 of the architecture source file, +if the architecture is not defined in a module listed in dependencies

    +
    + + +
    +
    + +
    +
    @model_validator(mode='after')
    + + def + check_architecture_sha256(self) -> Self: + + + +
    + +
    333    @model_validator(mode="after")
    +334    def check_architecture_sha256(self) -> Self:
    +335        if isinstance(self.architecture, CallableFromFile):
    +336            if self.architecture_sha256 is None:
    +337                raise ValueError(
    +338                    "Missing required `architecture_sha256` for `architecture` with"
    +339                    + " source file."
    +340                )
    +341        elif self.architecture_sha256 is not None:
    +342            raise ValueError(
    +343                "Got `architecture_sha256` for architecture that does not have a source"
    +344                + " file."
    +345            )
    +346
    +347        return self
    +
    + + + + +
    +
    +
    + kwargs: Dict[str, Any] + + +
    + + +

    key word arguments for the architecture callable

    +
    + + +
    +
    +
    + pytorch_version: Optional[bioimageio.spec._internal.version_type.Version] + + +
    + + +

    Version of the PyTorch library used. +If dependencies is specified it should include pytorch and the version has to match. +(dependencies overrules pytorch_version)

    +
    + + +
    + +
    +
    + +
    + + class + TorchscriptWeightsDescr(WeightsEntryDescrBase): + + + +
    + +
    371class TorchscriptWeightsDescr(WeightsEntryDescrBase):
    +372    type = "torchscript"
    +373    weights_format_name: ClassVar[str] = "TorchScript"
    +374    pytorch_version: Optional[Version] = None
    +375    """Version of the PyTorch library used."""
    +376
    +377    @field_validator("pytorch_version", mode="after")
    +378    @classmethod
    +379    def _ptv(cls, value: Any):
    +380        if value is None:
    +381            issue_warning(
    +382                "missing. Please specify the PyTorch version these"
    +383                + " Torchscript weights were created with.",
    +384                value=value,
    +385                severity=ALERT,
    +386                field="pytorch_version",
    +387            )
    +388        return value
    +
    + + +

    Subpart of a resource description

    +
    + + +
    +
    + type = +'torchscript' + + +
    + + + + +
    +
    +
    + weights_format_name: ClassVar[str] = +'TorchScript' + + +
    + + + + +
    +
    +
    + pytorch_version: Optional[bioimageio.spec._internal.version_type.Version] + + +
    + + +

    Version of the PyTorch library used.

    +
    + + +
    + +
    +
    + +
    + + class + TensorflowJsWeightsDescr(WeightsEntryDescrBase): + + + +
    + +
    391class TensorflowJsWeightsDescr(WeightsEntryDescrBase):
    +392    type = "tensorflow_js"
    +393    weights_format_name: ClassVar[str] = "Tensorflow.js"
    +394    tensorflow_version: Optional[Version] = None
    +395    """Version of the TensorFlow library used."""
    +396
    +397    @field_validator("tensorflow_version", mode="after")
    +398    @classmethod
    +399    def _tfv(cls, value: Any):
    +400        if value is None:
    +401            issue_warning(
    +402                "missing. Please specify the TensorFlow version"
    +403                + " these TensorflowJs weights were created with.",
    +404                value=value,
    +405                severity=ALERT,
    +406                field="tensorflow_version",
    +407            )
    +408        return value
    +409
    +410    source: ImportantFileSource
    +411    """∈📦 The multi-file weights.
    +412    All required files/folders should be a zip archive."""
    +
    + + +

    Subpart of a resource description

    +
    + + +
    +
    + type = +'tensorflow_js' + + +
    + + + + +
    +
    +
    + weights_format_name: ClassVar[str] = +'Tensorflow.js' + + +
    + + + + +
    +
    +
    + tensorflow_version: Optional[bioimageio.spec._internal.version_type.Version] + + +
    + + +

    Version of the TensorFlow library used.

    +
    + + +
    +
    +
    + source: Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')]), AfterValidator(func=<function wo_special_file_name at 0x7f538192cea0>), PlainSerializer(func=<function _package at 0x7f538192d620>, return_type=PydanticUndefined, when_used='unless-none')] + + +
    + + +

    ∈📦 The multi-file weights. +All required files/folders should be a zip archive.

    +
    + + +
    + +
    +
    + +
    + + class + TensorflowSavedModelBundleWeightsDescr(WeightsEntryDescrBase): + + + +
    + +
    415class TensorflowSavedModelBundleWeightsDescr(WeightsEntryDescrBase):
    +416    type = "tensorflow_saved_model_bundle"
    +417    weights_format_name: ClassVar[str] = "Tensorflow Saved Model"
    +418    tensorflow_version: Optional[Version] = None
    +419    """Version of the TensorFlow library used."""
    +420
    +421    @field_validator("tensorflow_version", mode="after")
    +422    @classmethod
    +423    def _tfv(cls, value: Any):
    +424        if value is None:
    +425            issue_warning(
    +426                "missing. Please specify the TensorFlow version"
    +427                + " these Tensorflow saved model bundle weights were created with.",
    +428                value=value,
    +429                severity=ALERT,
    +430                field="tensorflow_version",
    +431            )
    +432        return value
    +
    + + +

    Subpart of a resource description

    +
    + + +
    +
    + type = +'tensorflow_saved_model_bundle' + + +
    + + + + +
    +
    +
    + weights_format_name: ClassVar[str] = +'Tensorflow Saved Model' + + +
    + + + + +
    +
    +
    + tensorflow_version: Optional[bioimageio.spec._internal.version_type.Version] + + +
    + + +

    Version of the TensorFlow library used.

    +
    + + +
    + +
    +
    + +
    + + class + ParameterizedInputShape(bioimageio.spec._internal.node.Node): + + + +
    + +
    435class ParameterizedInputShape(Node):
    +436    """A sequence of valid shapes given by `shape_k = min + k * step for k in {0, 1, ...}`."""
    +437
    +438    min: NotEmpty[List[int]]
    +439    """The minimum input shape"""
    +440
    +441    step: NotEmpty[List[int]]
    +442    """The minimum shape change"""
    +443
    +444    def __len__(self) -> int:
    +445        return len(self.min)
    +446
    +447    @model_validator(mode="after")
    +448    def matching_lengths(self) -> Self:
    +449        if len(self.min) != len(self.step):
    +450            raise ValueError("`min` and `step` required to have the same length")
    +451
    +452        return self
    +
    + + +

    A sequence of valid shapes given by shape_k = min + k * step for k in {0, 1, ...}.
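    For illustration, a minimal plain-Python sketch (the min/step values below are made up) of how the first valid shapes follow from this rule:

        min_shape = [1, 64, 64, 1]  # hypothetical minimum shape
        step = [0, 32, 32, 0]       # hypothetical step
        for k in range(3):
            print([m + k * s for m, s in zip(min_shape, step)])
        # [1, 64, 64, 1]
        # [1, 96, 96, 1]
        # [1, 128, 128, 1]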

    +
    + + +
    +
    + min: Annotated[List[int], MinLen(min_length=1)] + + +
    + + +

    The minimum input shape

    +
    + + +
    +
    +
    + step: Annotated[List[int], MinLen(min_length=1)] + + +
    + + +

    The minimum shape change

    +
    + + +
    +
    + +
    +
    @model_validator(mode='after')
    + + def + matching_lengths(self) -> Self: + + + +
    + +
    447    @model_validator(mode="after")
    +448    def matching_lengths(self) -> Self:
    +449        if len(self.min) != len(self.step):
    +450            raise ValueError("`min` and `step` required to have the same length")
    +451
    +452        return self
    +
    + + + + +
    +
    +
    + +
    + + class + ImplicitOutputShape(bioimageio.spec._internal.node.Node): + + + +
    + +
    455class ImplicitOutputShape(Node):
    +456    """Output tensor shape depending on an input tensor shape.
    +457    `shape(output_tensor) = shape(input_tensor) * scale + 2 * offset`"""
    +458
    +459    reference_tensor: TensorName
    +460    """Name of the reference tensor."""
    +461
    +462    scale: NotEmpty[List[Optional[float]]]
    +463    """output_pix/input_pix for each dimension.
    +464    'null' values indicate new dimensions, whose length is defined by 2*`offset`"""
    +465
    +466    offset: NotEmpty[List[Union[int, Annotated[float, MultipleOf(0.5)]]]]
    +467    """Position of origin with respect to the input."""
    +468
    +469    def __len__(self) -> int:
    +470        return len(self.scale)
    +471
    +472    @model_validator(mode="after")
    +473    def matching_lengths(self) -> Self:
    +474        if len(self.scale) != len(self.offset):
    +475            raise ValueError(
    +476                f"scale {self.scale} has to have same length as offset {self.offset}!"
    +477            )
    +478        # if we have an expanded dimension, make sure that its offset is not zero
    +479        for sc, off in zip(self.scale, self.offset):
    +480            if sc is None and not off:
    +481                raise ValueError("`offset` must not be zero if `scale` is none/zero")
    +482
    +483        return self
    +
    + + +

    Output tensor shape depending on an input tensor shape. +shape(output_tensor) = shape(input_tensor) * scale + 2 * offset
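    As a worked sketch of this formula in plain Python (the reference shape, scale, and offset values are made up):

        ref_shape = [1, 128, 128, 1]  # shape of the referenced input tensor (assumed)
        scale = [1.0, 0.5, 0.5, 1.0]
        offset = [0, 8, 8, 0]
        out_shape = [int(s * sc + 2 * off) for s, sc, off in zip(ref_shape, scale, offset)]
        # out_shape == [1, 80, 80, 1]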

    +
    + + +
    +
    + reference_tensor: TensorName + + +
    + + +

    Name of the reference tensor.

    +
    + + +
    +
    +
    + scale: Annotated[List[Optional[float]], MinLen(min_length=1)] + + +
    + + +

    output_pix/input_pix for each dimension. +'null' values indicate new dimensions, whose length is defined by 2*offset

    +
    + + +
    +
    +
    + offset: Annotated[List[Union[int, Annotated[float, MultipleOf(multiple_of=0.5)]]], MinLen(min_length=1)] + + +
    + + +

    Position of origin with respect to the input.

    +
    + + +
    +
    + +
    +
    @model_validator(mode='after')
    + + def + matching_lengths(self) -> Self: + + + +
    + +
    472    @model_validator(mode="after")
    +473    def matching_lengths(self) -> Self:
    +474        if len(self.scale) != len(self.offset):
    +475            raise ValueError(
    +476                f"scale {self.scale} has to have same length as offset {self.offset}!"
    +477            )
    +478        # if we have an expanded dimension, make sure that its offset is not zero
    +479        for sc, off in zip(self.scale, self.offset):
    +480            if sc is None and not off:
    +481                raise ValueError("`offset` must not be zero if `scale` is none/zero")
    +482
    +483        return self
    +
    + + + + +
    +
    +
    + +
    + + class + TensorDescrBase(bioimageio.spec._internal.node.Node): + + + +
    + +
    486class TensorDescrBase(Node):
    +487    name: TensorName
    +488    """Tensor name. No duplicates are allowed."""
    +489
    +490    description: str = ""
    +491
    +492    axes: AxesStr
    +493    """Axes identifying characters. Same length and order as the axes in `shape`.
    +494    | axis | description |
    +495    | --- | --- |
    +496    |  b  |  batch (groups multiple samples) |
    +497    |  i  |  instance/index/element |
    +498    |  t  |  time |
    +499    |  c  |  channel |
    +500    |  z  |  spatial dimension z |
    +501    |  y  |  spatial dimension y |
    +502    |  x  |  spatial dimension x |
    +503    """
    +504
    +505    data_range: Optional[
    +506        Tuple[Annotated[float, AllowInfNan(True)], Annotated[float, AllowInfNan(True)]]
    +507    ] = None
    +508    """Tuple `(minimum, maximum)` specifying the allowed range of the data in this tensor.
    +509    If not specified, the full data range that can be expressed in `data_type` is allowed."""
    +
    + + +

    Subpart of a resource description

    +
    + + +
    +
    + name: TensorName + + +
    + + +

    Tensor name. No duplicates are allowed.

    +
    + + +
    +
    +
    + description: str + + +
    + + + + +
    +
    +
    + axes: Annotated[str, RestrictCharacters(alphabet='bitczyx'), AfterValidator(func=<function validate_unique_entries at 0x7f5383752ac0>)] + + +
    + + +

    Axes identifying characters. Same length and order as the axes in shape.

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    | axis | description |
    | --- | --- |
    | b | batch (groups multiple samples) |
    | i | instance/index/element |
    | t | time |
    | c | channel |
    | z | spatial dimension z |
    | y | spatial dimension y |
    | x | spatial dimension x |
    +
    + + +
    +
    +
    + data_range: Optional[Tuple[Annotated[float, AllowInfNan(allow_inf_nan=True)], Annotated[float, AllowInfNan(allow_inf_nan=True)]]] + + +
    + + +

    Tuple (minimum, maximum) specifying the allowed range of the data in this tensor. +If not specified, the full data range that can be expressed in data_type is allowed.

    +
    + + +
    +
    +
    + +
    + + class + ProcessingKwargs(bioimageio.spec._internal.common_nodes.KwargsNode): + + + +
    + +
    512class ProcessingKwargs(KwargsNode):
    +513    """base class for pre-/postprocessing key word arguments"""
    +
    + + +

    base class for pre-/postprocessing key word arguments

    +
    + + +
    +
    + +
    + + class + ProcessingDescrBase(bioimageio.spec._internal.common_nodes.NodeWithExplicitlySetFields): + + + +
    + +
    516class ProcessingDescrBase(NodeWithExplicitlySetFields):
    +517    """processing base class"""
    +518
    +519    # name: Literal[PreprocessingName, PostprocessingName]  # todo: make abstract field
    +520    fields_to_set_explicitly: ClassVar[FrozenSet[LiteralString]] = frozenset({"name"})
    +
    + + +

    processing base class

    +
    + + +
    +
    + fields_to_set_explicitly: ClassVar[FrozenSet[LiteralString]] = +frozenset({'name'}) + + +
    + + +

    Set these fields explicitly with their default value if they are not set, +such that they are always included even when dumping with 'exclude_unset'.

    +
    + + +
    +
    +
    + +
    + + class + BinarizeKwargs(ProcessingKwargs): + + + +
    + +
    523class BinarizeKwargs(ProcessingKwargs):
    +524    """key word arguments for `BinarizeDescr`"""
    +525
    +526    threshold: float
    +527    """The fixed threshold"""
    +
    + + +

    key word arguments for BinarizeDescr

    +
    + + +
    +
    + threshold: float + + +
    + + +

    The fixed threshold

    +
    + + +
    +
    +
    + +
    + + class + BinarizeDescr(ProcessingDescrBase): + + + +
    + +
    530class BinarizeDescr(ProcessingDescrBase):
    +531    """Binarize the tensor with a fixed `BinarizeKwargs.threshold`.
    +532    Values above the threshold will be set to one, values below the threshold to zero.
    +533    """
    +534
    +535    name: Literal["binarize"] = "binarize"
    +536    kwargs: BinarizeKwargs
    +
    + + +

    Binarize the tensor with a fixed BinarizeKwargs.threshold. +Values above the threshold will be set to one, values below the threshold to zero.
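    The effect corresponds roughly to the following numpy sketch (the threshold is an arbitrary example value):

        import numpy as np
        tensor = np.array([0.2, 0.5, 0.9], dtype="float32")
        threshold = 0.5
        binarized = (tensor > threshold).astype("float32")  # -> [0., 0., 1.]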

    +
    + + +
    +
    + name: Literal['binarize'] + + +
    + + + + +
    +
    +
    + kwargs: BinarizeKwargs + + +
    + + + + +
    +
    +
    Inherited Members
    +
    + +
    +
    +
    +
    + +
    + + class + ClipKwargs(ProcessingKwargs): + + + +
    + +
    539class ClipKwargs(ProcessingKwargs):
    +540    """key word arguments for `ClipDescr`"""
    +541
    +542    min: float
    +543    """minimum value for clipping"""
    +544    max: float
    +545    """maximum value for clipping"""
    +
    + + +

    key word arguments for ClipDescr

    +
    + + +
    +
    + min: float + + +
    + + +

    minimum value for clipping

    +
    + + +
    +
    +
    + max: float + + +
    + + +

    maximum value for clipping

    +
    + + +
    +
    +
    + +
    + + class + ClipDescr(ProcessingDescrBase): + + + +
    + +
    548class ClipDescr(ProcessingDescrBase):
    +549    """Clip tensor values to a range.
    +550
    +551    Set tensor values below `ClipKwargs.min` to `ClipKwargs.min`
    +552    and above `ClipKwargs.max` to `ClipKwargs.max`.
    +553    """
    +554
    +555    name: Literal["clip"] = "clip"
    +556
    +557    kwargs: ClipKwargs
    +
    + + +

    Clip tensor values to a range.

    + +

    Set tensor values below ClipKwargs.min to ClipKwargs.min +and above ClipKwargs.max to ClipKwargs.max.
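    In numpy terms this is roughly (min/max are illustrative values):

        import numpy as np
        clipped = np.clip(np.array([-1.0, 0.3, 7.0]), 0.0, 1.0)  # -> [0., 0.3, 1.]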

    +
    + + +
    +
    + name: Literal['clip'] + + +
    + + + + +
    +
    +
    + kwargs: ClipKwargs + + +
    + + + + +
    +
    +
    Inherited Members
    +
    + +
    +
    +
    +
    + +
    + + class + ScaleLinearKwargs(ProcessingKwargs): + + + +
    + +
    560class ScaleLinearKwargs(ProcessingKwargs):
    +561    """key word arguments for `ScaleLinearDescr`"""
    +562
    +563    axes: Annotated[Optional[AxesInCZYX], Field(examples=["xy"])] = None
    +564    """The subset of axes to scale jointly.
    +565    For example xy to scale the two image axes for 2d data jointly."""
    +566
    +567    gain: Union[float, List[float]] = 1.0
    +568    """multiplicative factor"""
    +569
    +570    offset: Union[float, List[float]] = 0.0
    +571    """additive term"""
    +572
    +573    @model_validator(mode="after")
    +574    def either_gain_or_offset(self) -> Self:
    +575        if (
    +576            self.gain == 1.0
    +577            or isinstance(self.gain, list)
    +578            and all(g == 1.0 for g in self.gain)
    +579        ) and (
    +580            self.offset == 0.0
    +581            or isinstance(self.offset, list)
    +582            and all(off == 0.0 for off in self.offset)
    +583        ):
    +584            raise ValueError(
    +585                "Redundant linear scaling not allowed. Set `gain` != 1.0 and/or `offset` !="
    +586                + " 0.0."
    +587            )
    +588
    +589        return self
    +
    + + +

    key word arguments for ScaleLinearDescr

    +
    + + +
    +
    + axes: Annotated[Optional[Annotated[str, RestrictCharacters(alphabet='czyx'), AfterValidator(func=<function validate_unique_entries at 0x7f5383752ac0>)]], FieldInfo(annotation=NoneType, required=True, examples=['xy'])] + + +
    + + +

    The subset of axes to scale jointly. +For example xy to scale the two image axes for 2d data jointly.

    +
    + + +
    +
    +
    + gain: Union[float, List[float]] + + +
    + + +

    multiplicative factor

    +
    + + +
    +
    +
    + offset: Union[float, List[float]] + + +
    + + +

    additive term
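    Taken together, the operation amounts to out = gain * tensor + offset; a minimal numpy sketch with made-up values:

        import numpy as np
        tensor = np.array([0.0, 1.0, 2.0], dtype="float32")
        gain, offset = 2.0, 0.5        # illustrative scalars; per-axis lists are also allowed
        out = gain * tensor + offset   # -> [0.5, 2.5, 4.5]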

    +
    + + +
    +
    + +
    +
    @model_validator(mode='after')
    + + def + either_gain_or_offset(self) -> Self: + + + +
    + +
    573    @model_validator(mode="after")
    +574    def either_gain_or_offset(self) -> Self:
    +575        if (
    +576            self.gain == 1.0
    +577            or isinstance(self.gain, list)
    +578            and all(g == 1.0 for g in self.gain)
    +579        ) and (
    +580            self.offset == 0.0
    +581            or isinstance(self.offset, list)
    +582            and all(off == 0.0 for off in self.offset)
    +583        ):
    +584            raise ValueError(
    +585                "Redundant linear scaling not allowed. Set `gain` != 1.0 and/or `offset` !="
    +586                + " 0.0."
    +587            )
    +588
    +589        return self
    +
    + + + + +
    +
    +
    + +
    + + class + ScaleLinearDescr(ProcessingDescrBase): + + + +
    + +
    592class ScaleLinearDescr(ProcessingDescrBase):
    +593    """Fixed linear scaling."""
    +594
    +595    name: Literal["scale_linear"] = "scale_linear"
    +596    kwargs: ScaleLinearKwargs
    +
    + + +

    Fixed linear scaling.

    +
    + + +
    +
    + name: Literal['scale_linear'] + + +
    + + + + +
    +
    +
    + kwargs: ScaleLinearKwargs + + +
    + + + + +
    +
    +
    Inherited Members
    +
    + +
    +
    +
    +
    + +
    + + class + SigmoidDescr(ProcessingDescrBase): + + + +
    + +
    599class SigmoidDescr(ProcessingDescrBase):
    +600    """The logistic sigmoid function, a.k.a. expit function."""
    +601
    +602    name: Literal["sigmoid"] = "sigmoid"
    +603
    +604    @property
    +605    def kwargs(self) -> ProcessingKwargs:
    +606        """empty kwargs"""
    +607        return ProcessingKwargs()
    +
    + + +

    The logistic sigmoid function, a.k.a. expit function.
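    That is, out = 1 / (1 + exp(-tensor)); as a tiny numpy sketch:

        import numpy as np
        out = 1.0 / (1.0 + np.exp(-np.array([-2.0, 0.0, 2.0])))  # -> approx. [0.119, 0.5, 0.881]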

    +
    + + +
    +
    + name: Literal['sigmoid'] + + +
    + + + + +
    +
    + +
    + kwargs: ProcessingKwargs + + + +
    + +
    604    @property
    +605    def kwargs(self) -> ProcessingKwargs:
    +606        """empty kwargs"""
    +607        return ProcessingKwargs()
    +
    + + +

    empty kwargs

    +
    + + +
    +
    +
    Inherited Members
    +
    + +
    +
    +
    +
    + +
    + + class + ZeroMeanUnitVarianceKwargs(ProcessingKwargs): + + + +
    + +
    610class ZeroMeanUnitVarianceKwargs(ProcessingKwargs):
    +611    """key word arguments for `ZeroMeanUnitVarianceDescr`"""
    +612
    +613    mode: Literal["fixed", "per_dataset", "per_sample"] = "fixed"
    +614    """Mode for computing mean and variance.
    +615    |     mode    |             description              |
    +616    | ----------- | ------------------------------------ |
    +617    |   fixed     | Fixed values for mean and variance   |
    +618    | per_dataset | Compute for the entire dataset       |
    +619    | per_sample  | Compute for each sample individually |
    +620    """
    +621    axes: Annotated[AxesInCZYX, Field(examples=["xy"])]
    +622    """The subset of axes to normalize jointly.
    +623    For example `xy` to normalize the two image axes for 2d data jointly."""
    +624
    +625    mean: Annotated[
    +626        Union[float, NotEmpty[List[float]], None], Field(examples=[(1.1, 2.2, 3.3)])
    +627    ] = None
    +628    """The mean value(s) to use for `mode: fixed`.
    +629    For example `[1.1, 2.2, 3.3]` in the case of a 3 channel image with `axes: xy`."""
    +630    # todo: check if means match input axes (for mode 'fixed')
    +631
    +632    std: Annotated[
    +633        Union[float, NotEmpty[List[float]], None], Field(examples=[(0.1, 0.2, 0.3)])
    +634    ] = None
    +635    """The standard deviation values to use for `mode: fixed`. Analogous to mean."""
    +636
    +637    eps: Annotated[float, Interval(gt=0, le=0.1)] = 1e-6
    +638    """epsilon for numeric stability: `out = (tensor - mean) / (std + eps)`."""
    +639
    +640    @model_validator(mode="after")
    +641    def mean_and_std_match_mode(self) -> Self:
    +642        if self.mode == "fixed" and (self.mean is None or self.std is None):
    +643            raise ValueError("`mean` and `std` are required for `mode: fixed`.")
    +644        elif self.mode != "fixed" and (self.mean is not None or self.std is not None):
    +645            raise ValueError(f"`mean` and `std` not allowed for `mode: {self.mode}`")
    +646
    +647        return self
    +
    + + +

    key word arguments for ZeroMeanUnitVarianceDescr

    +
    + + +
    +
    + mode: Literal['fixed', 'per_dataset', 'per_sample'] + + +
    + + +

    Mode for computing mean and variance.

    + + + + + + + + + + + + + + + + + + + + + + +
    | mode | description |
    | --- | --- |
    | fixed | Fixed values for mean and variance |
    | per_dataset | Compute for the entire dataset |
    | per_sample | Compute for each sample individually |
    +
    + + +
    +
    +
    + axes: Annotated[str, RestrictCharacters(alphabet='czyx'), AfterValidator(func=<function validate_unique_entries at 0x7f5383752ac0>), FieldInfo(annotation=NoneType, required=True, examples=['xy'])] + + +
    + + +

    The subset of axes to normalize jointly. +For example xy to normalize the two image axes for 2d data jointly.

    +
    + + +
    +
    +
    + mean: Annotated[Union[float, Annotated[List[float], MinLen(min_length=1)], NoneType], FieldInfo(annotation=NoneType, required=True, examples=[(1.1, 2.2, 3.3)])] + + +
    + + +

    The mean value(s) to use for mode: fixed. +For example [1.1, 2.2, 3.3] in the case of a 3 channel image with axes: xy.

    +
    + + +
    +
    +
    + std: Annotated[Union[float, Annotated[List[float], MinLen(min_length=1)], NoneType], FieldInfo(annotation=NoneType, required=True, examples=[(0.1, 0.2, 0.3)])] + + +
    + + +

    The standard deviation values to use for mode: fixed. Analogous to mean.

    +
    + + +
    +
    +
    + eps: Annotated[float, Interval(gt=0, ge=None, lt=None, le=0.1)] + + +
    + + +

    epsilon for numeric stability: out = (tensor - mean) / (std + eps).
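    A minimal numpy sketch of mode: fixed with made-up mean/std values:

        import numpy as np
        tensor = np.array([10.0, 20.0, 30.0], dtype="float32")
        mean, std, eps = 20.0, 10.0, 1e-6
        out = (tensor - mean) / (std + eps)  # -> approx. [-1., 0., 1.]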

    +
    + + +
    +
    + +
    +
    @model_validator(mode='after')
    + + def + mean_and_std_match_mode(self) -> Self: + + + +
    + +
    640    @model_validator(mode="after")
    +641    def mean_and_std_match_mode(self) -> Self:
    +642        if self.mode == "fixed" and (self.mean is None or self.std is None):
    +643            raise ValueError("`mean` and `std` are required for `mode: fixed`.")
    +644        elif self.mode != "fixed" and (self.mean is not None or self.std is not None):
    +645            raise ValueError(f"`mean` and `std` not allowed for `mode: {self.mode}`")
    +646
    +647        return self
    +
    + + + + +
    +
    +
    + +
    + + class + ZeroMeanUnitVarianceDescr(ProcessingDescrBase): + + + +
    + +
    650class ZeroMeanUnitVarianceDescr(ProcessingDescrBase):
    +651    """Subtract mean and divide by variance."""
    +652
    +653    name: Literal["zero_mean_unit_variance"] = "zero_mean_unit_variance"
    +654    kwargs: ZeroMeanUnitVarianceKwargs
    +
    + + +

    Subtract mean and divide by variance.

    +
    + + +
    +
    + name: Literal['zero_mean_unit_variance'] + + +
    + + + + +
    +
    +
    + kwargs: ZeroMeanUnitVarianceKwargs + + +
    + + + + +
    +
    +
    Inherited Members
    +
    + +
    +
    +
    +
    + +
    + + class + ScaleRangeKwargs(ProcessingKwargs): + + + +
    + +
    657class ScaleRangeKwargs(ProcessingKwargs):
    +658    """key word arguments for `ScaleRangeDescr`
    +659
    +660    For `min_percentile`=0.0 (the default) and `max_percentile`=100 (the default)
    +661    this processing step normalizes data to the [0, 1] interval.
    +662    For other percentiles the normalized values will partially be outside the [0, 1]
    +663    interval. Use `ScaleRange` followed by `ClipDescr` if you want to limit the
    +664    normalized values to a range.
    +665    """
    +666
    +667    mode: Literal["per_dataset", "per_sample"]
    +668    """Mode for computing percentiles.
    +669    |     mode    |             description              |
    +670    | ----------- | ------------------------------------ |
    +671    | per_dataset | compute for the entire dataset       |
    +672    | per_sample  | compute for each sample individually |
    +673    """
    +674    axes: Annotated[AxesInCZYX, Field(examples=["xy"])]
    +675    """The subset of axes to normalize jointly.
    +676    For example xy to normalize the two image axes for 2d data jointly."""
    +677
    +678    min_percentile: Annotated[Union[int, float], Interval(ge=0, lt=100)] = 0.0
    +679    """The lower percentile used to determine the value to align with zero."""
    +680
    +681    max_percentile: Annotated[Union[int, float], Interval(gt=1, le=100)] = 100.0
    +682    """The upper percentile used to determine the value to align with one.
    +683    Has to be bigger than `min_percentile`.
    +684    The range is 1 to 100 instead of 0 to 100 to avoid mistakenly
    +685    accepting percentiles specified in the range 0.0 to 1.0."""
    +686
    +687    @model_validator(mode="after")
    +688    def min_smaller_max(self, info: ValidationInfo) -> Self:
    +689        if self.min_percentile >= self.max_percentile:
    +690            raise ValueError(
    +691                f"min_percentile {self.min_percentile} >= max_percentile"
    +692                + f" {self.max_percentile}"
    +693            )
    +694
    +695        return self
    +696
    +697    eps: Annotated[float, Interval(gt=0, le=0.1)] = 1e-6
    +698    """Epsilon for numeric stability.
    +699    `out = (tensor - v_lower) / (v_upper - v_lower + eps)`;
    +700    with `v_lower,v_upper` values at the respective percentiles."""
    +701
    +702    reference_tensor: Optional[TensorName] = None
    +703    """Tensor name to compute the percentiles from. Default: The tensor itself.
    +704    For any tensor in `inputs` only input tensor references are allowed.
    +705    For a tensor in `outputs` only input tensor references are allowed if `mode: per_dataset`"""
    +
    + + +

    key word arguments for ScaleRangeDescr

    + +

    For min_percentile=0.0 (the default) and max_percentile=100 (the default) +this processing step normalizes data to the [0, 1] interval. +For other percentiles the normalized values will partially be outside the [0, 1] +interval. Use ScaleRange followed by ClipDescr if you want to limit the +normalized values to a range.

    +
    + + +
    +
    + mode: Literal['per_dataset', 'per_sample'] + + +
    + + +

    Mode for computing percentiles.

    + + + + + + + + + + + + + + + + + + +
    | mode | description |
    | --- | --- |
    | per_dataset | compute for the entire dataset |
    | per_sample | compute for each sample individually |
    +
    + + +
    +
    +
    + axes: Annotated[str, RestrictCharacters(alphabet='czyx'), AfterValidator(func=<function validate_unique_entries at 0x7f5383752ac0>), FieldInfo(annotation=NoneType, required=True, examples=['xy'])] + + +
    + + +

    The subset of axes to normalize jointly. +For example xy to normalize the two image axes for 2d data jointly.

    +
    + + +
    +
    +
    + min_percentile: Annotated[Union[int, float], Interval(gt=None, ge=0, lt=100, le=None)] + + +
    + + +

    The lower percentile used to determine the value to align with zero.

    +
    + + +
    +
    +
    + max_percentile: Annotated[Union[int, float], Interval(gt=1, ge=None, lt=None, le=100)] + + +
    + + +

    The upper percentile used to determine the value to align with one. +Has to be bigger than min_percentile. +The range is 1 to 100 instead of 0 to 100 to avoid mistakenly +accepting percentiles specified in the range 0.0 to 1.0.

    +
    + + +
    +
    + +
    +
    @model_validator(mode='after')
    + + def + min_smaller_max(self, info: pydantic_core.core_schema.ValidationInfo) -> Self: + + + +
    + +
    687    @model_validator(mode="after")
    +688    def min_smaller_max(self, info: ValidationInfo) -> Self:
    +689        if self.min_percentile >= self.max_percentile:
    +690            raise ValueError(
    +691                f"min_percentile {self.min_percentile} >= max_percentile"
    +692                + f" {self.max_percentile}"
    +693            )
    +694
    +695        return self
    +
    + + + + +
    +
    +
    + eps: Annotated[float, Interval(gt=0, ge=None, lt=None, le=0.1)] + + +
    + + +

    Epsilon for numeric stability. +out = (tensor - v_lower) / (v_upper - v_lower + eps); +with v_lower,v_upper values at the respective percentiles.
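    A rough numpy sketch of the per-sample case (percentile values are illustrative):

        import numpy as np
        tensor = np.random.rand(1, 64, 64, 1).astype("float32")
        v_lower = np.percentile(tensor, 1.0)    # min_percentile
        v_upper = np.percentile(tensor, 99.0)   # max_percentile
        out = (tensor - v_lower) / (v_upper - v_lower + 1e-6)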

    +
    + + +
    +
    +
    + reference_tensor: Optional[TensorName] + + +
    + + +

    Tensor name to compute the percentiles from. Default: The tensor itself. +For any tensor in inputs only input tensor references are allowed. +For a tensor in outputs only input tensor references are allowed if mode: per_dataset

    +
    + + +
    +
    +
    + +
    + + class + ScaleRangeDescr(ProcessingDescrBase): + + + +
    + +
    708class ScaleRangeDescr(ProcessingDescrBase):
    +709    """Scale with percentiles."""
    +710
    +711    name: Literal["scale_range"] = "scale_range"
    +712    kwargs: ScaleRangeKwargs
    +
    + + +

    Scale with percentiles.

    +
    + + +
    +
    + name: Literal['scale_range'] + + +
    + + + + +
    +
    +
    + kwargs: ScaleRangeKwargs + + +
    + + + + +
    +
    +
    Inherited Members
    +
    + +
    +
    +
    +
    + +
    + + class + ScaleMeanVarianceKwargs(ProcessingKwargs): + + + +
    + +
    715class ScaleMeanVarianceKwargs(ProcessingKwargs):
    +716    """key word arguments for `ScaleMeanVarianceDescr`"""
    +717
    +718    mode: Literal["per_dataset", "per_sample"]
    +719    """Mode for computing mean and variance.
    +720    |     mode    |             description              |
    +721    | ----------- | ------------------------------------ |
    +722    | per_dataset | Compute for the entire dataset       |
    +723    | per_sample  | Compute for each sample individually |
    +724    """
    +725
    +726    reference_tensor: TensorName
    +727    """Name of tensor to match."""
    +728
    +729    axes: Annotated[Optional[AxesInCZYX], Field(examples=["xy"])] = None
    +730    """The subset of axes to scale jointly.
    +731    For example xy to normalize the two image axes for 2d data jointly.
    +732    Default: scale all non-batch axes jointly."""
    +733
    +734    eps: Annotated[float, Interval(gt=0, le=0.1)] = 1e-6
    +735    """Epsilon for numeric stability:
    +736    `out = (tensor - mean) / (std + eps) * (ref_std + eps) + ref_mean`."""
    +
    + + +

    key word arguments for ScaleMeanVarianceDescr

    +
    + + +
    +
    + mode: Literal['per_dataset', 'per_sample'] + + +
    + + +

    Mode for computing mean and variance.

    + + + + + + + + + + + + + + + + + + +
    | mode | description |
    | --- | --- |
    | per_dataset | Compute for the entire dataset |
    | per_sample | Compute for each sample individually |
    +
    + + +
    +
    +
    + reference_tensor: TensorName + + +
    + + +

    Name of tensor to match.

    +
    + + +
    +
    +
    + axes: Annotated[Optional[Annotated[str, RestrictCharacters(alphabet='czyx'), AfterValidator(func=<function validate_unique_entries at 0x7f5383752ac0>)]], FieldInfo(annotation=NoneType, required=True, examples=['xy'])] + + +
    + + +

    The subset of axes to scale jointly. +For example xy to normalize the two image axes for 2d data jointly. +Default: scale all non-batch axes jointly.

    +
    + + +
    +
    +
    + eps: Annotated[float, Interval(gt=0, ge=None, lt=None, le=0.1)] + + +
    + + +

    Epsilon for numeric stability: +out = (tensor - mean) / (std + eps) * (ref_std + eps) + ref_mean.
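    A numpy sketch with a made-up reference tensor:

        import numpy as np
        tensor = np.array([1.0, 2.0, 3.0])
        ref = np.array([10.0, 20.0, 30.0])
        eps = 1e-6
        out = (tensor - tensor.mean()) / (tensor.std() + eps) * (ref.std() + eps) + ref.mean()
        # out now has approximately the same mean and standard deviation as ref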

    +
    + + +
    +
    +
    + +
    + + class + ScaleMeanVarianceDescr(ProcessingDescrBase): + + + +
    + +
    739class ScaleMeanVarianceDescr(ProcessingDescrBase):
    +740    """Scale the tensor s.t. its mean and variance match a reference tensor."""
    +741
    +742    name: Literal["scale_mean_variance"] = "scale_mean_variance"
    +743    kwargs: ScaleMeanVarianceKwargs
    +
    + + +

    Scale the tensor s.t. its mean and variance match a reference tensor.

    +
    + + +
    +
    + name: Literal['scale_mean_variance'] + + +
    + + + + +
    +
    +
    + kwargs: ScaleMeanVarianceKwargs + + +
    + + + + +
    +
    +
    Inherited Members
    +
    + +
    +
    +
    +
    +
    + PreprocessingDescr = + + typing.Annotated[typing.Union[BinarizeDescr, ClipDescr, ScaleLinearDescr, SigmoidDescr, ZeroMeanUnitVarianceDescr, ScaleRangeDescr], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)] + + +
    + + + + +
    +
    +
    + PostprocessingDescr = + + typing.Annotated[typing.Union[BinarizeDescr, ClipDescr, ScaleLinearDescr, SigmoidDescr, ZeroMeanUnitVarianceDescr, ScaleRangeDescr, ScaleMeanVarianceDescr], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)] + + +
    + + + + +
    +
    + +
    + + class + InputTensorDescr(TensorDescrBase): + + + +
    + +
    771class InputTensorDescr(TensorDescrBase):
    +772    data_type: Literal["float32", "uint8", "uint16"]
    +773    """For now an input tensor is expected to be given as `float32`.
    +774    The data flow in bioimage.io models is explained
    +775    [in this diagram.](https://docs.google.com/drawings/d/1FTw8-Rn6a6nXdkZ_SkMumtcjvur9mtIhRqLwnKqZNHM/edit)."""
    +776
    +777    shape: Annotated[
    +778        Union[Sequence[int], ParameterizedInputShape],
    +779        Field(
    +780            examples=[(1, 512, 512, 1), dict(min=(1, 64, 64, 1), step=(0, 32, 32, 0))]
    +781        ),
    +782    ]
    +783    """Specification of input tensor shape."""
    +784
    +785    preprocessing: List[PreprocessingDescr] = Field(default_factory=list)
    +786    """Description of how this input should be preprocessed."""
    +787
    +788    @model_validator(mode="after")
    +789    def zero_batch_step_and_one_batch_size(self) -> Self:
    +790        bidx = self.axes.find("b")
    +791        if bidx == -1:
    +792            return self
    +793
    +794        if isinstance(self.shape, ParameterizedInputShape):
    +795            step = self.shape.step
    +796            shape = self.shape.min
    +797            if step[bidx] != 0:
    +798                raise ValueError(
    +799                    "Input shape step has to be zero in the batch dimension (the batch"
    +800                    + " dimension can always be increased, but `step` should specify how"
    +801                    + " to increase the minimal shape to find the largest single batch"
    +802                    + " shape)"
    +803                )
    +804        else:
    +805            shape = self.shape
    +806
    +807        if shape[bidx] != 1:
    +808            raise ValueError("Input shape has to be 1 in the batch dimension b.")
    +809
    +810        return self
    +811
    +812    @model_validator(mode="after")
    +813    def validate_preprocessing_kwargs(self) -> Self:
    +814        for p in self.preprocessing:
    +815            kwargs_axes = p.kwargs.get("axes", "")
    +816            if not isinstance(kwargs_axes, str):
    +817                raise ValueError(
    +818                    f"Expected an `axes` string, but got {type(kwargs_axes)}"
    +819                )
    +820
    +821            if any(a not in self.axes for a in kwargs_axes):
    +822                raise ValueError("`kwargs.axes` needs to be subset of `axes`")
    +823
    +824        return self
    +
    + + +

    Subpart of a resource description

    +
    + + +
    +
    + data_type: Literal['float32', 'uint8', 'uint16'] + + +
    + + +

    For now an input tensor is expected to be given as float32. +The data flow in bioimage.io models is explained +in this diagram.

    +
    + + +
    +
    +
    + shape: Annotated[Union[Sequence[int], ParameterizedInputShape], FieldInfo(annotation=NoneType, required=True, examples=[(1, 512, 512, 1), {'min': (1, 64, 64, 1), 'step': (0, 32, 32, 0)}])] + + +
    + + +

    Specification of input tensor shape.

    +
    + + +
    +
    +
    + preprocessing: List[Annotated[Union[BinarizeDescr, ClipDescr, ScaleLinearDescr, SigmoidDescr, ZeroMeanUnitVarianceDescr, ScaleRangeDescr], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)]] + + +
    + + +

    Description of how this input should be preprocessed.

    +
    + + +
    +
    + +
    +
    @model_validator(mode='after')
    + + def + zero_batch_step_and_one_batch_size(self) -> Self: + + + +
    + +
    788    @model_validator(mode="after")
    +789    def zero_batch_step_and_one_batch_size(self) -> Self:
    +790        bidx = self.axes.find("b")
    +791        if bidx == -1:
    +792            return self
    +793
    +794        if isinstance(self.shape, ParameterizedInputShape):
    +795            step = self.shape.step
    +796            shape = self.shape.min
    +797            if step[bidx] != 0:
    +798                raise ValueError(
    +799                    "Input shape step has to be zero in the batch dimension (the batch"
    +800                    + " dimension can always be increased, but `step` should specify how"
    +801                    + " to increase the minimal shape to find the largest single batch"
    +802                    + " shape)"
    +803                )
    +804        else:
    +805            shape = self.shape
    +806
    +807        if shape[bidx] != 1:
    +808            raise ValueError("Input shape has to be 1 in the batch dimension b.")
    +809
    +810        return self
    +
    + + + + +
    +
    + +
    +
    @model_validator(mode='after')
    + + def + validate_preprocessing_kwargs(self) -> Self: + + + +
    + +
    812    @model_validator(mode="after")
    +813    def validate_preprocessing_kwargs(self) -> Self:
    +814        for p in self.preprocessing:
    +815            kwargs_axes = p.kwargs.get("axes", "")
    +816            if not isinstance(kwargs_axes, str):
    +817                raise ValueError(
    +818                    f"Expected an `axes` string, but got {type(kwargs_axes)}"
    +819                )
    +820
    +821            if any(a not in self.axes for a in kwargs_axes):
    +822                raise ValueError("`kwargs.axes` needs to be subset of `axes`")
    +823
    +824        return self
    +
    + + + + +
    +
    +
    Inherited Members
    +
    + +
    +
    +
    +
    + +
    + + class + OutputTensorDescr(TensorDescrBase): + + + +
    + +
    827class OutputTensorDescr(TensorDescrBase):
    +828    data_type: Literal[
    +829        "float32",
    +830        "float64",
    +831        "uint8",
    +832        "int8",
    +833        "uint16",
    +834        "int16",
    +835        "uint32",
    +836        "int32",
    +837        "uint64",
    +838        "int64",
    +839        "bool",
    +840    ]
    +841    """Data type.
    +842    The data flow in bioimage.io models is explained
    +843    [in this diagram.](https://docs.google.com/drawings/d/1FTw8-Rn6a6nXdkZ_SkMumtcjvur9mtIhRqLwnKqZNHM/edit)."""
    +844
    +845    shape: Union[Sequence[int], ImplicitOutputShape]
    +846    """Output tensor shape."""
    +847
    +848    halo: Optional[Sequence[int]] = None
    +849    """The `halo` that should be cropped from the output tensor to avoid boundary effects.
    +850    The `halo` is to be cropped from both sides, i.e. `shape_after_crop = shape - 2 * halo`.
    +851    To document a `halo` that is already cropped by the model `shape.offset` has to be used instead."""
    +852
    +853    postprocessing: List[PostprocessingDescr] = Field(default_factory=list)
    +854    """Description of how this output should be postprocessed."""
    +855
    +856    @model_validator(mode="after")
    +857    def matching_halo_length(self) -> Self:
    +858        if self.halo and len(self.halo) != len(self.shape):
    +859            raise ValueError(
    +860                f"halo {self.halo} has to have same length as shape {self.shape}!"
    +861            )
    +862
    +863        return self
    +864
    +865    @model_validator(mode="after")
    +866    def validate_postprocessing_kwargs(self) -> Self:
    +867        for p in self.postprocessing:
    +868            kwargs_axes = p.kwargs.get("axes", "")
    +869            if not isinstance(kwargs_axes, str):
    +870                raise ValueError(f"Expected {kwargs_axes} to be a string")
    +871
    +872            if any(a not in self.axes for a in kwargs_axes):
    +873                raise ValueError("`kwargs.axes` needs to be subset of axes")
    +874
    +875        return self
    +
    + + +

    Subpart of a resource description

    +
    + + +
    +
    + data_type: Literal['float32', 'float64', 'uint8', 'int8', 'uint16', 'int16', 'uint32', 'int32', 'uint64', 'int64', 'bool'] + + +
    + + +

    Data type. +The data flow in bioimage.io models is explained +in this diagram.

    +
    + + +
    +
    +
    + shape: Union[Sequence[int], ImplicitOutputShape] + + +
    + + +

    Output tensor shape.

    +
    + + +
    +
    +
    + halo: Optional[Sequence[int]] + + +
    + + +

    The halo that should be cropped from the output tensor to avoid boundary effects. +The halo is to be cropped from both sides, i.e. shape_after_crop = shape - 2 * halo. +To document a halo that is already cropped by the model shape.offset has to be used instead.
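    For example (made-up numbers), with a halo of 16 on both spatial axes a consumer crops a [1, 128, 128, 1] output to [1, 96, 96, 1]:

        shape = [1, 128, 128, 1]
        halo = [0, 16, 16, 0]
        shape_after_crop = [s - 2 * h for s, h in zip(shape, halo)]  # -> [1, 96, 96, 1]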

    +
    + + +
    +
    +
    + postprocessing: List[Annotated[Union[BinarizeDescr, ClipDescr, ScaleLinearDescr, SigmoidDescr, ZeroMeanUnitVarianceDescr, ScaleRangeDescr, ScaleMeanVarianceDescr], Discriminator(discriminator='name', custom_error_type=None, custom_error_message=None, custom_error_context=None)]] + + +
    + + +

    Description of how this output should be postprocessed.

    +
    + + +
    +
    + +
    +
    @model_validator(mode='after')
    + + def + matching_halo_length(self) -> Self: + + + +
    + +
    856    @model_validator(mode="after")
    +857    def matching_halo_length(self) -> Self:
    +858        if self.halo and len(self.halo) != len(self.shape):
    +859            raise ValueError(
    +860                f"halo {self.halo} has to have same length as shape {self.shape}!"
    +861            )
    +862
    +863        return self
    +
    + + + + +
    +
    + +
    +
    @model_validator(mode='after')
    + + def + validate_postprocessing_kwargs(self) -> Self: + + + +
    + +
    865    @model_validator(mode="after")
    +866    def validate_postprocessing_kwargs(self) -> Self:
    +867        for p in self.postprocessing:
    +868            kwargs_axes = p.kwargs.get("axes", "")
    +869            if not isinstance(kwargs_axes, str):
    +870                raise ValueError(f"Expected {kwargs_axes} to be a string")
    +871
    +872            if any(a not in self.axes for a in kwargs_axes):
    +873                raise ValueError("`kwargs.axes` needs to be subset of axes")
    +874
    +875        return self
    +
    + + + + +
    +
    +
    Inherited Members
    +
    + +
    +
    +
    +
    +
    + KnownRunMode = +typing.Literal['deepimagej'] + + +
    + + + + +
    +
    + +
    + + class + RunMode(bioimageio.spec._internal.node.Node): + + + +
    + +
    881class RunMode(Node):
    +882    name: Annotated[
    +883        Union[KnownRunMode, str], warn(KnownRunMode, "Unknown run mode '{value}'.")
    +884    ]
    +885    """Run mode name"""
    +886
    +887    kwargs: Dict[str, Any] = Field(default_factory=dict)
    +888    """Run mode specific key word arguments"""
    +
    + + +

    Subpart of a resource description

    +
    + + +
    +
    + name: Annotated[Union[Literal['deepimagej'], str], AfterWarner(func=<function as_warning.<locals>.wrapper at 0x7f536f9eeca0>, severity=30, msg="Unknown run mode '{value}'.", context={'typ': Literal['deepimagej']})] + + +
    + + +

    Run mode name

    +
    + + +
    +
    +
    + kwargs: Dict[str, Any] + + +
    + + +

    Run mode specific key word arguments

    +
    + + +
    +
    +
    + +
    + + class + LinkedModel(bioimageio.spec._internal.node.Node): + + + +
    + +
    891class LinkedModel(Node):
    +892    """Reference to a bioimage.io model."""
    +893
    +894    id: ModelId
    +895    """A valid model `id` from the bioimage.io collection."""
    +896
    +897    version_number: Optional[int] = None
    +898    """version number (n-th published version, not the semantic version) of linked model"""
    +
    + + +

    Reference to a bioimage.io model.

    +
    + + +
    +
    + id: ModelId + + +
    + + +

    A valid model id from the bioimage.io collection.

    +
    + + +
    +
    +
    + version_number: Optional[int] + + +
    + + +

    version number (n-th published version, not the semantic version) of linked model

    +
    + + +
    +
    +
    + +
    + + def + package_weights( value: bioimageio.spec._internal.node.Node, handler: pydantic_core.core_schema.SerializerFunctionWrapHandler, info: pydantic_core.core_schema.SerializationInfo): + + + +
    + +
    901def package_weights(
    +902    value: Node,  # Union[v0_4.WeightsDescr, v0_5.WeightsDescr]
    +903    handler: SerializerFunctionWrapHandler,
    +904    info: SerializationInfo,
    +905):
    +906    ctxt = packaging_context_var.get()
    +907    if ctxt is not None and ctxt.weights_priority_order is not None:
    +908        for wf in ctxt.weights_priority_order:
    +909            w = getattr(value, wf, None)
    +910            if w is not None:
    +911                break
    +912        else:
    +913            raise ValueError(
    +914                "None of the weight formats in `weights_priority_order`"
    +915                + f" ({ctxt.weights_priority_order}) is present in the given model."
    +916            )
    +917
    +918        assert isinstance(w, Node), type(w)
    +919        # construct WeightsDescr with new single weight format entry
    +920        new_w = w.model_construct(**{k: v for k, v in w if k != "parent"})
    +921        value = value.model_construct(None, **{wf: new_w})
    +922
    +923    return handler(
    +924        value, info  # pyright: ignore[reportArgumentType]  # taken from pydantic docs
    +925    )
    +
    + + + + +
    +
    + +
    + + class + ModelDescr(bioimageio.spec.generic.v0_2.GenericModelDescrBase): + + + +
    + +
     928class ModelDescr(GenericModelDescrBase, title="bioimage.io model specification"):
    + 929    """Specification of the fields used in a bioimage.io-compliant RDF that describes AI models with pretrained weights.
    + 930
    + 931    These fields are typically stored in a YAML file which we call a model resource description file (model RDF).
    + 932    """
    + 933
    + 934    format_version: Literal["0.4.10",] = "0.4.10"
    + 935    """Version of the bioimage.io model description specification used.
    + 936    When creating a new model always use the latest micro/patch version described here.
    + 937    The `format_version` is important for any consumer software to understand how to parse the fields.
    + 938    """
    + 939
    + 940    type: Literal["model"] = "model"
    + 941    """Specialized resource type 'model'"""
    + 942
    + 943    id: Optional[ModelId] = None
    + 944    """bioimage.io-wide unique resource identifier
    + 945    assigned by bioimage.io; version **un**specific."""
    + 946
    + 947    authors: NotEmpty[  # pyright: ignore[reportGeneralTypeIssues]  # make mandatory
    + 948        List[Author]
    + 949    ]
    + 950    """The authors are the creators of the model RDF and the primary points of contact."""
    + 951
    + 952    documentation: Annotated[
    + 953        ImportantFileSource,
    + 954        Field(
    + 955            examples=[
    + 956                "https://raw.githubusercontent.com/bioimage-io/spec-bioimage-io/main/example_descriptions/models/unet2d_nuclei_broad/README.md",
    + 957                "README.md",
    + 958            ],
    + 959        ),
    + 960    ]
    + 961    """∈📦 URL or relative path to a markdown file with additional documentation.
    + 962    The recommended documentation file name is `README.md`. An `.md` suffix is mandatory.
    + 963    The documentation should include a '[#[#]]# Validation' (sub)section
    + 964    with details on how to quantitatively validate the model on unseen data."""
    + 965
    + 966    inputs: NotEmpty[List[InputTensorDescr]]
    + 967    """Describes the input tensors expected by this model."""
    + 968
    + 969    license: Annotated[
    + 970        Union[LicenseId, str],
    + 971        warn(LicenseId, "Unknown license id '{value}'."),
    + 972        Field(examples=["CC0-1.0", "MIT", "BSD-2-Clause"]),
    + 973    ]
    + 974    """A [SPDX license identifier](https://spdx.org/licenses/).
    + 975    We do not support custom licenses beyond the SPDX license list; if you need that, please
    + 976    [open a GitHub issue](https://github.com/bioimage-io/spec-bioimage-io/issues/new/choose
    + 977    ) to discuss your intentions with the community."""
    + 978
    + 979    name: Annotated[
    + 980        str,
    + 981        MinLen(1),
    + 982        warn(MinLen(5), "Name shorter than 5 characters.", INFO),
    + 983        warn(MaxLen(64), "Name longer than 64 characters.", INFO),
    + 984    ]
    + 985    """A human-readable name of this model.
    + 986    It should be no longer than 64 characters and only contain letter, number, underscore, minus or space characters."""
    + 987
    + 988    outputs: NotEmpty[List[OutputTensorDescr]]
    + 989    """Describes the output tensors."""
    + 990
    + 991    @field_validator("inputs", "outputs")
    + 992    @classmethod
    + 993    def unique_tensor_descr_names(
    + 994        cls, value: Sequence[Union[InputTensorDescr, OutputTensorDescr]]
    + 995    ) -> Sequence[Union[InputTensorDescr, OutputTensorDescr]]:
    + 996        unique_names = {str(v.name) for v in value}
    + 997        if len(unique_names) != len(value):
    + 998            raise ValueError("Duplicate tensor descriptor names")
    + 999
    +1000        return value
    +1001
    +1002    @model_validator(mode="after")
    +1003    def unique_io_names(self) -> Self:
    +1004        unique_names = {str(ss.name) for s in (self.inputs, self.outputs) for ss in s}
    +1005        if len(unique_names) != (len(self.inputs) + len(self.outputs)):
    +1006            raise ValueError("Duplicate tensor descriptor names across inputs/outputs")
    +1007
    +1008        return self
    +1009
    +1010    @model_validator(mode="after")
    +1011    def minimum_shape2valid_output(self) -> Self:
    +1012        tensors_by_name: Dict[
    +1013            TensorName, Union[InputTensorDescr, OutputTensorDescr]
    +1014        ] = {t.name: t for t in self.inputs + self.outputs}
    +1015
    +1016        for out in self.outputs:
    +1017            if isinstance(out.shape, ImplicitOutputShape):
    +1018                ndim_ref = len(tensors_by_name[out.shape.reference_tensor].shape)
    +1019                ndim_out_ref = len(
    +1020                    [scale for scale in out.shape.scale if scale is not None]
    +1021                )
    +1022                if ndim_ref != ndim_out_ref:
    +1023                    expanded_dim_note = (
    +1024                        " Note that expanded dimensions (`scale`: null) are not"
    +1025                        + f" counted for {out.name}'s dimensionality here."
    +1026                        if None in out.shape.scale
    +1027                        else ""
    +1028                    )
    +1029                    raise ValueError(
    +1030                        f"Referenced tensor '{out.shape.reference_tensor}' with"
    +1031                        + f" {ndim_ref} dimensions does not match output tensor"
    +1032                        + f" '{out.name}' with"
    +1033                        + f" {ndim_out_ref} dimensions.{expanded_dim_note}"
    +1034                    )
    +1035
    +1036            min_out_shape = self._get_min_shape(out, tensors_by_name)
    +1037            if out.halo:
    +1038                halo = out.halo
    +1039                halo_msg = f" for halo {out.halo}"
    +1040            else:
    +1041                halo = [0] * len(min_out_shape)
    +1042                halo_msg = ""
    +1043
    +1044            if any([s - 2 * h < 1 for s, h in zip(min_out_shape, halo)]):
    +1045                raise ValueError(
    +1046                    f"Minimal shape {min_out_shape} of output {out.name} is too"
    +1047                    + f" small{halo_msg}."
    +1048                )
    +1049
    +1050        return self
    +1051
    +1052    @classmethod
    +1053    def _get_min_shape(
    +1054        cls,
    +1055        t: Union[InputTensorDescr, OutputTensorDescr],
    +1056        tensors_by_name: Dict[TensorName, Union[InputTensorDescr, OutputTensorDescr]],
    +1057    ) -> Sequence[int]:
    +1058        """output with subtracted halo has to result in meaningful output even for the minimal input
    +1059        see https://github.com/bioimage-io/spec-bioimage-io/issues/392
    +1060        """
    +1061        if isinstance(t.shape, collections.abc.Sequence):
    +1062            return t.shape
    +1063        elif isinstance(t.shape, ParameterizedInputShape):
    +1064            return t.shape.min
    +1065        elif isinstance(t.shape, ImplicitOutputShape):
    +1066            pass
    +1067        else:
    +1068            assert_never(t.shape)
    +1069
    +1070        ref_shape = cls._get_min_shape(
    +1071            tensors_by_name[t.shape.reference_tensor], tensors_by_name
    +1072        )
    +1073
    +1074        if None not in t.shape.scale:
    +1075            scale: Sequence[float, ...] = t.shape.scale  # type: ignore
    +1076        else:
    +1077            expanded_dims = [idx for idx, sc in enumerate(t.shape.scale) if sc is None]
    +1078            new_ref_shape: List[int] = []
    +1079            for idx in range(len(t.shape.scale)):
    +1080                ref_idx = idx - sum(int(exp < idx) for exp in expanded_dims)
    +1081                new_ref_shape.append(1 if idx in expanded_dims else ref_shape[ref_idx])
    +1082
    +1083            ref_shape = new_ref_shape
    +1084            assert len(ref_shape) == len(t.shape.scale)
    +1085            scale = [0.0 if sc is None else sc for sc in t.shape.scale]
    +1086
    +1087        offset = t.shape.offset
    +1088        assert len(offset) == len(scale)
    +1089        return [int(rs * s + 2 * off) for rs, s, off in zip(ref_shape, scale, offset)]
    +1090
    +1091    @model_validator(mode="after")
    +1092    def validate_tensor_references_in_inputs(self) -> Self:
    +1093        for t in self.inputs:
    +1094            for proc in t.preprocessing:
    +1095                if "reference_tensor" not in proc.kwargs:
    +1096                    continue
    +1097
    +1098                ref_tensor = proc.kwargs["reference_tensor"]
    +1099                if ref_tensor is not None and str(ref_tensor) not in {
    +1100                    str(t.name) for t in self.inputs
    +1101                }:
    +1102                    raise ValueError(f"'{ref_tensor}' not found in inputs")
    +1103
    +1104                if ref_tensor == t.name:
    +1105                    raise ValueError(
    +1106                        f"invalid self reference for preprocessing of tensor {t.name}"
    +1107                    )
    +1108
    +1109        return self
    +1110
    +1111    @model_validator(mode="after")
    +1112    def validate_tensor_references_in_outputs(self) -> Self:
    +1113        for t in self.outputs:
    +1114            for proc in t.postprocessing:
    +1115                if "reference_tensor" not in proc.kwargs:
    +1116                    continue
    +1117                ref_tensor = proc.kwargs["reference_tensor"]
    +1118                if ref_tensor is not None and str(ref_tensor) not in {
    +1119                    str(t.name) for t in self.inputs
    +1120                }:
    +1121                    raise ValueError(f"'{ref_tensor}' not found in inputs")
    +1122
    +1123        return self
    +1124
    +1125    packaged_by: List[Author] = Field(default_factory=list)
    +1126    """The persons that have packaged and uploaded this model.
    +1127    Only required if those persons differ from the `authors`."""
    +1128
    +1129    parent: Optional[LinkedModel] = None
    +1130    """The model from which this model is derived, e.g. by fine-tuning the weights."""
    +1131
    +1132    @field_validator("parent", mode="before")
    +1133    @classmethod
    +1134    def ignore_url_parent(cls, parent: Any):
    +1135        if isinstance(parent, dict):
    +1136            return None
    +1137
    +1138        else:
    +1139            return parent
    +1140
    +1141    run_mode: Optional[RunMode] = None
    +1142    """Custom run mode for this model: for more complex prediction procedures like test time
    +1143    data augmentation that currently cannot be expressed in the specification.
    +1144    No standard run modes are defined yet."""
    +1145
    +1146    sample_inputs: List[ImportantFileSource] = Field(default_factory=list)
    +1147    """∈📦 URLs/relative paths to sample inputs to illustrate possible inputs for the model,
    +1148    for example stored as PNG or TIFF images.
    +1149    The sample files primarily serve to inform a human user about an example use case"""
    +1150
    +1151    sample_outputs: List[ImportantFileSource] = Field(default_factory=list)
    +1152    """∈📦 URLs/relative paths to sample outputs corresponding to the `sample_inputs`."""
    +1153
    +1154    test_inputs: NotEmpty[
    +1155        List[Annotated[ImportantFileSource, WithSuffix(".npy", case_sensitive=True)]]
    +1156    ]
    +1157    """∈📦 Test input tensors compatible with the `inputs` description for a **single test case**.
    +1158    This means if your model has more than one input, you should provide one URL/relative path for each input.
    +1159    Each test input should be a file with an ndarray in
    +1160    [numpy.lib file format](https://numpy.org/doc/stable/reference/generated/numpy.lib.format.html#module-numpy.lib.format).
    +1161    The extension must be '.npy'."""
    +1162
    +1163    test_outputs: NotEmpty[
    +1164        List[Annotated[ImportantFileSource, WithSuffix(".npy", case_sensitive=True)]]
    +1165    ]
    +1166    """∈📦 Analog to `test_inputs`."""
    +1167
    +1168    timestamp: Datetime
    +1169    """Timestamp in [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601) format
    +1170    with a few restrictions listed [here](https://docs.python.org/3/library/datetime.html#datetime.datetime.fromisoformat)."""
    +1171
    +1172    training_data: Union[LinkedDataset, DatasetDescr, None] = None
    +1173    """The dataset used to train this model"""
    +1174
    +1175    weights: Annotated[WeightsDescr, WrapSerializer(package_weights)]
    +1176    """The weights for this model.
    +1177    Weights can be given for different formats, but should otherwise be equivalent.
    +1178    The available weight formats determine which consumers can use this model."""
    +1179
    +1180    @model_validator(mode="before")
    +1181    @classmethod
    +1182    def _convert_from_older_format(
    +1183        cls, data: BioimageioYamlContent, /
    +1184    ) -> BioimageioYamlContent:
    +1185        convert_from_older_format(data)
    +1186        return data
    +1187
    +1188    def get_input_test_arrays(self) -> List[NDArray[Any]]:
    +1189        data = [load_array(download(ipt).path) for ipt in self.test_inputs]
    +1190        assert all(isinstance(d, np.ndarray) for d in data)
    +1191        return data
    +1192
    +1193    def get_output_test_arrays(self) -> List[NDArray[Any]]:
    +1194        data = [load_array(download(out).path) for out in self.test_outputs]
    +1195        assert all(isinstance(d, np.ndarray) for d in data)
    +1196        return data
    +
    + + +

    Specification of the fields used in a bioimage.io-compliant RDF that describes AI models with pretrained weights.

    + +

    These fields are typically stored in a YAML file which we call a model resource description file (model RDF).

    +
    + + +
    +
    + format_version: Literal['0.4.10'] + + +
    + + +

    Version of the bioimage.io model description specification used. +When creating a new model always use the latest micro/patch version described here. +The format_version is important for any consumer software to understand how to parse the fields.

    +
    + + +
    +
    +
    + type: Literal['model'] + + +
    + + +

    Specialized resource type 'model'

    +
    + + +
    +
    +
    + id: Optional[ModelId] + + +
    + + +

    bioimage.io-wide unique resource identifier +assigned by bioimage.io; version unspecific.

    +
    + + +
    +
    +
    + authors: Annotated[List[bioimageio.spec.generic.v0_2.Author], MinLen(min_length=1)] + + +
    + + +

    The authors are the creators of the model RDF and the primary points of contact.

    +
    + + +
    +
    +
    + documentation: Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')]), AfterValidator(func=<function wo_special_file_name at 0x7f538192cea0>), PlainSerializer(func=<function _package at 0x7f538192d620>, return_type=PydanticUndefined, when_used='unless-none'), FieldInfo(annotation=NoneType, required=True, examples=['https://raw.githubusercontent.com/bioimage-io/spec-bioimage-io/main/example_descriptions/models/unet2d_nuclei_broad/README.md', 'README.md'])] + + +
    + + +

    ∈📦 URL or relative path to a markdown file with additional documentation. +The recommended documentation file name is README.md. An .md suffix is mandatory. +The documentation should include a '[#[#]]# Validation' (sub)section +with details on how to quantitatively validate the model on unseen data.

    +
    + + +
    +
    +
    + inputs: Annotated[List[InputTensorDescr], MinLen(min_length=1)] + + +
    + + +

    Describes the input tensors expected by this model.

    +
    + + +
    +
    +
    + license: Annotated[Union[bioimageio.spec._internal.license_id.LicenseId, str], AfterWarner(func=<function as_warning.<locals>.wrapper at 0x7f536f9edda0>, severity=30, msg="Unknown license id '{value}'.", context={'typ': <class 'bioimageio.spec._internal.license_id.LicenseId'>}), FieldInfo(annotation=NoneType, required=True, examples=['CC0-1.0', 'MIT', 'BSD-2-Clause'])] + + +
    + + +

    A SPDX license identifier. +We do not support custom licenses beyond the SPDX license list. If you need that, please +open a GitHub issue to discuss your intentions with the community.

    +
    + + +
    +
    +
    + name: Annotated[str, MinLen(min_length=1), AfterWarner(func=<function as_warning.<locals>.wrapper at 0x7f536f9ee7a0>, severity=20, msg='Name shorter than 5 characters.', context={'typ': Annotated[Any, MinLen(min_length=5)]}), AfterWarner(func=<function as_warning.<locals>.wrapper at 0x7f536f9ec540>, severity=20, msg='Name longer than 64 characters.', context={'typ': Annotated[Any, MaxLen(max_length=64)]})] + + +
    + + +

    A human-readable name of this model. +It should be no longer than 64 characters and may only contain letters, numbers, underscores, minus signs, or spaces.

    +
    + + +
    +
    +
    + outputs: Annotated[List[OutputTensorDescr], MinLen(min_length=1)] + + +
    + + +

    Describes the output tensors.

    +
    + + +
    +
    + +
    +
    @field_validator('inputs', 'outputs')
    +
    @classmethod
    + + def + unique_tensor_descr_names( cls, value: Sequence[Union[InputTensorDescr, OutputTensorDescr]]) -> Sequence[Union[InputTensorDescr, OutputTensorDescr]]: + + + +
    + +
     991    @field_validator("inputs", "outputs")
    + 992    @classmethod
    + 993    def unique_tensor_descr_names(
    + 994        cls, value: Sequence[Union[InputTensorDescr, OutputTensorDescr]]
    + 995    ) -> Sequence[Union[InputTensorDescr, OutputTensorDescr]]:
    + 996        unique_names = {str(v.name) for v in value}
    + 997        if len(unique_names) != len(value):
    + 998            raise ValueError("Duplicate tensor descriptor names")
    + 999
    +1000        return value
    +
    + + + + +
    +
    + +
    +
    @model_validator(mode='after')
    + + def + unique_io_names(self) -> Self: + + + +
    + +
    1002    @model_validator(mode="after")
    +1003    def unique_io_names(self) -> Self:
    +1004        unique_names = {str(ss.name) for s in (self.inputs, self.outputs) for ss in s}
    +1005        if len(unique_names) != (len(self.inputs) + len(self.outputs)):
    +1006            raise ValueError("Duplicate tensor descriptor names across inputs/outputs")
    +1007
    +1008        return self
    +
    + + + + +
    +
    + +
    +
    @model_validator(mode='after')
    + + def + minimum_shape2valid_output(self) -> Self: + + + +
    + +
    1010    @model_validator(mode="after")
    +1011    def minimum_shape2valid_output(self) -> Self:
    +1012        tensors_by_name: Dict[
    +1013            TensorName, Union[InputTensorDescr, OutputTensorDescr]
    +1014        ] = {t.name: t for t in self.inputs + self.outputs}
    +1015
    +1016        for out in self.outputs:
    +1017            if isinstance(out.shape, ImplicitOutputShape):
    +1018                ndim_ref = len(tensors_by_name[out.shape.reference_tensor].shape)
    +1019                ndim_out_ref = len(
    +1020                    [scale for scale in out.shape.scale if scale is not None]
    +1021                )
    +1022                if ndim_ref != ndim_out_ref:
    +1023                    expanded_dim_note = (
    +1024                        " Note that expanded dimensions (`scale`: null) are not"
    +1025                        + f" counted for {out.name}'s dimensionality here."
    +1026                        if None in out.shape.scale
    +1027                        else ""
    +1028                    )
    +1029                    raise ValueError(
    +1030                        f"Referenced tensor '{out.shape.reference_tensor}' with"
    +1031                        + f" {ndim_ref} dimensions does not match output tensor"
    +1032                        + f" '{out.name}' with"
    +1033                        + f" {ndim_out_ref} dimensions.{expanded_dim_note}"
    +1034                    )
    +1035
    +1036            min_out_shape = self._get_min_shape(out, tensors_by_name)
    +1037            if out.halo:
    +1038                halo = out.halo
    +1039                halo_msg = f" for halo {out.halo}"
    +1040            else:
    +1041                halo = [0] * len(min_out_shape)
    +1042                halo_msg = ""
    +1043
    +1044            if any([s - 2 * h < 1 for s, h in zip(min_out_shape, halo)]):
    +1045                raise ValueError(
    +1046                    f"Minimal shape {min_out_shape} of output {out.name} is too"
    +1047                    + f" small{halo_msg}."
    +1048                )
    +1049
    +1050        return self
    +
    + + + + +
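For an implicit output shape, the minimal output extent per axis is derived from the referenced input's minimal shape as `int(reference_size * scale + 2 * offset)`, and the description is rejected if cropping the halo from both sides (`size - 2 * halo`) would leave fewer than one element. A minimal sketch of this arithmetic with hypothetical values (not taken from any real model):

    ref_min_shape = [1, 1, 64, 64]   # minimal shape of the referenced input tensor
    scale = [1.0, 1.0, 0.5, 0.5]     # ImplicitOutputShape.scale
    offset = [0.0, 0.0, 8.0, 8.0]    # ImplicitOutputShape.offset
    halo = [0, 0, 4, 4]
    min_out_shape = [int(rs * s + 2 * off) for rs, s, off in zip(ref_min_shape, scale, offset)]
    # -> [1, 1, 48, 48]
    assert all(s - 2 * h >= 1 for s, h in zip(min_out_shape, halo))  # 48 - 2*4 = 40 >= 1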
    +
    + +
    +
    @model_validator(mode='after')
    + + def + validate_tensor_references_in_inputs(self) -> Self: + + + +
    + +
    1091    @model_validator(mode="after")
    +1092    def validate_tensor_references_in_inputs(self) -> Self:
    +1093        for t in self.inputs:
    +1094            for proc in t.preprocessing:
    +1095                if "reference_tensor" not in proc.kwargs:
    +1096                    continue
    +1097
    +1098                ref_tensor = proc.kwargs["reference_tensor"]
    +1099                if ref_tensor is not None and str(ref_tensor) not in {
    +1100                    str(t.name) for t in self.inputs
    +1101                }:
    +1102                    raise ValueError(f"'{ref_tensor}' not found in inputs")
    +1103
    +1104                if ref_tensor == t.name:
    +1105                    raise ValueError(
    +1106                        f"invalid self reference for preprocessing of tensor {t.name}"
    +1107                    )
    +1108
    +1109        return self
    +
    + + + + +
    +
    + +
    +
    @model_validator(mode='after')
    + + def + validate_tensor_references_in_outputs(self) -> Self: + + + +
    + +
    1111    @model_validator(mode="after")
    +1112    def validate_tensor_references_in_outputs(self) -> Self:
    +1113        for t in self.outputs:
    +1114            for proc in t.postprocessing:
    +1115                if "reference_tensor" not in proc.kwargs:
    +1116                    continue
    +1117                ref_tensor = proc.kwargs["reference_tensor"]
    +1118                if ref_tensor is not None and str(ref_tensor) not in {
    +1119                    str(t.name) for t in self.inputs
    +1120                }:
    +1121                    raise ValueError(f"'{ref_tensor}' not found in inputs")
    +1122
    +1123        return self
    +
    + + + + +
    +
    +
    + packaged_by: List[bioimageio.spec.generic.v0_2.Author] + + +
    + + +

    The persons that have packaged and uploaded this model. +Only required if those persons differ from the authors.

    +
    + + +
    +
    +
    + parent: Optional[LinkedModel] + + +
    + + +

    The model from which this model is derived, e.g. by fine-tuning the weights.

    +
    + + +
    +
    + +
    +
    @field_validator('parent', mode='before')
    +
    @classmethod
    + + def + ignore_url_parent(cls, parent: Any): + + + +
    + +
    1132    @field_validator("parent", mode="before")
    +1133    @classmethod
    +1134    def ignore_url_parent(cls, parent: Any):
    +1135        if isinstance(parent, dict):
    +1136            return None
    +1137
    +1138        else:
    +1139            return parent
    +
    + + + + +
    +
    +
    + run_mode: Optional[RunMode] + + +
    + + +

    Custom run mode for this model: for more complex prediction procedures like test time +data augmentation that currently cannot be expressed in the specification. +No standard run modes are defined yet.

    +
    + + +
    +
    +
    + sample_inputs: List[Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')]), AfterValidator(func=<function wo_special_file_name at 0x7f538192cea0>), PlainSerializer(func=<function _package at 0x7f538192d620>, return_type=PydanticUndefined, when_used='unless-none')]] + + +
    + + +

    ∈📦 URLs/relative paths to sample inputs to illustrate possible inputs for the model, +for example stored as PNG or TIFF images. +The sample files primarily serve to inform a human user about an example use case

    +
    + + +
    +
    +
    + sample_outputs: List[Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')]), AfterValidator(func=<function wo_special_file_name at 0x7f538192cea0>), PlainSerializer(func=<function _package at 0x7f538192d620>, return_type=PydanticUndefined, when_used='unless-none')]] + + +
    + + +

    ∈📦 URLs/relative paths to sample outputs corresponding to the sample_inputs.

    +
    + + +
    +
    +
    + test_inputs: Annotated[List[Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')]), AfterValidator(func=<function wo_special_file_name at 0x7f538192cea0>), PlainSerializer(func=<function _package at 0x7f538192d620>, return_type=PydanticUndefined, when_used='unless-none'), WithSuffix(suffix='.npy', case_sensitive=True)]], MinLen(min_length=1)] + + +
    + + +

    ∈📦 Test input tensors compatible with the inputs description for a single test case. +This means if your model has more than one input, you should provide one URL/relative path for each input. +Each test input should be a file with an ndarray in +numpy.lib file format. +The extension must be '.npy'.

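For example, a single test input tensor could be created and stored in this format with numpy (hypothetical file name and shape):

    import numpy as np
    test_tensor = np.random.rand(1, 1, 128, 128).astype("float32")  # shape/dtype must match the input description
    np.save("test_input_0.npy", test_tensor)  # writes the required '.npy' file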
    +
    + + +
    +
    +
    + test_outputs: Annotated[List[Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')]), AfterValidator(func=<function wo_special_file_name at 0x7f538192cea0>), PlainSerializer(func=<function _package at 0x7f538192d620>, return_type=PydanticUndefined, when_used='unless-none'), WithSuffix(suffix='.npy', case_sensitive=True)]], MinLen(min_length=1)] + + +
    + + +

    ∈📦 Analog to test_inputs.

    +
    + + +
    +
    +
    + timestamp: bioimageio.spec._internal.types.Datetime + + +
    + + +

    Timestamp in ISO 8601 format +with a few restrictions listed here.

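Such a timestamp can be produced with Python's standard library, for example:

    from datetime import datetime
    timestamp = datetime.now().isoformat()  # e.g. '2024-03-18T14:05:09.123456', parseable by datetime.fromisoformat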
    +
    + + +
    +
    + + + +

    The dataset used to train this model

    +
    + + +
    +
    +
    + weights: Annotated[WeightsDescr, WrapSerializer(func=<function package_weights at 0x7f537f154360>, return_type=PydanticUndefined, when_used='always')] + + +
    + + +

    The weights for this model. +Weights can be given for different formats, but should otherwise be equivalent. +The available weight formats determine which consumers can use this model.

    +
    + + +
    +
    + +
    + + def + get_input_test_arrays(self) -> List[numpy.ndarray[Any, numpy.dtype[Any]]]: + + + +
    + +
    1188    def get_input_test_arrays(self) -> List[NDArray[Any]]:
    +1189        data = [load_array(download(ipt).path) for ipt in self.test_inputs]
    +1190        assert all(isinstance(d, np.ndarray) for d in data)
    +1191        return data
    +
    + + + + +
    +
    + +
    + + def + get_output_test_arrays(self) -> List[numpy.ndarray[Any, numpy.dtype[Any]]]: + + + +
    + +
    1193    def get_output_test_arrays(self) -> List[NDArray[Any]]:
    +1194        data = [load_array(download(out).path) for out in self.test_outputs]
    +1195        assert all(isinstance(d, np.ndarray) for d in data)
    +1196        return data
    +
    + + + + +
    +
    +
    + implemented_format_version: ClassVar[str] = +'0.4.10' + + +
    + + + + +
    +
    +
    + implemented_format_version_tuple: ClassVar[Tuple[int, int, int]] = +(0, 4, 10) + + +
    + + + + +
    +
    + +
    + + def + model_post_init(self: pydantic.main.BaseModel, context: Any, /) -> None: + + + +
    + +
    124                    def wrapped_model_post_init(self: BaseModel, context: Any, /) -> None:
    +125                        """We need to both initialize private attributes and call the user-defined model_post_init
    +126                        method.
    +127                        """
    +128                        init_private_attributes(self, context)
    +129                        original_model_post_init(self, context)
    +
    + + +

    We need to both initialize private attributes and call the user-defined model_post_init +method.

    +
    + + +
    + +
    +
    + + \ No newline at end of file diff --git a/bioimageio/spec/model/v0_5.html b/bioimageio/spec/model/v0_5.html new file mode 100644 index 00000000..035269a0 --- /dev/null +++ b/bioimageio/spec/model/v0_5.html @@ -0,0 +1,11263 @@ + + + + + + + bioimageio.spec.model.v0_5 API documentation + + + + + + + + + + +
    +
    +

    +bioimageio.spec.model.v0_5

    + + + + + + +
       1from __future__ import annotations
    +   2
    +   3import collections.abc
    +   4import re
    +   5import string
    +   6import warnings
    +   7from abc import ABC
    +   8from copy import deepcopy
    +   9from datetime import datetime
    +  10from itertools import chain
    +  11from math import ceil
    +  12from pathlib import Path, PurePosixPath
    +  13from tempfile import mkdtemp
    +  14from typing import (
    +  15    TYPE_CHECKING,
    +  16    Any,
    +  17    ClassVar,
    +  18    Dict,
    +  19    FrozenSet,
    +  20    Generic,
    +  21    List,
    +  22    Literal,
    +  23    Mapping,
    +  24    NamedTuple,
    +  25    Optional,
    +  26    Sequence,
    +  27    Set,
    +  28    Tuple,
    +  29    Type,
    +  30    TypeVar,
    +  31    Union,
    +  32    cast,
    +  33)
    +  34
    +  35import numpy as np
    +  36from annotated_types import Ge, Gt, Interval, MaxLen, MinLen, Predicate
    +  37from imageio.v3 import imread, imwrite  # pyright: ignore[reportUnknownVariableType]
    +  38from loguru import logger
    +  39from numpy.typing import NDArray
    +  40from pydantic import (
    +  41    Discriminator,
    +  42    Field,
    +  43    RootModel,
    +  44    Tag,
    +  45    ValidationInfo,
    +  46    WrapSerializer,
    +  47    field_validator,
    +  48    model_validator,
    +  49)
    +  50from typing_extensions import Annotated, LiteralString, Self, assert_never
    +  51
    +  52from .._internal.common_nodes import (
    +  53    Converter,
    +  54    InvalidDescr,
    +  55    Node,
    +  56    NodeWithExplicitlySetFields,
    +  57)
    +  58from .._internal.constants import DTYPE_LIMITS
    +  59from .._internal.field_warning import issue_warning, warn
    +  60from .._internal.io import BioimageioYamlContent as BioimageioYamlContent
    +  61from .._internal.io import FileDescr as FileDescr
    +  62from .._internal.io import WithSuffix, YamlValue, download
    +  63from .._internal.io_basics import AbsoluteFilePath as AbsoluteFilePath
    +  64from .._internal.io_basics import Sha256 as Sha256
    +  65from .._internal.io_utils import load_array
    +  66from .._internal.types import Datetime as Datetime
    +  67from .._internal.types import Identifier as Identifier
    +  68from .._internal.types import (
    +  69    ImportantFileSource,
    +  70    LowerCaseIdentifier,
    +  71    LowerCaseIdentifierAnno,
    +  72    SiUnit,
    +  73)
    +  74from .._internal.types import NotEmpty as NotEmpty
    +  75from .._internal.url import HttpUrl as HttpUrl
    +  76from .._internal.validation_context import validation_context_var
    +  77from .._internal.validator_annotations import RestrictCharacters
    +  78from .._internal.version_type import Version as Version
    +  79from .._internal.warning_levels import INFO
    +  80from ..dataset.v0_2 import DatasetDescr as DatasetDescr02
    +  81from ..dataset.v0_2 import LinkedDataset as LinkedDataset02
    +  82from ..dataset.v0_3 import DatasetDescr as DatasetDescr
    +  83from ..dataset.v0_3 import DatasetId as DatasetId
    +  84from ..dataset.v0_3 import LinkedDataset as LinkedDataset
    +  85from ..dataset.v0_3 import Uploader as Uploader
    +  86from ..generic.v0_3 import (
    +  87    VALID_COVER_IMAGE_EXTENSIONS as VALID_COVER_IMAGE_EXTENSIONS,
    +  88)
    +  89from ..generic.v0_3 import Author as Author
    +  90from ..generic.v0_3 import BadgeDescr as BadgeDescr
    +  91from ..generic.v0_3 import CiteEntry as CiteEntry
    +  92from ..generic.v0_3 import DeprecatedLicenseId as DeprecatedLicenseId
    +  93from ..generic.v0_3 import (
    +  94    DocumentationSource,
    +  95    GenericModelDescrBase,
    +  96    LinkedResourceNode,
    +  97    _author_conv,  # pyright: ignore[reportPrivateUsage]
    +  98    _maintainer_conv,  # pyright: ignore[reportPrivateUsage]
    +  99)
    + 100from ..generic.v0_3 import Doi as Doi
    + 101from ..generic.v0_3 import LicenseId as LicenseId
    + 102from ..generic.v0_3 import LinkedResource as LinkedResource
    + 103from ..generic.v0_3 import Maintainer as Maintainer
    + 104from ..generic.v0_3 import OrcidId as OrcidId
    + 105from ..generic.v0_3 import RelativeFilePath as RelativeFilePath
    + 106from ..generic.v0_3 import ResourceId as ResourceId
    + 107from .v0_4 import Author as _Author_v0_4
    + 108from .v0_4 import BinarizeDescr as _BinarizeDescr_v0_4
    + 109from .v0_4 import CallableFromDepencency as CallableFromDepencency
    + 110from .v0_4 import CallableFromDepencency as _CallableFromDepencency_v0_4
    + 111from .v0_4 import CallableFromFile as _CallableFromFile_v0_4
    + 112from .v0_4 import ClipDescr as _ClipDescr_v0_4
    + 113from .v0_4 import ClipKwargs as ClipKwargs
    + 114from .v0_4 import ImplicitOutputShape as _ImplicitOutputShape_v0_4
    + 115from .v0_4 import InputTensorDescr as _InputTensorDescr_v0_4
    + 116from .v0_4 import KnownRunMode as KnownRunMode
    + 117from .v0_4 import ModelDescr as _ModelDescr_v0_4
    + 118from .v0_4 import OutputTensorDescr as _OutputTensorDescr_v0_4
    + 119from .v0_4 import ParameterizedInputShape as _ParameterizedInputShape_v0_4
    + 120from .v0_4 import PostprocessingDescr as _PostprocessingDescr_v0_4
    + 121from .v0_4 import PreprocessingDescr as _PreprocessingDescr_v0_4
    + 122from .v0_4 import ProcessingKwargs as ProcessingKwargs
    + 123from .v0_4 import RunMode as RunMode
    + 124from .v0_4 import ScaleLinearDescr as _ScaleLinearDescr_v0_4
    + 125from .v0_4 import ScaleMeanVarianceDescr as _ScaleMeanVarianceDescr_v0_4
    + 126from .v0_4 import ScaleRangeDescr as _ScaleRangeDescr_v0_4
    + 127from .v0_4 import SigmoidDescr as _SigmoidDescr_v0_4
    + 128from .v0_4 import TensorName as _TensorName_v0_4
    + 129from .v0_4 import WeightsFormat as WeightsFormat
    + 130from .v0_4 import ZeroMeanUnitVarianceDescr as _ZeroMeanUnitVarianceDescr_v0_4
    + 131from .v0_4 import package_weights
    + 132
    + 133# unit names from https://ngff.openmicroscopy.org/latest/#axes-md
    + 134SpaceUnit = Literal[
    + 135    "attometer",
    + 136    "angstrom",
    + 137    "centimeter",
    + 138    "decimeter",
    + 139    "exameter",
    + 140    "femtometer",
    + 141    "foot",
    + 142    "gigameter",
    + 143    "hectometer",
    + 144    "inch",
    + 145    "kilometer",
    + 146    "megameter",
    + 147    "meter",
    + 148    "micrometer",
    + 149    "mile",
    + 150    "millimeter",
    + 151    "nanometer",
    + 152    "parsec",
    + 153    "petameter",
    + 154    "picometer",
    + 155    "terameter",
    + 156    "yard",
    + 157    "yoctometer",
    + 158    "yottameter",
    + 159    "zeptometer",
    + 160    "zettameter",
    + 161]
    + 162
    + 163TimeUnit = Literal[
    + 164    "attosecond",
    + 165    "centisecond",
    + 166    "day",
    + 167    "decisecond",
    + 168    "exasecond",
    + 169    "femtosecond",
    + 170    "gigasecond",
    + 171    "hectosecond",
    + 172    "hour",
    + 173    "kilosecond",
    + 174    "megasecond",
    + 175    "microsecond",
    + 176    "millisecond",
    + 177    "minute",
    + 178    "nanosecond",
    + 179    "petasecond",
    + 180    "picosecond",
    + 181    "second",
    + 182    "terasecond",
    + 183    "yoctosecond",
    + 184    "yottasecond",
    + 185    "zeptosecond",
    + 186    "zettasecond",
    + 187]
    + 188
    + 189AxisType = Literal["batch", "channel", "index", "time", "space"]
    + 190
    + 191
    + 192class TensorId(LowerCaseIdentifier):
    + 193    root_model: ClassVar[Type[RootModel[Any]]] = RootModel[
    + 194        Annotated[LowerCaseIdentifierAnno, MaxLen(32)]
    + 195    ]
    + 196
    + 197
    + 198class AxisId(LowerCaseIdentifier):
    + 199    root_model: ClassVar[Type[RootModel[Any]]] = RootModel[
    + 200        Annotated[LowerCaseIdentifierAnno, MaxLen(16)]
    + 201    ]
    + 202
    + 203
    + 204def _is_batch(a: str) -> bool:
    + 205    return a == BATCH_AXIS_ID
    + 206
    + 207
    + 208def _is_not_batch(a: str) -> bool:
    + 209    return not _is_batch(a)
    + 210
    + 211
    + 212NonBatchAxisId = Annotated[AxisId, Predicate(_is_not_batch)]
    + 213
    + 214PostprocessingId = Literal[
    + 215    "binarize",
    + 216    "clip",
    + 217    "ensure_dtype",
    + 218    "fixed_zero_mean_unit_variance",
    + 219    "scale_linear",
    + 220    "scale_mean_variance",
    + 221    "scale_range",
    + 222    "sigmoid",
    + 223    "zero_mean_unit_variance",
    + 224]
    + 225PreprocessingId = Literal[
    + 226    "binarize",
    + 227    "clip",
    + 228    "ensure_dtype",
    + 229    "scale_linear",
    + 230    "sigmoid",
    + 231    "zero_mean_unit_variance",
    + 232    "scale_range",
    + 233]
    + 234
    + 235
    + 236SAME_AS_TYPE = "<same as type>"
    + 237
    + 238
    + 239ParameterizedSize_N = int
    + 240
    + 241
    + 242class ParameterizedSize(Node):
    + 243    """Describes a range of valid tensor axis sizes as `size = min + n*step`."""
    + 244
    + 245    N: ClassVar[Type[int]] = ParameterizedSize_N
    + 246    """integer to parameterize this axis"""
    + 247
    + 248    min: Annotated[int, Gt(0)]
    + 249    step: Annotated[int, Gt(0)]
    + 250
    + 251    def validate_size(self, size: int) -> int:
    + 252        if size < self.min:
    + 253            raise ValueError(f"size {size} < {self.min}")
    + 254        if (size - self.min) % self.step != 0:
    + 255            raise ValueError(
    + 256                f"axis of size {size} is not parameterized by `min + n*step` ="
    + 257                + f" `{self.min} + n*{self.step}`"
    + 258            )
    + 259
    + 260        return size
    + 261
    + 262    def get_size(self, n: ParameterizedSize_N) -> int:
    + 263        return self.min + self.step * n
    + 264
    + 265    def get_n(self, s: int) -> ParameterizedSize_N:
    + 266        """return the smallest n parameterizing a size greater than or equal to `s`"""
    + 267        return ceil((s - self.min) / self.step)
    + 268
    + 269
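# Example (illustrative): ParameterizedSize(min=32, step=16) admits the sizes
# 32, 48, 64, ...:
#     ParameterizedSize(min=32, step=16).get_size(2)        # -> 64 (= 32 + 2*16)
#     ParameterizedSize(min=32, step=16).get_n(50)          # -> 2 (smallest n with 32 + n*16 >= 50)
#     ParameterizedSize(min=32, step=16).validate_size(40)  # raises ValueError (40 is not 32 + n*16)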
    + 270class DataDependentSize(Node):
    + 271    min: Annotated[int, Gt(0)] = 1
    + 272    max: Annotated[Optional[int], Gt(1)] = None
    + 273
    + 274    @model_validator(mode="after")
    + 275    def _validate_max_gt_min(self):
    + 276        if self.max is not None and self.min >= self.max:
    + 277            raise ValueError(f"expected `min` < `max`, but got {self.min}, {self.max}")
    + 278
    + 279        return self
    + 280
    + 281    def validate_size(self, size: int) -> int:
    + 282        if size < self.min:
    + 283            raise ValueError(f"size {size} < {self.min}")
    + 284
    + 285        if self.max is not None and size > self.max:
    + 286            raise ValueError(f"size {size} > {self.max}")
    + 287
    + 288        return size
    + 289
    + 290
    + 291class SizeReference(Node):
    + 292    """A tensor axis size (extent in pixels/frames) defined in relation to a reference axis.
    + 293
    + 294    `axis.size = reference.size * reference.scale / axis.scale + offset`
    + 295
    + 296    note:
    + 297    1. The axis and the referenced axis need to have the same unit (or no unit).
    + 298    2. Batch axes may not be referenced.
    + 299    3. Fractions are rounded down.
    + 300    4. If the reference axis is `concatenable` the referencing axis is assumed to be
    + 301        `concatenable` as well with the same block order.
    + 302
    + 303    example:
    + 304    An anisotropic input image of w*h=100*49 pixels depicts a physical space of 200*196mm².
    + 305    Let's assume that we want to express the image height h in relation to its width w
    + 306    instead of only accepting input images of exactly 100*49 pixels
    + 307    (for example to express a range of valid image shapes by parametrizing w, see `ParameterizedSize`).
    + 308
    + 309    >>> w = SpaceInputAxis(id=AxisId("w"), size=100, unit="millimeter", scale=2)
    + 310    >>> h = SpaceInputAxis(
    + 311    ...     id=AxisId("h"),
    + 312    ...     size=SizeReference(tensor_id=TensorId("input"), axis_id=AxisId("w"), offset=-1),
    + 313    ...     unit="millimeter",
    + 314    ...     scale=4,
    + 315    ... )
    + 316    >>> print(h.size.compute(h, w))
    + 317    49
    + 318
    + 319    -> h = w * w.scale / h.scale + offset = 100 * 2mm / 4mm - 1 = 49
    + 320    """
    + 321
    + 322    tensor_id: TensorId
    + 323    """tensor id of the reference axis"""
    + 324
    + 325    axis_id: AxisId
    + 326    """axis id of the reference axis"""
    + 327
    + 328    offset: int = 0
    + 329
    + 330    def get_size(
    + 331        self,
    + 332        axis: Union[
    + 333            ChannelAxis,
    + 334            IndexInputAxis,
    + 335            IndexOutputAxis,
    + 336            TimeInputAxis,
    + 337            SpaceInputAxis,
    + 338            TimeOutputAxis,
    + 339            TimeOutputAxisWithHalo,
    + 340            SpaceOutputAxis,
    + 341            SpaceOutputAxisWithHalo,
    + 342        ],
    + 343        ref_axis: Union[
    + 344            ChannelAxis,
    + 345            IndexInputAxis,
    + 346            IndexOutputAxis,
    + 347            TimeInputAxis,
    + 348            SpaceInputAxis,
    + 349            TimeOutputAxis,
    + 350            TimeOutputAxisWithHalo,
    + 351            SpaceOutputAxis,
    + 352            SpaceOutputAxisWithHalo,
    + 353        ],
    + 354        n: ParameterizedSize_N = 0,
    + 355        ref_size: Optional[int] = None,
    + 356    ):
    + 357        """Compute the concrete size for a given axis and its reference axis.
    + 358
    + 359        Args:
    + 360            axis: The axis this `SizeReference` is the size of.
    + 361            ref_axis: The reference axis to compute the size from.
    + 362            n: If the **ref_axis** is parameterized (of type `ParameterizedSize`)
    + 363                and no fixed **ref_size** is given,
    + 364                **n** is used to compute the size of the parameterized **ref_axis**.
    + 365            ref_size: Overwrite the reference size instead of deriving it from
    + 366                **ref_axis**
    + 367                (**ref_axis.scale** is still used; any given **n** is ignored).
    + 368        """
    + 369        assert (
    + 370            axis.size == self
    + 371        ), "Given `axis.size` is not defined by this `SizeReference`"
    + 372
    + 373        assert (
    + 374            ref_axis.id == self.axis_id
    + 375        ), f"Expected `ref_axis.id` to be {self.axis_id}, but got {ref_axis.id}."
    + 376
    + 377        assert axis.unit == ref_axis.unit, (
    + 378            "`SizeReference` requires `axis` and `ref_axis` to have the same `unit`,"
    + 379            f" but {axis.unit}!={ref_axis.unit}"
    + 380        )
    + 381        if ref_size is None:
    + 382            if isinstance(ref_axis.size, (int, float)):
    + 383                ref_size = ref_axis.size
    + 384            elif isinstance(ref_axis.size, ParameterizedSize):
    + 385                ref_size = ref_axis.size.get_size(n)
    + 386            elif isinstance(ref_axis.size, DataDependentSize):
    + 387                raise ValueError(
    + 388                    "Reference axis referenced in `SizeReference` may not be sized by a `DataDependentSize`."
    + 389                )
    + 390            elif isinstance(ref_axis.size, SizeReference):
    + 391                raise ValueError(
    + 392                    "Reference axis referenced in `SizeReference` may not be sized by a"
    + 393                    + " `SizeReference` itself."
    + 394                )
    + 395            else:
    + 396                assert_never(ref_axis.size)
    + 397
    + 398        return int(ref_size * ref_axis.scale / axis.scale + self.offset)
    + 399
    + 400    @staticmethod
    + 401    def _get_unit(
    + 402        axis: Union[
    + 403            ChannelAxis,
    + 404            IndexInputAxis,
    + 405            IndexOutputAxis,
    + 406            TimeInputAxis,
    + 407            SpaceInputAxis,
    + 408            TimeOutputAxis,
    + 409            TimeOutputAxisWithHalo,
    + 410            SpaceOutputAxis,
    + 411            SpaceOutputAxisWithHalo,
    + 412        ],
    + 413    ):
    + 414        return axis.unit
    + 415
    + 416
    + 417# this Axis definition is compatible with the NGFF draft from July 10, 2023
    + 418# https://ngff.openmicroscopy.org/latest/#axes-md
    + 419class AxisBase(NodeWithExplicitlySetFields):
    + 420    fields_to_set_explicitly: ClassVar[FrozenSet[LiteralString]] = frozenset({"type"})
    + 421
    + 422    id: AxisId
    + 423    """An axis id unique across all axes of one tensor."""
    + 424
    + 425    description: Annotated[str, MaxLen(128)] = ""
    + 426
    + 427
    + 428class WithHalo(Node):
    + 429    halo: Annotated[int, Ge(1)]
    + 430    """The halo should be cropped from the output tensor to avoid boundary effects.
    + 431    It is to be cropped from both sides, i.e. `size_after_crop = size - 2 * halo`.
    + 432    To document a halo that is already cropped by the model use `size.offset` instead."""
    + 433
    + 434    size: Annotated[
    + 435        SizeReference,
    + 436        Field(
    + 437            examples=[
    + 438                10,
    + 439                SizeReference(
    + 440                    tensor_id=TensorId("t"), axis_id=AxisId("a"), offset=5
    + 441                ).model_dump(mode="json"),
    + 442            ]
    + 443        ),
    + 444    ]
    + 445    """reference to another axis with an optional offset (see `SizeReference`)"""
    + 446
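# Example (illustrative): a halo of 16 on a spatial output axis of size 256 means
# 16 pixels are cropped from each side, leaving 256 - 2 * 16 = 224 valid pixels,
# e.g. for a numpy array `out` with that axis last:
#     out_valid = out[..., 16:-16]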
    + 447
    + 448BATCH_AXIS_ID = AxisId("batch")
    + 449
    + 450
    + 451class BatchAxis(AxisBase):
    + 452    type: Literal["batch"] = "batch"
    + 453    id: Annotated[AxisId, Predicate(_is_batch)] = BATCH_AXIS_ID
    + 454    size: Optional[Literal[1]] = None
    + 455    """The batch size may be fixed to 1,
    + 456    otherwise (the default) it may be chosen arbitrarily depending on available memory"""
    + 457
    + 458    @property
    + 459    def scale(self):
    + 460        return 1.0
    + 461
    + 462    @property
    + 463    def concatenable(self):
    + 464        return True
    + 465
    + 466    @property
    + 467    def unit(self):
    + 468        return None
    + 469
    + 470
    + 471class ChannelAxis(AxisBase):
    + 472    type: Literal["channel"] = "channel"
    + 473    id: NonBatchAxisId = AxisId("channel")
    + 474    channel_names: NotEmpty[List[Identifier]]
    + 475
    + 476    @property
    + 477    def size(self) -> int:
    + 478        return len(self.channel_names)
    + 479
    + 480    @property
    + 481    def concatenable(self):
    + 482        return False
    + 483
    + 484    @property
    + 485    def scale(self) -> float:
    + 486        return 1.0
    + 487
    + 488    @property
    + 489    def unit(self):
    + 490        return None
    + 491
    + 492
    + 493class IndexAxisBase(AxisBase):
    + 494    type: Literal["index"] = "index"
    + 495    id: NonBatchAxisId = AxisId("index")
    + 496
    + 497    @property
    + 498    def scale(self) -> float:
    + 499        return 1.0
    + 500
    + 501    @property
    + 502    def unit(self):
    + 503        return None
    + 504
    + 505
    + 506class _WithInputAxisSize(Node):
    + 507    size: Annotated[
    + 508        Union[Annotated[int, Gt(0)], ParameterizedSize, SizeReference],
    + 509        Field(
    + 510            examples=[
    + 511                10,
    + 512                ParameterizedSize(min=32, step=16).model_dump(mode="json"),
    + 513                SizeReference(
    + 514                    tensor_id=TensorId("t"), axis_id=AxisId("a"), offset=5
    + 515                ).model_dump(mode="json"),
    + 516            ]
    + 517        ),
    + 518    ]
    + 519    """The size/length of this axis can be specified as
    + 520    - fixed integer
    + 521    - parameterized series of valid sizes (`ParameterizedSize`)
    + 522    - reference to another axis with an optional offset (`SizeReference`)
    + 523    """
    + 524
    + 525
    + 526class IndexInputAxis(IndexAxisBase, _WithInputAxisSize):
    + 527    concatenable: bool = False
    + 528    """If a model has a `concatenable` input axis, it can be processed blockwise,
    + 529    splitting a longer sample axis into blocks matching its input tensor description.
    + 530    Output axes are concatenable if they have a `SizeReference` to a concatenable
    + 531    input axis.
    + 532    """
    + 533
    + 534
    + 535class IndexOutputAxis(IndexAxisBase):
    + 536    size: Annotated[
    + 537        Union[Annotated[int, Gt(0)], SizeReference, DataDependentSize],
    + 538        Field(
    + 539            examples=[
    + 540                10,
    + 541                SizeReference(
    + 542                    tensor_id=TensorId("t"), axis_id=AxisId("a"), offset=5
    + 543                ).model_dump(mode="json"),
    + 544            ]
    + 545        ),
    + 546    ]
    + 547    """The size/length of this axis can be specified as
    + 548    - fixed integer
    + 549    - reference to another axis with an optional offset (`SizeReference`)
    + 550    - data dependent size using `DataDependentSize` (size is only known after model inference)
    + 551    """
    + 552
    + 553
    + 554class TimeAxisBase(AxisBase):
    + 555    type: Literal["time"] = "time"
    + 556    id: NonBatchAxisId = AxisId("time")
    + 557    unit: Optional[TimeUnit] = None
    + 558    scale: Annotated[float, Gt(0)] = 1.0
    + 559
    + 560
    + 561class TimeInputAxis(TimeAxisBase, _WithInputAxisSize):
    + 562    concatenable: bool = False
    + 563    """If a model has a `concatenable` input axis, it can be processed blockwise,
    + 564    splitting a longer sample axis into blocks matching its input tensor description.
    + 565    Output axes are concatenable if they have a `SizeReference` to a concatenable
    + 566    input axis.
    + 567    """
    + 568
    + 569
    + 570class SpaceAxisBase(AxisBase):
    + 571    type: Literal["space"] = "space"
    + 572    id: Annotated[NonBatchAxisId, Field(examples=["x", "y", "z"])] = AxisId("x")
    + 573    unit: Optional[SpaceUnit] = None
    + 574    scale: Annotated[float, Gt(0)] = 1.0
    + 575
    + 576
    + 577class SpaceInputAxis(SpaceAxisBase, _WithInputAxisSize):
    + 578    concatenable: bool = False
    + 579    """If a model has a `concatenable` input axis, it can be processed blockwise,
    + 580    splitting a longer sample axis into blocks matching its input tensor description.
    + 581    Output axes are concatenable if they have a `SizeReference` to a concatenable
    + 582    input axis.
    + 583    """
    + 584
    + 585
    + 586_InputAxisUnion = Union[
    + 587    BatchAxis, ChannelAxis, IndexInputAxis, TimeInputAxis, SpaceInputAxis
    + 588]
    + 589InputAxis = Annotated[_InputAxisUnion, Discriminator("type")]
    + 590
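# Example (illustrative): input axes of a 2D model with one channel and a
# parameterized spatial extent, built from the classes defined above:
#     axes = [
#         BatchAxis(),
#         ChannelAxis(channel_names=[Identifier("raw")]),
#         SpaceInputAxis(id=AxisId("y"), size=ParameterizedSize(min=64, step=16)),
#         SpaceInputAxis(id=AxisId("x"), size=ParameterizedSize(min=64, step=16)),
#     ]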
    + 591
    + 592class _WithOutputAxisSize(Node):
    + 593    size: Annotated[
    + 594        Union[Annotated[int, Gt(0)], SizeReference],
    + 595        Field(
    + 596            examples=[
    + 597                10,
    + 598                SizeReference(
    + 599                    tensor_id=TensorId("t"), axis_id=AxisId("a"), offset=5
    + 600                ).model_dump(mode="json"),
    + 601            ]
    + 602        ),
    + 603    ]
    + 604    """The size/length of this axis can be specified as
    + 605    - fixed integer
    + 606    - reference to another axis with an optional offset (see `SizeReference`)
    + 607    """
    + 608
    + 609
    + 610class TimeOutputAxis(TimeAxisBase, _WithOutputAxisSize):
    + 611    pass
    + 612
    + 613
    + 614class TimeOutputAxisWithHalo(TimeAxisBase, WithHalo):
    + 615    pass
    + 616
    + 617
    + 618def _get_halo_axis_discriminator_value(v: Any) -> Literal["with_halo", "wo_halo"]:
    + 619    if isinstance(v, dict):
    + 620        return "with_halo" if "halo" in v else "wo_halo"
    + 621    else:
    + 622        return "with_halo" if hasattr(v, "halo") else "wo_halo"
    + 623
    + 624
    + 625_TimeOutputAxisUnion = Annotated[
    + 626    Union[
    + 627        Annotated[TimeOutputAxis, Tag("wo_halo")],
    + 628        Annotated[TimeOutputAxisWithHalo, Tag("with_halo")],
    + 629    ],
    + 630    Discriminator(_get_halo_axis_discriminator_value),
    + 631]
    + 632
    + 633
    + 634class SpaceOutputAxis(SpaceAxisBase, _WithOutputAxisSize):
    + 635    pass
    + 636
    + 637
    + 638class SpaceOutputAxisWithHalo(SpaceAxisBase, WithHalo):
    + 639    pass
    + 640
    + 641
    + 642_SpaceOutputAxisUnion = Annotated[
    + 643    Union[
    + 644        Annotated[SpaceOutputAxis, Tag("wo_halo")],
    + 645        Annotated[SpaceOutputAxisWithHalo, Tag("with_halo")],
    + 646    ],
    + 647    Discriminator(_get_halo_axis_discriminator_value),
    + 648]
    + 649
    + 650
    + 651_OutputAxisUnion = Union[
    + 652    BatchAxis, ChannelAxis, IndexOutputAxis, _TimeOutputAxisUnion, _SpaceOutputAxisUnion
    + 653]
    + 654OutputAxis = Annotated[_OutputAxisUnion, Discriminator("type")]
    + 655
    + 656AnyAxis = Union[InputAxis, OutputAxis]
    + 657
    + 658TVs = Union[
    + 659    NotEmpty[List[int]],
    + 660    NotEmpty[List[float]],
    + 661    NotEmpty[List[bool]],
    + 662    NotEmpty[List[str]],
    + 663]
    + 664
    + 665
    + 666NominalOrOrdinalDType = Literal[
    + 667    "float32",
    + 668    "float64",
    + 669    "uint8",
    + 670    "int8",
    + 671    "uint16",
    + 672    "int16",
    + 673    "uint32",
    + 674    "int32",
    + 675    "uint64",
    + 676    "int64",
    + 677    "bool",
    + 678]
    + 679
    + 680
    + 681class NominalOrOrdinalDataDescr(Node):
    + 682    values: TVs
    + 683    """A fixed set of nominal or an ascending sequence of ordinal values.
    + 684    In this case `data_type` is required to be an unsigned integer type, e.g. 'uint8'.
    + 685    String `values` are interpreted as labels for tensor values 0, ..., N.
    + 686    Note: as YAML 1.2 does not natively support a "set" datatype,
    + 687    nominal values should be given as a sequence (aka list/array) as well.
    + 688    """
    + 689
    + 690    type: Annotated[
    + 691        NominalOrOrdinalDType,
    + 692        Field(
    + 693            examples=[
    + 694                "float32",
    + 695                "uint8",
    + 696                "uint16",
    + 697                "int64",
    + 698                "bool",
    + 699            ],
    + 700        ),
    + 701    ] = "uint8"
    + 702
    + 703    @model_validator(mode="after")
    + 704    def _validate_values_match_type(
    + 705        self,
    + 706    ) -> Self:
    + 707        incompatible: List[Any] = []
    + 708        for v in self.values:
    + 709            if self.type == "bool":
    + 710                if not isinstance(v, bool):
    + 711                    incompatible.append(v)
    + 712            elif self.type in DTYPE_LIMITS:
    + 713                if (
    + 714                    isinstance(v, (int, float))
    + 715                    and (
    + 716                        v < DTYPE_LIMITS[self.type].min
    + 717                        or v > DTYPE_LIMITS[self.type].max
    + 718                    )
    + 719                    or (isinstance(v, str) and "uint" not in self.type)
    + 720                    or (isinstance(v, float) and "int" in self.type)
    + 721                ):
    + 722                    incompatible.append(v)
    + 723            else:
    + 724                incompatible.append(v)
    + 725
    + 726            if len(incompatible) == 5:
    + 727                incompatible.append("...")
    + 728                break
    + 729
    + 730        if incompatible:
    + 731            raise ValueError(
    + 732                f"data type '{self.type}' incompatible with values {incompatible}"
    + 733            )
    + 734
    + 735        return self
    + 736
    + 737    unit: Optional[Union[Literal["arbitrary unit"], SiUnit]] = None
    + 738
    + 739    @property
    + 740    def range(self):
    + 741        if isinstance(self.values[0], str):
    + 742            return 0, len(self.values) - 1
    + 743        else:
    + 744            return min(self.values), max(self.values)
    + 745
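# Example (illustrative): a nominal data description for a two-class label tensor;
# the string values act as labels for the tensor values 0 and 1, so `range` is (0, 1):
#     NominalOrOrdinalDataDescr(values=["background", "foreground"], type="uint8")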
    + 746
    + 747IntervalOrRatioDType = Literal[
    + 748    "float32",
    + 749    "float64",
    + 750    "uint8",
    + 751    "int8",
    + 752    "uint16",
    + 753    "int16",
    + 754    "uint32",
    + 755    "int32",
    + 756    "uint64",
    + 757    "int64",
    + 758]
    + 759
    + 760
    + 761class IntervalOrRatioDataDescr(Node):
    + 762    type: Annotated[  # todo: rename to dtype
    + 763        IntervalOrRatioDType,
    + 764        Field(
    + 765            examples=["float32", "float64", "uint8", "uint16"],
    + 766        ),
    + 767    ] = "float32"
    + 768    range: Tuple[Optional[float], Optional[float]] = (
    + 769        None,
    + 770        None,
    + 771    )
    + 772    """Tuple `(minimum, maximum)` specifying the allowed range of the data in this tensor.
    + 773    `None` corresponds to min/max of what can be expressed by `data_type`."""
    + 774    unit: Union[Literal["arbitrary unit"], SiUnit] = "arbitrary unit"
    + 775    scale: float = 1.0
    + 776    """Scale for data on an interval (or ratio) scale."""
    + 777    offset: Optional[float] = None
    + 778    """Offset for data on a ratio scale."""
    + 779
    + 780
    + 781TensorDataDescr = Union[NominalOrOrdinalDataDescr, IntervalOrRatioDataDescr]
    + 782
    + 783
    + 784class ProcessingDescrBase(NodeWithExplicitlySetFields, ABC):
    + 785    """processing base class"""
    + 786
    + 787    # id: Literal[PreprocessingId, PostprocessingId]  # make abstract field
    + 788    fields_to_set_explicitly: ClassVar[FrozenSet[LiteralString]] = frozenset({"id"})
    + 789
    + 790
    + 791class BinarizeKwargs(ProcessingKwargs):
    + 792    """keyword arguments for `BinarizeDescr`"""
    + 793
    + 794    threshold: float
    + 795    """The fixed threshold"""
    + 796
    + 797
    + 798class BinarizeAlongAxisKwargs(ProcessingKwargs):
    + 799    """keyword arguments for `BinarizeDescr`"""
    + 800
    + 801    threshold: NotEmpty[List[float]]
    + 802    """The fixed threshold values along `axis`"""
    + 803
    + 804    axis: Annotated[NonBatchAxisId, Field(examples=["channel"])]
    + 805    """The `threshold` axis"""
    + 806
    + 807
    + 808class BinarizeDescr(ProcessingDescrBase):
    + 809    """Binarize the tensor with a fixed threshold.
    + 810
    + 811    Values above `BinarizeKwargs.threshold`/`BinarizeAlongAxisKwargs.threshold`
    + 812    will be set to one, values below the threshold to zero.
    + 813    """
    + 814
    + 815    id: Literal["binarize"] = "binarize"
    + 816    kwargs: Union[BinarizeKwargs, BinarizeAlongAxisKwargs]
    + 817
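# Example (illustrative): BinarizeDescr(kwargs=BinarizeKwargs(threshold=0.5)) describes
# a step roughly equivalent to
#     binarized = (tensor > 0.5).astype(tensor.dtype)
# (the actual computation is performed by consumer software such as bioimageio.core).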
    + 818
    + 819class ClipDescr(ProcessingDescrBase):
    + 820    """Set tensor values below min to min and above max to max."""
    + 821
    + 822    id: Literal["clip"] = "clip"
    + 823    kwargs: ClipKwargs
    + 824
    + 825
    + 826class EnsureDtypeKwargs(ProcessingKwargs):
    + 827    """keyword arguments for `EnsureDtypeDescr`"""
    + 828
    + 829    dtype: Literal[
    + 830        "float32",
    + 831        "float64",
    + 832        "uint8",
    + 833        "int8",
    + 834        "uint16",
    + 835        "int16",
    + 836        "uint32",
    + 837        "int32",
    + 838        "uint64",
    + 839        "int64",
    + 840        "bool",
    + 841    ]
    + 842
    + 843
    + 844class EnsureDtypeDescr(ProcessingDescrBase):
    + 845    """cast the tensor data type to `EnsureDtypeKwargs.dtype` (if not matching)"""
    + 846
    + 847    id: Literal["ensure_dtype"] = "ensure_dtype"
    + 848    kwargs: EnsureDtypeKwargs
    + 849
    + 850
    + 851class ScaleLinearKwargs(ProcessingKwargs):
    + 852    """keyword arguments for `ScaleLinearDescr`"""
    + 853
    + 854    gain: float = 1.0
    + 855    """multiplicative factor"""
    + 856
    + 857    offset: float = 0.0
    + 858    """additive term"""
    + 859
    + 860    @model_validator(mode="after")
    + 861    def _validate(self) -> Self:
    + 862        if self.gain == 1.0 and self.offset == 0.0:
    + 863            raise ValueError(
    + 864                "Redundant linear scaling not allowed. Set `gain` != 1.0 and/or `offset`"
    + 865                + " != 0.0."
    + 866            )
    + 867
    + 868        return self
    + 869
    + 870
    + 871class ScaleLinearAlongAxisKwargs(ProcessingKwargs):
    + 872    """keyword arguments for `ScaleLinearDescr`"""
    + 873
    + 874    axis: Annotated[NonBatchAxisId, Field(examples=["channel"])]
    + 875    """The axis of the gain/offset values."""
    + 876
    + 877    gain: Union[float, NotEmpty[List[float]]] = 1.0
    + 878    """multiplicative factor"""
    + 879
    + 880    offset: Union[float, NotEmpty[List[float]]] = 0.0
    + 881    """additive term"""
    + 882
    + 883    @model_validator(mode="after")
    + 884    def _validate(self) -> Self:
    + 885
    + 886        if isinstance(self.gain, list):
    + 887            if isinstance(self.offset, list):
    + 888                if len(self.gain) != len(self.offset):
    + 889                    raise ValueError(
    + 890                        f"Size of `gain` ({len(self.gain)}) and `offset` ({len(self.offset)}) must match."
    + 891                    )
    + 892            else:
    + 893                self.offset = [float(self.offset)] * len(self.gain)
    + 894        elif isinstance(self.offset, list):
    + 895            self.gain = [float(self.gain)] * len(self.offset)
    + 896        else:
    + 897            raise ValueError(
    + 898                "Do not specify an `axis` for scalar gain and offset values."
    + 899            )
    + 900
    + 901        if all(g == 1.0 for g in self.gain) and all(off == 0.0 for off in self.offset):
    + 902            raise ValueError(
    + 903                "Redundant linear scaling not allowed. Set `gain` != 1.0 and/or `offset`"
    + 904                + " != 0.0."
    + 905            )
    + 906
    + 907        return self
    + 908
    + 909
    + 910class ScaleLinearDescr(ProcessingDescrBase):
    + 911    """Fixed linear scaling."""
    + 912
    + 913    id: Literal["scale_linear"] = "scale_linear"
    + 914    kwargs: Union[ScaleLinearKwargs, ScaleLinearAlongAxisKwargs]
    + 915
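# Example (illustrative): ScaleLinearKwargs(gain=2.0, offset=1.0) describes a step
# roughly equivalent to `out = tensor * 2.0 + 1.0`; with ScaleLinearAlongAxisKwargs
# the gain/offset lists are applied entry-wise along the given axis (e.g. per channel).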
    + 916
    + 917class SigmoidDescr(ProcessingDescrBase):
    + 918    """The logistic sigmoid function, a.k.a. expit function."""
    + 919
    + 920    id: Literal["sigmoid"] = "sigmoid"
    + 921
    + 922    @property
    + 923    def kwargs(self) -> ProcessingKwargs:
    + 924        """empty kwargs"""
    + 925        return ProcessingKwargs()
    + 926
    + 927
    + 928class FixedZeroMeanUnitVarianceKwargs(ProcessingKwargs):
    + 929    """keyword arguments for `FixedZeroMeanUnitVarianceDescr`"""
    + 930
    + 931    mean: float
    + 932    """The mean value to normalize with."""
    + 933
    + 934    std: Annotated[float, Ge(1e-6)]
    + 935    """The standard deviation value to normalize with."""
    + 936
    + 937
    + 938class FixedZeroMeanUnitVarianceAlongAxisKwargs(ProcessingKwargs):
    + 939    """key word arguments for `FixedZeroMeanUnitVarianceDescr`"""
    + 940
    + 941    mean: NotEmpty[List[float]]
    + 942    """The mean value(s) to normalize with."""
    + 943
    + 944    std: NotEmpty[List[Annotated[float, Ge(1e-6)]]]
    + 945    """The standard deviation value(s) to normalize with.
    + 946    Size must match `mean` values."""
    + 947
    + 948    axis: Annotated[NonBatchAxisId, Field(examples=["channel", "index"])]
    + 949    """The axis of the mean/std values; each entry along that dimension is
    + 950    normalized separately."""
    + 951
    + 952    @model_validator(mode="after")
    + 953    def _mean_and_std_match(self) -> Self:
    + 954        if len(self.mean) != len(self.std):
    + 955            raise ValueError(
    + 956                f"Size of `mean` ({len(self.mean)}) and `std` ({len(self.std)})"
    + 957                + " must match."
    + 958            )
    + 959
    + 960        return self
    + 961
    + 962
    + 963class FixedZeroMeanUnitVarianceDescr(ProcessingDescrBase):
    + 964    """Subtract a given mean and divide by the standard deviation.
    + 965
    + 966    Normalize with fixed, precomputed values for
    + 967    `FixedZeroMeanUnitVarianceKwargs.mean` and `FixedZeroMeanUnitVarianceKwargs.std`.
    + 968    Use `FixedZeroMeanUnitVarianceAlongAxisKwargs` for independent scaling along given
    + 969    axes.
    + 970    """
    + 971
    + 972    id: Literal["fixed_zero_mean_unit_variance"] = "fixed_zero_mean_unit_variance"
    + 973    kwargs: Union[
    + 974        FixedZeroMeanUnitVarianceKwargs, FixedZeroMeanUnitVarianceAlongAxisKwargs
    + 975    ]
    + 976
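    +    # Illustrative use (hypothetical values): `out = (tensor - mean) / std` with
    +    # precomputed statistics, e.g. per channel:
    +    #   FixedZeroMeanUnitVarianceDescr(
    +    #       kwargs=FixedZeroMeanUnitVarianceAlongAxisKwargs(
    +    #           axis=AxisId("channel"), mean=[0.1, 0.2], std=[0.9, 1.1]))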
    + 977
    + 978class ZeroMeanUnitVarianceKwargs(ProcessingKwargs):
    + 979    """key word arguments for `ZeroMeanUnitVarianceDescr`"""
    + 980
    + 981    axes: Annotated[
    + 982        Optional[Sequence[AxisId]], Field(examples=[("batch", "x", "y")])
    + 983    ] = None
    + 984    """The subset of axes to normalize jointly, i.e. axes to reduce to compute mean/std.
    + 985    For example to normalize 'batch', 'x' and 'y' jointly in a tensor ('batch', 'channel', 'y', 'x')
    + 986    resulting in a tensor of equal shape normalized per channel, specify `axes=('batch', 'x', 'y')`.
    + 987    To normalize each sample independently leave out the 'batch' axis.
    + 988    Default: Scale all axes jointly."""
    + 989
    + 990    eps: Annotated[float, Interval(gt=0, le=0.1)] = 1e-6
    + 991    """epsilon for numeric stability: `out = (tensor - mean) / (std + eps)`."""
    + 992
    + 993
    + 994class ZeroMeanUnitVarianceDescr(ProcessingDescrBase):
    + 995    """Subtract mean and divide by the standard deviation."""
    + 996
    + 997    id: Literal["zero_mean_unit_variance"] = "zero_mean_unit_variance"
    + 998    kwargs: ZeroMeanUnitVarianceKwargs
    + 999
    +1000
    +1001class ScaleRangeKwargs(ProcessingKwargs):
    +1002    """key word arguments for `ScaleRangeDescr`
    +1003
    +1004    For `min_percentile`=0.0 (the default) and `max_percentile`=100 (the default)
    +1005    this processing step normalizes data to the [0, 1] interval.
    +1006    For other percentiles the normalized values will partially be outside the [0, 1]
    +1007    interval. Use `ScaleRangeDescr` followed by `ClipDescr` if you want to limit the
    +1008    normalized values to a range.
    +1009    """
    +1010
    +1011    axes: Annotated[
    +1012        Optional[Sequence[AxisId]], Field(examples=[("batch", "x", "y")])
    +1013    ] = None
    +1014    """The subset of axes to normalize jointly, i.e. axes to reduce to compute the min/max percentile value.
    +1015    For example to normalize 'batch', 'x' and 'y' jointly in a tensor ('batch', 'channel', 'y', 'x')
    +1016    resulting in a tensor of equal shape normalized per channel, specify `axes=('batch', 'x', 'y')`.
    +1017    To normalize samples independently, leave out the "batch" axis.
    +1018    Default: Scale all axes jointly."""
    +1019
    +1020    min_percentile: Annotated[float, Interval(ge=0, lt=100)] = 0.0
    +1021    """The lower percentile used to determine the value to align with zero."""
    +1022
    +1023    max_percentile: Annotated[float, Interval(gt=1, le=100)] = 100.0
    +1024    """The upper percentile used to determine the value to align with one.
    +1025    Has to be bigger than `min_percentile`.
    +1026    The range is 1 to 100 instead of 0 to 100 to avoid mistakenly
    +1027    accepting percentiles specified in the range 0.0 to 1.0."""
    +1028
    +1029    eps: Annotated[float, Interval(gt=0, le=0.1)] = 1e-6
    +1030    """Epsilon for numeric stability.
    +1031    `out = (tensor - v_lower) / (v_upper - v_lower + eps)`;
    +1032    with `v_lower,v_upper` values at the respective percentiles."""
    +1033
    +1034    reference_tensor: Optional[TensorId] = None
    +1035    """Tensor ID to compute the percentiles from. Default: The tensor itself.
    +1036    For any tensor in `inputs` only input tensor references are allowed."""
    +1037
    +1038    @field_validator("max_percentile", mode="after")
    +1039    @classmethod
    +1040    def min_smaller_max(cls, value: float, info: ValidationInfo) -> float:
    +1041        if (min_p := info.data["min_percentile"]) >= value:
    +1042            raise ValueError(f"min_percentile {min_p} >= max_percentile {value}")
    +1043
    +1044        return value
    +1045
    +1046
    +1047class ScaleRangeDescr(ProcessingDescrBase):
    +1048    """Scale with percentiles."""
    +1049
    +1050    id: Literal["scale_range"] = "scale_range"
    +1051    kwargs: ScaleRangeKwargs
    +1052
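    +    # Illustrative: with the default percentiles this amounts to a min-max
    +    # normalization, roughly
    +    #   v_lower = percentile(tensor, min_percentile)
    +    #   v_upper = percentile(tensor, max_percentile)
    +    #   out = (tensor - v_lower) / (v_upper - v_lower + eps)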
    +1053
    +1054class ScaleMeanVarianceKwargs(ProcessingKwargs):
    +1055    """key word arguments for `ScaleMeanVarianceDescr`"""
    +1056
    +1057    reference_tensor: TensorId
    +1058    """Name of tensor to match."""
    +1059
    +1060    axes: Annotated[
    +1061        Optional[Sequence[AxisId]], Field(examples=[("batch", "x", "y")])
    +1062    ] = None
    +1063    """The subset of axes to normalize jointly, i.e. axes to reduce to compute mean/std.
    +1064    For example to normalize 'batch', 'x' and 'y' jointly in a tensor ('batch', 'channel', 'y', 'x')
    +1065    resulting in a tensor of equal shape normalized per channel, specify `axes=('batch', 'x', 'y')`.
    +1066    To normalize samples independently, leave out the 'batch' axis.
    +1067    Default: Scale all axes jointly."""
    +1068
    +1069    eps: Annotated[float, Interval(gt=0, le=0.1)] = 1e-6
    +1070    """Epsilon for numeric stability:
    +1071    `out  = (tensor - mean) / (std + eps) * (ref_std + eps) + ref_mean.`"""
    +1072
    +1073
    +1074class ScaleMeanVarianceDescr(ProcessingDescrBase):
    +1075    """Scale a tensor's data distribution to match another tensor's mean/std.
    +1076    `out  = (tensor - mean) / (std + eps) * (ref_std + eps) + ref_mean.`
    +1077    """
    +1078
    +1079    id: Literal["scale_mean_variance"] = "scale_mean_variance"
    +1080    kwargs: ScaleMeanVarianceKwargs
    +1081
    +1082
    +1083PreprocessingDescr = Annotated[
    +1084    Union[
    +1085        BinarizeDescr,
    +1086        ClipDescr,
    +1087        EnsureDtypeDescr,
    +1088        ScaleLinearDescr,
    +1089        SigmoidDescr,
    +1090        FixedZeroMeanUnitVarianceDescr,
    +1091        ZeroMeanUnitVarianceDescr,
    +1092        ScaleRangeDescr,
    +1093    ],
    +1094    Discriminator("id"),
    +1095]
    +1096PostprocessingDescr = Annotated[
    +1097    Union[
    +1098        BinarizeDescr,
    +1099        ClipDescr,
    +1100        EnsureDtypeDescr,
    +1101        ScaleLinearDescr,
    +1102        SigmoidDescr,
    +1103        FixedZeroMeanUnitVarianceDescr,
    +1104        ZeroMeanUnitVarianceDescr,
    +1105        ScaleRangeDescr,
    +1106        ScaleMeanVarianceDescr,
    +1107    ],
    +1108    Discriminator("id"),
    +1109]
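    +# Both unions are discriminated by the `id` field, so pydantic can pick the
    +# concrete processing class from raw data, e.g. `{"id": "sigmoid"}` validates
    +# as a `SigmoidDescr`.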
    +1110
    +1111IO_AxisT = TypeVar("IO_AxisT", InputAxis, OutputAxis)
    +1112
    +1113
    +1114class TensorDescrBase(Node, Generic[IO_AxisT]):
    +1115    id: TensorId
    +1116    """Tensor id. No duplicates are allowed."""
    +1117
    +1118    description: Annotated[str, MaxLen(128)] = ""
    +1119    """free text description"""
    +1120
    +1121    axes: NotEmpty[Sequence[IO_AxisT]]
    +1122    """tensor axes"""
    +1123
    +1124    @property
    +1125    def shape(self):
    +1126        return tuple(a.size for a in self.axes)
    +1127
    +1128    @field_validator("axes", mode="after", check_fields=False)
    +1129    @classmethod
    +1130    def _validate_axes(cls, axes: Sequence[AnyAxis]) -> Sequence[AnyAxis]:
    +1131        batch_axes = [a for a in axes if a.type == "batch"]
    +1132        if len(batch_axes) > 1:
    +1133            raise ValueError(
    +1134                f"Only one batch axis (per tensor) allowed, but got {batch_axes}"
    +1135            )
    +1136
    +1137        seen_ids: Set[AxisId] = set()
    +1138        duplicate_axes_ids: Set[AxisId] = set()
    +1139        for a in axes:
    +1140            (duplicate_axes_ids if a.id in seen_ids else seen_ids).add(a.id)
    +1141
    +1142        if duplicate_axes_ids:
    +1143            raise ValueError(f"Duplicate axis ids: {duplicate_axes_ids}")
    +1144
    +1145        return axes
    +1146
    +1147    test_tensor: FileDescr
    +1148    """An example tensor to use for testing.
    +1149    Using the model with the test input tensors is expected to yield the test output tensors.
    +1150    Each test tensor has to be an ndarray in the
    +1151    [numpy.lib file format](https://numpy.org/doc/stable/reference/generated/numpy.lib.format.html#module-numpy.lib.format).
    +1152    The file extension must be '.npy'."""
    +1153
    +1154    sample_tensor: Optional[FileDescr] = None
    +1155    """A sample tensor to illustrate a possible input/output for the model.
    +1156    The sample image primarily serves to inform a human user about an example use case
    +1157    and is typically stored as .hdf5, .png or .tiff.
    +1158    It has to be readable by the [imageio library](https://imageio.readthedocs.io/en/stable/formats/index.html#supported-formats)
    +1159    (numpy's `.npy` format is not supported).
    +1160    The image dimensionality has to match the number of axes specified in this tensor description.
    +1161    """
    +1162
    +1163    @model_validator(mode="after")
    +1164    def _validate_sample_tensor(self) -> Self:
    +1165        if (
    +1166            self.sample_tensor is None
    +1167            or not validation_context_var.get().perform_io_checks
    +1168        ):
    +1169            return self
    +1170
    +1171        local = download(self.sample_tensor.source, sha256=self.sample_tensor.sha256)
    +1172        tensor: NDArray[Any] = imread(
    +1173            local.path.read_bytes(),
    +1174            extension=PurePosixPath(local.original_file_name).suffix,
    +1175        )
    +1176        n_dims = len(tensor.squeeze().shape)
    +1177        n_dims_min = n_dims_max = len(self.axes)
    +1178
    +1179        for a in self.axes:
    +1180            if isinstance(a, BatchAxis):
    +1181                n_dims_min -= 1
    +1182            elif isinstance(a.size, int):
    +1183                if a.size == 1:
    +1184                    n_dims_min -= 1
    +1185            elif isinstance(a.size, (ParameterizedSize, DataDependentSize)):
    +1186                if a.size.min == 1:
    +1187                    n_dims_min -= 1
    +1188            elif isinstance(a.size, SizeReference):
    +1189                if a.size.offset < 2:
    +1190                    # size reference may result in singleton axis
    +1191                    n_dims_min -= 1
    +1192            else:
    +1193                assert_never(a.size)
    +1194
    +1195        n_dims_min = max(0, n_dims_min)
    +1196        if n_dims < n_dims_min or n_dims > n_dims_max:
    +1197            raise ValueError(
    +1198                f"Expected sample tensor to have {n_dims_min} to"
    +1199                + f" {n_dims_max} dimensions, but found {n_dims} (shape: {tensor.shape})."
    +1200            )
    +1201
    +1202        return self
    +1203
    +1204    data: Union[TensorDataDescr, NotEmpty[Sequence[TensorDataDescr]]] = (
    +1205        IntervalOrRatioDataDescr()
    +1206    )
    +1207    """Description of the tensor's data values, optionally per channel.
    +1208    If specified per channel, the data `type` needs to match across channels."""
    +1209
    +1210    @property
    +1211    def dtype(
    +1212        self,
    +1213    ) -> Literal[
    +1214        "float32",
    +1215        "float64",
    +1216        "uint8",
    +1217        "int8",
    +1218        "uint16",
    +1219        "int16",
    +1220        "uint32",
    +1221        "int32",
    +1222        "uint64",
    +1223        "int64",
    +1224        "bool",
    +1225    ]:
    +1226        """dtype as specified under `data.type` or `data[i].type`"""
    +1227        if isinstance(self.data, collections.abc.Sequence):
    +1228            return self.data[0].type
    +1229        else:
    +1230            return self.data.type
    +1231
    +1232    @field_validator("data", mode="after")
    +1233    @classmethod
    +1234    def _check_data_type_across_channels(
    +1235        cls, value: Union[TensorDataDescr, NotEmpty[Sequence[TensorDataDescr]]]
    +1236    ) -> Union[TensorDataDescr, NotEmpty[Sequence[TensorDataDescr]]]:
    +1237        if not isinstance(value, list):
    +1238            return value
    +1239
    +1240        dtypes = {t.type for t in value}
    +1241        if len(dtypes) > 1:
    +1242            raise ValueError(
    +1243                "Tensor data descriptions per channel need to agree in their data"
    +1244                + f" `type`, but found {dtypes}."
    +1245            )
    +1246
    +1247        return value
    +1248
    +1249    @model_validator(mode="after")
    +1250    def _check_data_matches_channelaxis(self) -> Self:
    +1251        if not isinstance(self.data, (list, tuple)):
    +1252            return self
    +1253
    +1254        for a in self.axes:
    +1255            if isinstance(a, ChannelAxis):
    +1256                size = a.size
    +1257                assert isinstance(size, int)
    +1258                break
    +1259        else:
    +1260            return self
    +1261
    +1262        if len(self.data) != size:
    +1263            raise ValueError(
    +1264                f"Got tensor data descriptions for {len(self.data)} channels, but"
    +1265                + f" '{a.id}' axis has size {size}."
    +1266            )
    +1267
    +1268        return self
    +1269
    +1270    def get_axis_sizes_for_array(self, array: NDArray[Any]) -> Dict[AxisId, int]:
    +1271        if len(array.shape) != len(self.axes):
    +1272            raise ValueError(
    +1273                f"Dimension mismatch: array shape {array.shape} (#{len(array.shape)})"
    +1274                + f" incompatible with {len(self.axes)} axes."
    +1275            )
    +1276        return {a.id: array.shape[i] for i, a in enumerate(self.axes)}
    +1277
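    +    # Illustrative (hypothetical axis ids): for axes (batch, channel, y, x) and an
    +    # array of shape (1, 3, 512, 512), `get_axis_sizes_for_array` returns
    +    #   {AxisId("batch"): 1, AxisId("channel"): 3, AxisId("y"): 512, AxisId("x"): 512}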
    +1278
    +1279class InputTensorDescr(TensorDescrBase[InputAxis]):
    +1280    id: TensorId = TensorId("input")
    +1281    """Input tensor id.
    +1282    No duplicates are allowed across all inputs and outputs."""
    +1283
    +1284    optional: bool = False
    +1285    """indicates that this tensor may be `None`"""
    +1286
    +1287    preprocessing: List[PreprocessingDescr] = Field(default_factory=list)
    +1288    """Description of how this input should be preprocessed.
    +1289
    +1290    notes:
    +1291    - If preprocessing does not start with an 'ensure_dtype' entry, it is added
    +1292      to ensure an input tensor's data type matches the input tensor's data description.
    +1293    - If preprocessing does not end with an 'ensure_dtype' or 'binarize' entry, an
    +1294      'ensure_dtype' step is added to ensure preprocessing steps are not unintentionally
    +1295      changing the data type.
    +1296    """
    +1297
    +1298    @model_validator(mode="after")
    +1299    def _validate_preprocessing_kwargs(self) -> Self:
    +1300        axes_ids = [a.id for a in self.axes]
    +1301        for p in self.preprocessing:
    +1302            kwargs_axes: Optional[Sequence[Any]] = p.kwargs.get("axes")
    +1303            if kwargs_axes is None:
    +1304                continue
    +1305
    +1306            if not isinstance(kwargs_axes, collections.abc.Sequence):
    +1307                raise ValueError(
    +1308                    f"Expected `preprocessing.i.kwargs.axes` to be a sequence, but got {type(kwargs_axes)}"
    +1309                )
    +1310
    +1311            if any(a not in axes_ids for a in kwargs_axes):
    +1312                raise ValueError(
    +1313                    "`preprocessing.i.kwargs.axes` needs to be a subset of the axes ids"
    +1314                )
    +1315
    +1316        if isinstance(self.data, (NominalOrOrdinalDataDescr, IntervalOrRatioDataDescr)):
    +1317            dtype = self.data.type
    +1318        else:
    +1319            dtype = self.data[0].type
    +1320
    +1321        # ensure `preprocessing` begins with `EnsureDtypeDescr`
    +1322        if not self.preprocessing or not isinstance(
    +1323            self.preprocessing[0], EnsureDtypeDescr
    +1324        ):
    +1325            self.preprocessing.insert(
    +1326                0, EnsureDtypeDescr(kwargs=EnsureDtypeKwargs(dtype=dtype))
    +1327            )
    +1328
    +1329        # ensure `preprocessing` ends with `EnsureDtypeDescr` or `BinarizeDescr`
    +1330        if not isinstance(self.preprocessing[-1], (EnsureDtypeDescr, BinarizeDescr)):
    +1331            self.preprocessing.append(
    +1332                EnsureDtypeDescr(kwargs=EnsureDtypeKwargs(dtype=dtype))
    +1333            )
    +1334
    +1335        return self
    +1336
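    +    # Illustrative: for a float32 input without explicit preprocessing the validator
    +    # above leaves a single step,
    +    #   [EnsureDtypeDescr(kwargs=EnsureDtypeKwargs(dtype="float32"))],
    +    # while an existing chain such as [ZeroMeanUnitVarianceDescr(...)] gets wrapped
    +    # in leading and trailing 'ensure_dtype' steps.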
    +1337
    +1338def convert_axes(
    +1339    axes: str,
    +1340    *,
    +1341    shape: Union[
    +1342        Sequence[int], _ParameterizedInputShape_v0_4, _ImplicitOutputShape_v0_4
    +1343    ],
    +1344    tensor_type: Literal["input", "output"],
    +1345    halo: Optional[Sequence[int]],
    +1346    size_refs: Mapping[_TensorName_v0_4, Mapping[str, int]],
    +1347):
    +1348    ret: List[AnyAxis] = []
    +1349    for i, a in enumerate(axes):
    +1350        axis_type = _AXIS_TYPE_MAP.get(a, a)
    +1351        if axis_type == "batch":
    +1352            ret.append(BatchAxis())
    +1353            continue
    +1354
    +1355        scale = 1.0
    +1356        if isinstance(shape, _ParameterizedInputShape_v0_4):
    +1357            if shape.step[i] == 0:
    +1358                size = shape.min[i]
    +1359            else:
    +1360                size = ParameterizedSize(min=shape.min[i], step=shape.step[i])
    +1361        elif isinstance(shape, _ImplicitOutputShape_v0_4):
    +1362            ref_t = str(shape.reference_tensor)
    +1363            if ref_t.count(".") == 1:
    +1364                t_id, orig_a_id = ref_t.split(".")
    +1365            else:
    +1366                t_id = ref_t
    +1367                orig_a_id = a
    +1368
    +1369            a_id = _AXIS_ID_MAP.get(orig_a_id, a)
    +1370            if not (orig_scale := shape.scale[i]):
    +1371                # old way to insert a new axis dimension
    +1372                size = int(2 * shape.offset[i])
    +1373            else:
    +1374                scale = 1 / orig_scale
    +1375                if axis_type in ("channel", "index"):
    +1376                    # these axes no longer have a scale
    +1377                    offset_from_scale = orig_scale * size_refs.get(
    +1378                        _TensorName_v0_4(t_id), {}
    +1379                    ).get(orig_a_id, 0)
    +1380                else:
    +1381                    offset_from_scale = 0
    +1382                size = SizeReference(
    +1383                    tensor_id=TensorId(t_id),
    +1384                    axis_id=AxisId(a_id),
    +1385                    offset=int(offset_from_scale + 2 * shape.offset[i]),
    +1386                )
    +1387        else:
    +1388            size = shape[i]
    +1389
    +1390        if axis_type == "time":
    +1391            if tensor_type == "input":
    +1392                ret.append(TimeInputAxis(size=size, scale=scale))
    +1393            else:
    +1394                assert not isinstance(size, ParameterizedSize)
    +1395                if halo is None:
    +1396                    ret.append(TimeOutputAxis(size=size, scale=scale))
    +1397                else:
    +1398                    assert not isinstance(size, int)
    +1399                    ret.append(
    +1400                        TimeOutputAxisWithHalo(size=size, scale=scale, halo=halo[i])
    +1401                    )
    +1402
    +1403        elif axis_type == "index":
    +1404            if tensor_type == "input":
    +1405                ret.append(IndexInputAxis(size=size))
    +1406            else:
    +1407                if isinstance(size, ParameterizedSize):
    +1408                    size = DataDependentSize(min=size.min)
    +1409
    +1410                ret.append(IndexOutputAxis(size=size))
    +1411        elif axis_type == "channel":
    +1412            assert not isinstance(size, ParameterizedSize)
    +1413            if isinstance(size, SizeReference):
    +1414                warnings.warn(
    +1415                    "Conversion of channel size from an implicit output shape may be"
    +1416                    + " wrong"
    +1417                )
    +1418                ret.append(
    +1419                    ChannelAxis(
    +1420                        channel_names=[
    +1421                            Identifier(f"channel{i}") for i in range(size.offset)
    +1422                        ]
    +1423                    )
    +1424                )
    +1425            else:
    +1426                ret.append(
    +1427                    ChannelAxis(
    +1428                        channel_names=[Identifier(f"channel{i}") for i in range(size)]
    +1429                    )
    +1430                )
    +1431        elif axis_type == "space":
    +1432            if tensor_type == "input":
    +1433                ret.append(SpaceInputAxis(id=AxisId(a), size=size, scale=scale))
    +1434            else:
    +1435                assert not isinstance(size, ParameterizedSize)
    +1436                if halo is None or halo[i] == 0:
    +1437                    ret.append(SpaceOutputAxis(id=AxisId(a), size=size, scale=scale))
    +1438                elif isinstance(size, int):
    +1439                    raise NotImplementedError(
    +1440                        f"output axis with halo and fixed size (here {size}) not allowed"
    +1441                    )
    +1442                else:
    +1443                    ret.append(
    +1444                        SpaceOutputAxisWithHalo(
    +1445                            id=AxisId(a), size=size, scale=scale, halo=halo[i]
    +1446                        )
    +1447                    )
    +1448
    +1449    return ret
    +1450
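    +    # Illustrative (hypothetical v0.4 input): converting axes "bcyx" with a fixed
    +    # shape (1, 3, 256, 256) yields roughly
    +    #   [BatchAxis(),
    +    #    ChannelAxis(channel_names=["channel0", "channel1", "channel2"]),
    +    #    SpaceInputAxis(id=AxisId("y"), size=256),
    +    #    SpaceInputAxis(id=AxisId("x"), size=256)]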
    +1451
    +1452_AXIS_TYPE_MAP = {
    +1453    "b": "batch",
    +1454    "t": "time",
    +1455    "i": "index",
    +1456    "c": "channel",
    +1457    "x": "space",
    +1458    "y": "space",
    +1459    "z": "space",
    +1460}
    +1461
    +1462_AXIS_ID_MAP = {
    +1463    "b": "batch",
    +1464    "t": "time",
    +1465    "i": "index",
    +1466    "c": "channel",
    +1467}
    +1468
    +1469
    +1470def _axes_letters_to_ids(
    +1471    axes: Optional[str],
    +1472) -> Optional[List[AxisId]]:
    +1473    if axes is None:
    +1474        return None
    +1475    return [AxisId(_AXIS_ID_MAP.get(a, a)) for a in map(str, axes)]
    +1476
    +1477
    +1478def _get_complement_v04_axis(
    +1479    tensor_axes: Sequence[str], axes: Optional[Sequence[str]]
    +1480) -> Optional[AxisId]:
    +1481    if axes is None:
    +1482        return None
    +1483
    +1484    non_complement_axes = set(axes) | {"b"}
    +1485    complement_axes = [a for a in tensor_axes if a not in non_complement_axes]
    +1486    if len(complement_axes) > 1:
    +1487        raise ValueError(
    +1488            f"Expected none or a single complement axis, but axes '{axes}' "
    +1489            + f"for tensor dims '{tensor_axes}' leave '{complement_axes}'."
    +1490        )
    +1491
    +1492    return None if not complement_axes else AxisId(complement_axes[0])
    +1493
    +1494
    +1495def _convert_proc(
    +1496    p: Union[_PreprocessingDescr_v0_4, _PostprocessingDescr_v0_4],
    +1497    tensor_axes: Sequence[str],
    +1498) -> Union[PreprocessingDescr, PostprocessingDescr]:
    +1499    if isinstance(p, _BinarizeDescr_v0_4):
    +1500        return BinarizeDescr(kwargs=BinarizeKwargs(threshold=p.kwargs.threshold))
    +1501    elif isinstance(p, _ClipDescr_v0_4):
    +1502        return ClipDescr(kwargs=ClipKwargs(min=p.kwargs.min, max=p.kwargs.max))
    +1503    elif isinstance(p, _SigmoidDescr_v0_4):
    +1504        return SigmoidDescr()
    +1505    elif isinstance(p, _ScaleLinearDescr_v0_4):
    +1506        axes = _axes_letters_to_ids(p.kwargs.axes)
    +1507        if p.kwargs.axes is None:
    +1508            axis = None
    +1509        else:
    +1510            axis = _get_complement_v04_axis(tensor_axes, p.kwargs.axes)
    +1511
    +1512        if axis is None:
    +1513            assert not isinstance(p.kwargs.gain, list)
    +1514            assert not isinstance(p.kwargs.offset, list)
    +1515            kwargs = ScaleLinearKwargs(gain=p.kwargs.gain, offset=p.kwargs.offset)
    +1516        else:
    +1517            kwargs = ScaleLinearAlongAxisKwargs(
    +1518                axis=axis, gain=p.kwargs.gain, offset=p.kwargs.offset
    +1519            )
    +1520        return ScaleLinearDescr(kwargs=kwargs)
    +1521    elif isinstance(p, _ScaleMeanVarianceDescr_v0_4):
    +1522        return ScaleMeanVarianceDescr(
    +1523            kwargs=ScaleMeanVarianceKwargs(
    +1524                axes=_axes_letters_to_ids(p.kwargs.axes),
    +1525                reference_tensor=TensorId(str(p.kwargs.reference_tensor)),
    +1526                eps=p.kwargs.eps,
    +1527            )
    +1528        )
    +1529    elif isinstance(p, _ZeroMeanUnitVarianceDescr_v0_4):
    +1530        if p.kwargs.mode == "fixed":
    +1531            mean = p.kwargs.mean
    +1532            std = p.kwargs.std
    +1533            assert mean is not None
    +1534            assert std is not None
    +1535
    +1536            axis = _get_complement_v04_axis(tensor_axes, p.kwargs.axes)
    +1537
    +1538            if axis is None:
    +1539                return FixedZeroMeanUnitVarianceDescr(
    +1540                    kwargs=FixedZeroMeanUnitVarianceKwargs(
    +1541                        mean=mean, std=std  # pyright: ignore[reportArgumentType]
    +1542                    )
    +1543                )
    +1544            else:
    +1545                if not isinstance(mean, list):
    +1546                    mean = [float(mean)]
    +1547                if not isinstance(std, list):
    +1548                    std = [float(std)]
    +1549
    +1550                return FixedZeroMeanUnitVarianceDescr(
    +1551                    kwargs=FixedZeroMeanUnitVarianceAlongAxisKwargs(
    +1552                        axis=axis, mean=mean, std=std
    +1553                    )
    +1554                )
    +1555
    +1556        else:
    +1557            axes = _axes_letters_to_ids(p.kwargs.axes) or []
    +1558            if p.kwargs.mode == "per_dataset":
    +1559                axes = [AxisId("batch")] + axes
    +1560            if not axes:
    +1561                axes = None
    +1562            return ZeroMeanUnitVarianceDescr(
    +1563                kwargs=ZeroMeanUnitVarianceKwargs(axes=axes, eps=p.kwargs.eps)
    +1564            )
    +1565
    +1566    elif isinstance(p, _ScaleRangeDescr_v0_4):
    +1567        return ScaleRangeDescr(
    +1568            kwargs=ScaleRangeKwargs(
    +1569                axes=_axes_letters_to_ids(p.kwargs.axes),
    +1570                min_percentile=p.kwargs.min_percentile,
    +1571                max_percentile=p.kwargs.max_percentile,
    +1572                eps=p.kwargs.eps,
    +1573            )
    +1574        )
    +1575    else:
    +1576        assert_never(p)
    +1577
    +1578
    +1579class _InputTensorConv(
    +1580    Converter[
    +1581        _InputTensorDescr_v0_4,
    +1582        InputTensorDescr,
    +1583        ImportantFileSource,
    +1584        Optional[ImportantFileSource],
    +1585        Mapping[_TensorName_v0_4, Mapping[str, int]],
    +1586    ]
    +1587):
    +1588    def _convert(
    +1589        self,
    +1590        src: _InputTensorDescr_v0_4,
    +1591        tgt: "type[InputTensorDescr] | type[dict[str, Any]]",
    +1592        test_tensor: ImportantFileSource,
    +1593        sample_tensor: Optional[ImportantFileSource],
    +1594        size_refs: Mapping[_TensorName_v0_4, Mapping[str, int]],
    +1595    ) -> "InputTensorDescr | dict[str, Any]":
    +1596        axes: List[InputAxis] = convert_axes(  # pyright: ignore[reportAssignmentType]
    +1597            src.axes,
    +1598            shape=src.shape,
    +1599            tensor_type="input",
    +1600            halo=None,
    +1601            size_refs=size_refs,
    +1602        )
    +1603        prep: List[PreprocessingDescr] = []
    +1604        for p in src.preprocessing:
    +1605            cp = _convert_proc(p, src.axes)
    +1606            assert not isinstance(cp, ScaleMeanVarianceDescr)
    +1607            prep.append(cp)
    +1608
    +1609        return tgt(
    +1610            axes=axes,
    +1611            id=TensorId(str(src.name)),
    +1612            test_tensor=FileDescr(source=test_tensor),
    +1613            sample_tensor=(
    +1614                None if sample_tensor is None else FileDescr(source=sample_tensor)
    +1615            ),
    +1616            data=dict(type=src.data_type),  # pyright: ignore[reportArgumentType]
    +1617            preprocessing=prep,
    +1618        )
    +1619
    +1620
    +1621_input_tensor_conv = _InputTensorConv(_InputTensorDescr_v0_4, InputTensorDescr)
    +1622
    +1623
    +1624class OutputTensorDescr(TensorDescrBase[OutputAxis]):
    +1625    id: TensorId = TensorId("output")
    +1626    """Output tensor id.
    +1627    No duplicates are allowed across all inputs and outputs."""
    +1628
    +1629    postprocessing: List[PostprocessingDescr] = Field(default_factory=list)
    +1630    """Description of how this output should be postprocessed.
    +1631
    +1632    note: `postprocessing` always ends with an 'ensure_dtype' operation.
    +1633          If not explicitly given, an 'ensure_dtype' step casting to this tensor's `data.type` is appended.
    +1634    """
    +1635
    +1636    @model_validator(mode="after")
    +1637    def _validate_postprocessing_kwargs(self) -> Self:
    +1638        axes_ids = [a.id for a in self.axes]
    +1639        for p in self.postprocessing:
    +1640            kwargs_axes: Optional[Sequence[Any]] = p.kwargs.get("axes")
    +1641            if kwargs_axes is None:
    +1642                continue
    +1643
    +1644            if not isinstance(kwargs_axes, collections.abc.Sequence):
    +1645                raise ValueError(
    +1646                    f"Expected `axes` to be a sequence, but got {type(kwargs_axes)}"
    +1647                )
    +1648
    +1649            if any(a not in axes_ids for a in kwargs_axes):
    +1650                raise ValueError("`kwargs.axes` needs to be a subset of the axes ids")
    +1651
    +1652        if isinstance(self.data, (NominalOrOrdinalDataDescr, IntervalOrRatioDataDescr)):
    +1653            dtype = self.data.type
    +1654        else:
    +1655            dtype = self.data[0].type
    +1656
    +1657        # ensure `postprocessing` ends with `EnsureDtypeDescr` or `BinarizeDescr`
    +1658        if not self.postprocessing or not isinstance(
    +1659            self.postprocessing[-1], (EnsureDtypeDescr, BinarizeDescr)
    +1660        ):
    +1661            self.postprocessing.append(
    +1662                EnsureDtypeDescr(kwargs=EnsureDtypeKwargs(dtype=dtype))
    +1663            )
    +1664        return self
    +1665
    +1666
    +1667class _OutputTensorConv(
    +1668    Converter[
    +1669        _OutputTensorDescr_v0_4,
    +1670        OutputTensorDescr,
    +1671        ImportantFileSource,
    +1672        Optional[ImportantFileSource],
    +1673        Mapping[_TensorName_v0_4, Mapping[str, int]],
    +1674    ]
    +1675):
    +1676    def _convert(
    +1677        self,
    +1678        src: _OutputTensorDescr_v0_4,
    +1679        tgt: "type[OutputTensorDescr] | type[dict[str, Any]]",
    +1680        test_tensor: ImportantFileSource,
    +1681        sample_tensor: Optional[ImportantFileSource],
    +1682        size_refs: Mapping[_TensorName_v0_4, Mapping[str, int]],
    +1683    ) -> "OutputTensorDescr | dict[str, Any]":
    +1684        # TODO: split convert_axes into convert_output_axes and convert_input_axes
    +1685        axes: List[OutputAxis] = convert_axes(  # pyright: ignore[reportAssignmentType]
    +1686            src.axes,
    +1687            shape=src.shape,
    +1688            tensor_type="output",
    +1689            halo=src.halo,
    +1690            size_refs=size_refs,
    +1691        )
    +1692        data_descr: Dict[str, Any] = dict(type=src.data_type)
    +1693        if data_descr["type"] == "bool":
    +1694            data_descr["values"] = [False, True]
    +1695
    +1696        return tgt(
    +1697            axes=axes,
    +1698            id=TensorId(str(src.name)),
    +1699            test_tensor=FileDescr(source=test_tensor),
    +1700            sample_tensor=(
    +1701                None if sample_tensor is None else FileDescr(source=sample_tensor)
    +1702            ),
    +1703            data=data_descr,  # pyright: ignore[reportArgumentType]
    +1704            postprocessing=[_convert_proc(p, src.axes) for p in src.postprocessing],
    +1705        )
    +1706
    +1707
    +1708_output_tensor_conv = _OutputTensorConv(_OutputTensorDescr_v0_4, OutputTensorDescr)
    +1709
    +1710
    +1711TensorDescr = Union[InputTensorDescr, OutputTensorDescr]
    +1712
    +1713
    +1714def validate_tensors(
    +1715    tensors: Mapping[TensorId, Tuple[TensorDescr, NDArray[Any]]],
    +1716    tensor_origin: str,  # for more precise error messages, e.g. 'test_tensor'
    +1717):
    +1718    all_tensor_axes: Dict[TensorId, Dict[AxisId, Tuple[AnyAxis, int]]] = {}
    +1719
    +1720    def e_msg(d: TensorDescr):
    +1721        return f"{'inputs' if isinstance(d, InputTensorDescr) else 'outputs'}[{d.id}]"
    +1722
    +1723    for descr, array in tensors.values():
    +1724        try:
    +1725            axis_sizes = descr.get_axis_sizes_for_array(array)
    +1726        except ValueError as e:
    +1727            raise ValueError(f"{e_msg(descr)} {e}")
    +1728        else:
    +1729            all_tensor_axes[descr.id] = {
    +1730                a.id: (a, axis_sizes[a.id]) for a in descr.axes
    +1731            }
    +1732
    +1733    for descr, array in tensors.values():
    +1734        if array.dtype.name != descr.dtype:
    +1735            raise ValueError(
    +1736                f"{e_msg(descr)}.{tensor_origin}.dtype '{array.dtype.name}' does not"
    +1737                + f" match described dtype '{descr.dtype}'"
    +1738            )
    +1739
    +1740        for a in descr.axes:
    +1741            actual_size = all_tensor_axes[descr.id][a.id][1]
    +1742            if a.size is None:
    +1743                continue
    +1744
    +1745            if isinstance(a.size, int):
    +1746                if actual_size != a.size:
    +1747                    raise ValueError(
    +1748                        f"{e_msg(descr)}.{tensor_origin}: axis '{a.id}' "
    +1749                        + f"has incompatible size {actual_size}, expected {a.size}"
    +1750                    )
    +1751            elif isinstance(a.size, ParameterizedSize):
    +1752                _ = a.size.validate_size(actual_size)
    +1753            elif isinstance(a.size, DataDependentSize):
    +1754                _ = a.size.validate_size(actual_size)
    +1755            elif isinstance(a.size, SizeReference):
    +1756                ref_tensor_axes = all_tensor_axes.get(a.size.tensor_id)
    +1757                if ref_tensor_axes is None:
    +1758                    raise ValueError(
    +1759                        f"{e_msg(descr)}.axes[{a.id}].size.tensor_id: Unknown tensor"
    +1760                        + f" reference '{a.size.tensor_id}'"
    +1761                    )
    +1762
    +1763                ref_axis, ref_size = ref_tensor_axes.get(a.size.axis_id, (None, None))
    +1764                if ref_axis is None or ref_size is None:
    +1765                    raise ValueError(
    +1766                        f"{e_msg(descr)}.axes[{a.id}].size.axis_id: Unknown tensor axis"
    +1767                        + f" reference '{a.size.tensor_id}.{a.size.axis_id}'"
    +1768                    )
    +1769
    +1770                if a.unit != ref_axis.unit:
    +1771                    raise ValueError(
    +1772                        f"{e_msg(descr)}.axes[{a.id}].size: `SizeReference` requires"
    +1773                        + " axis and reference axis to have the same `unit`, but"
    +1774                        + f" {a.unit}!={ref_axis.unit}"
    +1775                    )
    +1776
    +1777                if actual_size != (
    +1778                    expected_size := (
    +1779                        ref_size * ref_axis.scale / a.scale + a.size.offset
    +1780                    )
    +1781                ):
    +1782                    raise ValueError(
    +1783                        f"{e_msg(descr)}.{tensor_origin}: axis '{a.id}' of size"
    +1784                        + f" {actual_size} invalid for referenced size {ref_size};"
    +1785                        + f" expected {expected_size}"
    +1786                    )
    +1787            else:
    +1788                assert_never(a.size)
    +1789
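    +    # Illustrative size-reference check (hypothetical numbers): with ref_size=128,
    +    # ref_axis.scale=1.0, a.scale=0.5 and a.size.offset=0 the expected size is
    +    # 128 * 1.0 / 0.5 + 0 = 256.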
    +1790
    +1791class EnvironmentFileDescr(FileDescr):
    +1792    source: Annotated[
    +1793        ImportantFileSource,
    +1794        WithSuffix((".yaml", ".yml"), case_sensitive=True),
    +1795        Field(
    +1796            examples=["environment.yaml"],
    +1797        ),
    +1798    ]
    +1799    """∈📦 Conda environment file.
    +1800    Allows to specify custom dependencies, see conda docs:
    +1801    - [Exporting an environment file across platforms](https://conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html#exporting-an-environment-file-across-platforms)
    +1802    - [Creating an environment file manually](https://conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html#creating-an-environment-file-manually)
    +1803    """
    +1804
    +1805
    +1806class _ArchitectureCallableDescr(Node):
    +1807    callable: Annotated[Identifier, Field(examples=["MyNetworkClass", "get_my_model"])]
    +1808    """Identifier of the callable that returns a torch.nn.Module instance."""
    +1809
    +1810    kwargs: Dict[str, YamlValue] = Field(default_factory=dict)
    +1811    """key word arguments for the `callable`"""
    +1812
    +1813
    +1814class ArchitectureFromFileDescr(_ArchitectureCallableDescr, FileDescr):
    +1815    pass
    +1816
    +1817
    +1818class ArchitectureFromLibraryDescr(_ArchitectureCallableDescr):
    +1819    import_from: str
    +1820    """Where to import the callable from, i.e. `from <import_from> import <callable>`"""
    +1821
    +1822
    +1823ArchitectureDescr = Annotated[
    +1824    Union[ArchitectureFromFileDescr, ArchitectureFromLibraryDescr],
    +1825    Field(union_mode="left_to_right"),
    +1826]
    +1827
    +1828
    +1829class _ArchFileConv(
    +1830    Converter[
    +1831        _CallableFromFile_v0_4,
    +1832        ArchitectureFromFileDescr,
    +1833        Optional[Sha256],
    +1834        Dict[str, Any],
    +1835    ]
    +1836):
    +1837    def _convert(
    +1838        self,
    +1839        src: _CallableFromFile_v0_4,
    +1840        tgt: "type[ArchitectureFromFileDescr | dict[str, Any]]",
    +1841        sha256: Optional[Sha256],
    +1842        kwargs: Dict[str, Any],
    +1843    ) -> "ArchitectureFromFileDescr | dict[str, Any]":
    +1844        if src.startswith("http") and src.count(":") == 2:
    +1845            http, source, callable_ = src.split(":")
    +1846            source = ":".join((http, source))
    +1847        elif not src.startswith("http") and src.count(":") == 1:
    +1848            source, callable_ = src.split(":")
    +1849        else:
    +1850            source = str(src)
    +1851            callable_ = str(src)
    +1852        return tgt(
    +1853            callable=Identifier(callable_),
    +1854            source=cast(ImportantFileSource, source),
    +1855            sha256=sha256,
    +1856            kwargs=kwargs,
    +1857        )
    +1858
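    +    # Illustrative (hypothetical sources): a v0.4 callable "models/unet.py:UNet"
    +    # is split into source="models/unet.py" and callable="UNet"; for
    +    # "https://example.com/unet.py:UNet" the scheme is re-joined into the source URL
    +    # before the callable name is split off.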
    +1859
    +1860_arch_file_conv = _ArchFileConv(_CallableFromFile_v0_4, ArchitectureFromFileDescr)
    +1861
    +1862
    +1863class _ArchLibConv(
    +1864    Converter[
    +1865        _CallableFromDepencency_v0_4, ArchitectureFromLibraryDescr, Dict[str, Any]
    +1866    ]
    +1867):
    +1868    def _convert(
    +1869        self,
    +1870        src: _CallableFromDepencency_v0_4,
    +1871        tgt: "type[ArchitectureFromLibraryDescr | dict[str, Any]]",
    +1872        kwargs: Dict[str, Any],
    +1873    ) -> "ArchitectureFromLibraryDescr | dict[str, Any]":
    +1874        *mods, callable_ = src.split(".")
    +1875        import_from = ".".join(mods)
    +1876        return tgt(
    +1877            import_from=import_from, callable=Identifier(callable_), kwargs=kwargs
    +1878        )
    +1879
    +1880
    +1881_arch_lib_conv = _ArchLibConv(
    +1882    _CallableFromDepencency_v0_4, ArchitectureFromLibraryDescr
    +1883)
    +1884
    +1885
    +1886class WeightsEntryDescrBase(FileDescr):
    +1887    type: ClassVar[WeightsFormat]
    +1888    weights_format_name: ClassVar[str]  # human readable
    +1889
    +1890    source: ImportantFileSource
    +1891    """∈📦 The weights file."""
    +1892
    +1893    authors: Optional[List[Author]] = None
    +1894    """Authors:
    +1895    Either the person(s) that have trained this model, resulting in the original weights file
    +1896        (if this is the initial weights entry, i.e. it does not have a `parent`),
    +1897    or the person(s) who have converted the weights to this weights format
    +1898        (if this is a child weight, i.e. it has a `parent` field).
    +1899    """
    +1900
    +1901    parent: Annotated[
    +1902        Optional[WeightsFormat], Field(examples=["pytorch_state_dict"])
    +1903    ] = None
    +1904    """The source weights these weights were converted from.
    +1905    For example, if a model's weights were converted from the `pytorch_state_dict` format to `torchscript`,
    +1906    The `pytorch_state_dict` weights entry has no `parent` and is the parent of the `torchscript` weights.
    +1907    All weight entries except one (the initial set of weights resulting from training the model)
    +1908    need to have this field."""
    +1909
    +1910    @model_validator(mode="after")
    +1911    def check_parent_is_not_self(self) -> Self:
    +1912        if self.type == self.parent:
    +1913            raise ValueError("Weights entry can't be its own parent.")
    +1914
    +1915        return self
    +1916
    +1917
    +1918class KerasHdf5WeightsDescr(WeightsEntryDescrBase):
    +1919    type = "keras_hdf5"
    +1920    weights_format_name: ClassVar[str] = "Keras HDF5"
    +1921    tensorflow_version: Version
    +1922    """TensorFlow version used to create these weights."""
    +1923
    +1924
    +1925class OnnxWeightsDescr(WeightsEntryDescrBase):
    +1926    type = "onnx"
    +1927    weights_format_name: ClassVar[str] = "ONNX"
    +1928    opset_version: Annotated[int, Ge(7)]
    +1929    """ONNX opset version"""
    +1930
    +1931
    +1932class PytorchStateDictWeightsDescr(WeightsEntryDescrBase):
    +1933    type = "pytorch_state_dict"
    +1934    weights_format_name: ClassVar[str] = "Pytorch State Dict"
    +1935    architecture: ArchitectureDescr
    +1936    pytorch_version: Version
    +1937    """Version of the PyTorch library used.
    +1938    If `architecture.dependencies` is specified, it has to include pytorch and any version pinning has to be compatible.
    +1939    """
    +1940    dependencies: Optional[EnvironmentFileDescr] = None
    +1941    """Custom dependencies beyond pytorch.
    +1942    The conda environment file should include pytorch and any version pinning has to be compatible with
    +1943    `pytorch_version`.
    +1944    """
    +1945
    +1946
    +1947class TensorflowJsWeightsDescr(WeightsEntryDescrBase):
    +1948    type = "tensorflow_js"
    +1949    weights_format_name: ClassVar[str] = "Tensorflow.js"
    +1950    tensorflow_version: Version
    +1951    """Version of the TensorFlow library used."""
    +1952
    +1953    source: ImportantFileSource
    +1954    """∈📦 The multi-file weights.
    +1955    All required files/folders should be packaged in a zip archive.
    +1956
    +1957
    +1958class TensorflowSavedModelBundleWeightsDescr(WeightsEntryDescrBase):
    +1959    type = "tensorflow_saved_model_bundle"
    +1960    weights_format_name: ClassVar[str] = "Tensorflow Saved Model"
    +1961    tensorflow_version: Version
    +1962    """Version of the TensorFlow library used."""
    +1963
    +1964    dependencies: Optional[EnvironmentFileDescr] = None
    +1965    """Custom dependencies beyond tensorflow.
    +1966    The conda environment file should include tensorflow and any version pinning has to be compatible with `tensorflow_version`.
    +1967
    +1968    source: ImportantFileSource
    +1969    """∈📦 The multi-file weights.
    +1970    All required files/folders should be packaged in a zip archive.
    +1971
    +1972
    +1973class TorchscriptWeightsDescr(WeightsEntryDescrBase):
    +1974    type = "torchscript"
    +1975    weights_format_name: ClassVar[str] = "TorchScript"
    +1976    pytorch_version: Version
    +1977    """Version of the PyTorch library used."""
    +1978
    +1979
    +1980class WeightsDescr(Node):
    +1981    keras_hdf5: Optional[KerasHdf5WeightsDescr] = None
    +1982    onnx: Optional[OnnxWeightsDescr] = None
    +1983    pytorch_state_dict: Optional[PytorchStateDictWeightsDescr] = None
    +1984    tensorflow_js: Optional[TensorflowJsWeightsDescr] = None
    +1985    tensorflow_saved_model_bundle: Optional[TensorflowSavedModelBundleWeightsDescr] = (
    +1986        None
    +1987    )
    +1988    torchscript: Optional[TorchscriptWeightsDescr] = None
    +1989
    +1990    @model_validator(mode="after")
    +1991    def check_entries(self) -> Self:
    +1992        entries = {wtype for wtype, entry in self if entry is not None}
    +1993
    +1994        if not entries:
    +1995            raise ValueError("Missing weights entry")
    +1996
    +1997        entries_wo_parent = {
    +1998            wtype
    +1999            for wtype, entry in self
    +2000            if entry is not None and hasattr(entry, "parent") and entry.parent is None
    +2001        }
    +2002        if len(entries_wo_parent) != 1:
    +2003            issue_warning(
    +2004                "Exactly one weights entry may not specify the `parent` field (got"
    +2005                + " {value}). That entry is considered the original set of model weights."
    +2006                + " Other weight formats are created through conversion of the original or"
    +2007                + " already converted weights. They have to reference the weights format"
    +2008                + " they were converted from as their `parent`.",
    +2009                value=len(entries_wo_parent),
    +2010                field="weights",
    +2011            )
    +2012
    +2013        for wtype, entry in self:
    +2014            if entry is None:
    +2015                continue
    +2016
    +2017            assert hasattr(entry, "type")
    +2018            assert hasattr(entry, "parent")
    +2019            assert wtype == entry.type
    +2020            if (
    +2021                entry.parent is not None and entry.parent not in entries
    +2022            ):  # self reference checked for `parent` field
    +2023                raise ValueError(
    +2024                    f"`weights.{wtype}.parent={entry.parent}` not in specified weight"
    +2025                    + f" formats: {entries}"
    +2026                )
    +2027
    +2028        return self
    +2029
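    +    # Illustrative: a model trained in PyTorch and converted to TorchScript would
    +    # typically have
    +    #   weights.pytorch_state_dict.parent is None           (original weights)
    +    #   weights.torchscript.parent == "pytorch_state_dict"  (converted weights)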
    +2030
    +2031class ModelId(ResourceId):
    +2032    pass
    +2033
    +2034
    +2035class LinkedModel(LinkedResourceNode):
    +2036    """Reference to a bioimage.io model."""
    +2037
    +2038    id: ModelId
    +2039    """A valid model `id` from the bioimage.io collection."""
    +2040
    +2041
    +2042class _DataDepSize(NamedTuple):
    +2043    min: int
    +2044    max: Optional[int]
    +2045
    +2046
    +2047class _AxisSizes(NamedTuple):
    +2048    """the lengths of all axes of model inputs and outputs"""
    +2049
    +2050    inputs: Dict[Tuple[TensorId, AxisId], int]
    +2051    outputs: Dict[Tuple[TensorId, AxisId], Union[int, _DataDepSize]]
    +2052
    +2053
    +2054class _TensorSizes(NamedTuple):
    +2055    """_AxisSizes as nested dicts"""
    +2056
    +2057    inputs: Dict[TensorId, Dict[AxisId, int]]
    +2058    outputs: Dict[TensorId, Dict[AxisId, Union[int, _DataDepSize]]]
    +2059
    +2060
    +2061class ModelDescr(GenericModelDescrBase, title="bioimage.io model specification"):
    +2062    """Specification of the fields used in a bioimage.io-compliant RDF to describe AI models with pretrained weights.
    +2063    These fields are typically stored in a YAML file which we call a model resource description file (model RDF).
    +2064    """
    +2065
    +2066    format_version: Literal["0.5.3"] = "0.5.3"
    +2067    """Version of the bioimage.io model description specification used.
    +2068    When creating a new model always use the latest micro/patch version described here.
    +2069    The `format_version` is important for any consumer software to understand how to parse the fields.
    +2070    """
    +2071
    +2072    type: Literal["model"] = "model"
    +2073    """Specialized resource type 'model'"""
    +2074
    +2075    id: Optional[ModelId] = None
    +2076    """bioimage.io-wide unique resource identifier
    +2077    assigned by bioimage.io; version **un**specific."""
    +2078
    +2079    authors: NotEmpty[List[Author]]
    +2080    """The authors are the creators of the model RDF and the primary points of contact."""
    +2081
    +2082    documentation: Annotated[
    +2083        DocumentationSource,
    +2084        Field(
    +2085            examples=[
    +2086                "https://raw.githubusercontent.com/bioimage-io/spec-bioimage-io/main/example_descriptions/models/unet2d_nuclei_broad/README.md",
    +2087                "README.md",
    +2088            ],
    +2089        ),
    +2090    ]
    +2091    """∈📦 URL or relative path to a markdown file with additional documentation.
    +2092    The recommended documentation file name is `README.md`. An `.md` suffix is mandatory.
    +2093    The documentation should include a '#[#] Validation' (sub)section
    +2094    with details on how to quantitatively validate the model on unseen data."""
    +2095
    +2096    @field_validator("documentation", mode="after")
    +2097    @classmethod
    +2098    def _validate_documentation(cls, value: DocumentationSource) -> DocumentationSource:
    +2099        if not validation_context_var.get().perform_io_checks:
    +2100            return value
    +2101
    +2102        doc_path = download(value).path
    +2103        doc_content = doc_path.read_text(encoding="utf-8")
    +2104        assert isinstance(doc_content, str)
    +2105        if not re.search("#.*[vV]alidation", doc_content):
    +2106            issue_warning(
    +2107                "No '# Validation' (sub)section found in {value}.",
    +2108                value=value,
    +2109                field="documentation",
    +2110            )
    +2111
    +2112        return value
    +2113
    +2114    inputs: NotEmpty[Sequence[InputTensorDescr]]
    +2115    """Describes the input tensors expected by this model."""
    +2116
    +2117    @field_validator("inputs", mode="after")
    +2118    @classmethod
    +2119    def _validate_input_axes(
    +2120        cls, inputs: Sequence[InputTensorDescr]
    +2121    ) -> Sequence[InputTensorDescr]:
    +2122        input_size_refs = cls._get_axes_with_independent_size(inputs)
    +2123
    +2124        for i, ipt in enumerate(inputs):
    +2125            valid_independent_refs: Dict[
    +2126                Tuple[TensorId, AxisId],
    +2127                Tuple[TensorDescr, AnyAxis, Union[int, ParameterizedSize]],
    +2128            ] = {
    +2129                **{
    +2130                    (ipt.id, a.id): (ipt, a, a.size)
    +2131                    for a in ipt.axes
    +2132                    if not isinstance(a, BatchAxis)
    +2133                    and isinstance(a.size, (int, ParameterizedSize))
    +2134                },
    +2135                **input_size_refs,
    +2136            }
    +2137            for a, ax in enumerate(ipt.axes):
    +2138                cls._validate_axis(
    +2139                    "inputs",
    +2140                    i=i,
    +2141                    tensor_id=ipt.id,
    +2142                    a=a,
    +2143                    axis=ax,
    +2144                    valid_independent_refs=valid_independent_refs,
    +2145                )
    +2146        return inputs
    +2147
    +2148    @staticmethod
    +2149    def _validate_axis(
    +2150        field_name: str,
    +2151        i: int,
    +2152        tensor_id: TensorId,
    +2153        a: int,
    +2154        axis: AnyAxis,
    +2155        valid_independent_refs: Dict[
    +2156            Tuple[TensorId, AxisId],
    +2157            Tuple[TensorDescr, AnyAxis, Union[int, ParameterizedSize]],
    +2158        ],
    +2159    ):
    +2160        if isinstance(axis, BatchAxis) or isinstance(
    +2161            axis.size, (int, ParameterizedSize, DataDependentSize)
    +2162        ):
    +2163            return
    +2164        elif not isinstance(axis.size, SizeReference):
    +2165            assert_never(axis.size)
    +2166
    +2167        # validate axis.size SizeReference
    +2168        ref = (axis.size.tensor_id, axis.size.axis_id)
    +2169        if ref not in valid_independent_refs:
    +2170            raise ValueError(
    +2171                "Invalid tensor axis reference at"
    +2172                + f" {field_name}[{i}].axes[{a}].size: {axis.size}."
    +2173            )
    +2174        if ref == (tensor_id, axis.id):
    +2175            raise ValueError(
    +2176                "Self-referencing not allowed for"
    +2177                + f" {field_name}[{i}].axes[{a}].size: {axis.size}"
    +2178            )
    +2179        if axis.type == "channel":
    +2180            if valid_independent_refs[ref][1].type != "channel":
    +2181                raise ValueError(
    +2182                    "A channel axis' size may only reference another fixed size"
    +2183                    + " channel axis."
    +2184                )
    +2185            if isinstance(axis.channel_names, str) and "{i}" in axis.channel_names:
    +2186                ref_size = valid_independent_refs[ref][2]
    +2187                assert isinstance(ref_size, int), (
    +2188                    "channel axis ref (another channel axis) has to specify fixed"
    +2189                    + " size"
    +2190                )
    +2191                generated_channel_names = [
    +2192                    Identifier(axis.channel_names.format(i=i))
    +2193                    for i in range(1, ref_size + 1)
    +2194                ]
    +2195                axis.channel_names = generated_channel_names
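                # e.g. (hypothetical values) a channel_names template "channel{i}"
                # with a referenced fixed size of 3 expands to
                # ["channel1", "channel2", "channel3"]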
    +2196
    +2197        if (ax_unit := getattr(axis, "unit", None)) != (
    +2198            ref_unit := getattr(valid_independent_refs[ref][1], "unit", None)
    +2199        ):
    +2200            raise ValueError(
    +2201                "The units of an axis and its reference axis need to match, but"
    +2202                + f" '{ax_unit}' != '{ref_unit}'."
    +2203            )
    +2204        ref_axis = valid_independent_refs[ref][1]
    +2205        if isinstance(ref_axis, BatchAxis):
    +2206            raise ValueError(
    +2207                f"Invalid reference axis '{ref_axis.id}' for {tensor_id}.{axis.id}"
    +2208                + " (a batch axis is not allowed as reference)."
    +2209            )
    +2210
    +2211        if isinstance(axis, WithHalo):
    +2212            min_size = axis.size.get_size(axis, ref_axis, n=0)
    +2213            if (min_size - 2 * axis.halo) < 1:
    +2214                raise ValueError(
    +2215                    f"axis {axis.id} with minimum size {min_size} is too small for halo"
    +2216                    + f" {axis.halo}."
    +2217                )
    +2218
    +2219            input_halo = axis.halo * axis.scale / ref_axis.scale
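            # e.g. (hypothetical values) an output halo of 4 with output scale 0.5
            # and input scale 1.0 gives input_halo = 4 * 0.5 / 1.0 = 2.0, an even
            # integer and thus valid; a halo of 3 with equal scales would be odd
            # and rejected below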
    +2220            if input_halo != int(input_halo) or input_halo % 2 == 1:
    +2221                raise ValueError(
    +2222                    f"input_halo {input_halo} (output_halo {axis.halo} *"
    +2223                    + f" output_scale {axis.scale} / input_scale {ref_axis.scale})"
    +2224                    + f" is not an even integer for {tensor_id}.{axis.id}."
    +2225                )
    +2226
    +2227    @model_validator(mode="after")
    +2228    def _validate_test_tensors(self) -> Self:
    +2229        if not validation_context_var.get().perform_io_checks:
    +2230            return self
    +2231
    +2232        test_arrays = [
    +2233            load_array(descr.test_tensor.download().path)
    +2234            for descr in chain(self.inputs, self.outputs)
    +2235        ]
    +2236        tensors = {
    +2237            descr.id: (descr, array)
    +2238            for descr, array in zip(chain(self.inputs, self.outputs), test_arrays)
    +2239        }
    +2240        validate_tensors(tensors, tensor_origin="test_tensor")
    +2241        return self
    +2242
    +2243    @model_validator(mode="after")
    +2244    def _validate_tensor_references_in_proc_kwargs(self, info: ValidationInfo) -> Self:
    +2245        ipt_refs = {t.id for t in self.inputs}
    +2246        out_refs = {t.id for t in self.outputs}
    +2247        for ipt in self.inputs:
    +2248            for p in ipt.preprocessing:
    +2249                ref = p.kwargs.get("reference_tensor")
    +2250                if ref is None:
    +2251                    continue
    +2252                if ref not in ipt_refs:
    +2253                    raise ValueError(
    +2254                        f"`reference_tensor` '{ref}' not found. Valid input tensor"
    +2255                        + f" references are: {ipt_refs}."
    +2256                    )
    +2257
    +2258        for out in self.outputs:
    +2259            for p in out.postprocessing:
    +2260                ref = p.kwargs.get("reference_tensor")
    +2261                if ref is None:
    +2262                    continue
    +2263
    +2264                if ref not in ipt_refs and ref not in out_refs:
    +2265                    raise ValueError(
    +2266                        f"`reference_tensor` '{ref}' not found. Valid tensor references"
    +2267                        + f" are: {ipt_refs | out_refs}."
    +2268                    )
    +2269
    +2270        return self
    +2271
    +2272    # TODO: use validate funcs in validate_test_tensors
    +2273    # def validate_inputs(self, input_tensors: Mapping[TensorId, NDArray[Any]]) -> Mapping[TensorId, NDArray[Any]]:
    +2274
    +2275    name: Annotated[
    +2276        Annotated[
    +2277            str, RestrictCharacters(string.ascii_letters + string.digits + "_- ()")
    +2278        ],
    +2279        MinLen(5),
    +2280        MaxLen(128),
    +2281        warn(MaxLen(64), "Name longer than 64 characters.", INFO),
    +2282    ]
    +2283    """A human-readable name of this model.
    +2284    It should be no longer than 64 characters
    +2285    and may only contain letters, numbers, underscores, minus signs, parentheses and spaces.
    +2286    We recommend choosing a name that refers to the model's task and image modality.
    +2287    """
    +2288
    +2289    outputs: NotEmpty[Sequence[OutputTensorDescr]]
    +2290    """Describes the output tensors."""
    +2291
    +2292    @field_validator("outputs", mode="after")
    +2293    @classmethod
    +2294    def _validate_tensor_ids(
    +2295        cls, outputs: Sequence[OutputTensorDescr], info: ValidationInfo
    +2296    ) -> Sequence[OutputTensorDescr]:
    +2297        tensor_ids = [
    +2298            t.id for t in info.data.get("inputs", []) + info.data.get("outputs", [])
    +2299        ]
    +2300        duplicate_tensor_ids: List[str] = []
    +2301        seen: Set[str] = set()
    +2302        for t in tensor_ids:
    +2303            if t in seen:
    +2304                duplicate_tensor_ids.append(t)
    +2305
    +2306            seen.add(t)
    +2307
    +2308        if duplicate_tensor_ids:
    +2309            raise ValueError(f"Duplicate tensor ids: {duplicate_tensor_ids}")
    +2310
    +2311        return outputs
    +2312
    +2313    @staticmethod
    +2314    def _get_axes_with_parameterized_size(
    +2315        io: Union[Sequence[InputTensorDescr], Sequence[OutputTensorDescr]],
    +2316    ):
    +2317        return {
    +2318            f"{t.id}.{a.id}": (t, a, a.size)
    +2319            for t in io
    +2320            for a in t.axes
    +2321            if not isinstance(a, BatchAxis) and isinstance(a.size, ParameterizedSize)
    +2322        }
    +2323
    +2324    @staticmethod
    +2325    def _get_axes_with_independent_size(
    +2326        io: Union[Sequence[InputTensorDescr], Sequence[OutputTensorDescr]],
    +2327    ):
    +2328        return {
    +2329            (t.id, a.id): (t, a, a.size)
    +2330            for t in io
    +2331            for a in t.axes
    +2332            if not isinstance(a, BatchAxis)
    +2333            and isinstance(a.size, (int, ParameterizedSize))
    +2334        }
    +2335
    +2336    @field_validator("outputs", mode="after")
    +2337    @classmethod
    +2338    def _validate_output_axes(
    +2339        cls, outputs: List[OutputTensorDescr], info: ValidationInfo
    +2340    ) -> List[OutputTensorDescr]:
    +2341        input_size_refs = cls._get_axes_with_independent_size(
    +2342            info.data.get("inputs", [])
    +2343        )
    +2344        output_size_refs = cls._get_axes_with_independent_size(outputs)
    +2345
    +2346        for i, out in enumerate(outputs):
    +2347            valid_independent_refs: Dict[
    +2348                Tuple[TensorId, AxisId],
    +2349                Tuple[TensorDescr, AnyAxis, Union[int, ParameterizedSize]],
    +2350            ] = {
    +2351                **{
    +2352                    (out.id, a.id): (out, a, a.size)
    +2353                    for a in out.axes
    +2354                    if not isinstance(a, BatchAxis)
    +2355                    and isinstance(a.size, (int, ParameterizedSize))
    +2356                },
    +2357                **input_size_refs,
    +2358                **output_size_refs,
    +2359            }
    +2360            for a, ax in enumerate(out.axes):
    +2361                cls._validate_axis(
    +2362                    "outputs",
    +2363                    i,
    +2364                    out.id,
    +2365                    a,
    +2366                    ax,
    +2367                    valid_independent_refs=valid_independent_refs,
    +2368                )
    +2369
    +2370        return outputs
    +2371
    +2372    packaged_by: List[Author] = Field(default_factory=list)
    +2373    """The persons that have packaged and uploaded this model.
    +2374    Only required if those persons differ from the `authors`."""
    +2375
    +2376    parent: Optional[LinkedModel] = None
    +2377    """The model from which this model is derived, e.g. by fine-tuning the weights."""
    +2378
    +2379    # todo: add parent self check once we have `id`
    +2380    # @model_validator(mode="after")
    +2381    # def validate_parent_is_not_self(self) -> Self:
    +2382    #     if self.parent is not None and self.parent == self.id:
    +2383    #         raise ValueError("The model may not reference itself as parent model")
    +2384
    +2385    #     return self
    +2386
    +2387    run_mode: Annotated[
    +2388        Optional[RunMode],
    +2389        warn(None, "Run mode '{value}' has limited support across consumer software."),
    +2390    ] = None
    +2391    """Custom run mode for this model: for more complex prediction procedures like test time
    +2392    data augmentation that currently cannot be expressed in the specification.
    +2393    No standard run modes are defined yet."""
    +2394
    +2395    timestamp: Datetime = Datetime(datetime.now())
    +2396    """Timestamp in [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601) format
    +2397    with a few restrictions listed [here](https://docs.python.org/3/library/datetime.html#datetime.datetime.fromisoformat).
    +2398    (In Python a datetime object is valid, too)."""
    +2399
    +2400    training_data: Annotated[
    +2401        Union[None, LinkedDataset, DatasetDescr, DatasetDescr02],
    +2402        Field(union_mode="left_to_right"),
    +2403    ] = None
    +2404    """The dataset used to train this model"""
    +2405
    +2406    weights: Annotated[WeightsDescr, WrapSerializer(package_weights)]
    +2407    """The weights for this model.
    +2408    Weights can be given for different formats, but should otherwise be equivalent.
    +2409    The available weight formats determine which consumers can use this model."""
    +2410
    +2411    @model_validator(mode="after")
    +2412    def _add_default_cover(self) -> Self:
    +2413        if not validation_context_var.get().perform_io_checks or self.covers:
    +2414            return self
    +2415
    +2416        try:
    +2417            generated_covers = generate_covers(
    +2418                [(t, load_array(t.test_tensor.download().path)) for t in self.inputs],
    +2419                [(t, load_array(t.test_tensor.download().path)) for t in self.outputs],
    +2420            )
    +2421        except Exception as e:
    +2422            issue_warning(
    +2423                "Failed to generate cover image(s): {e}",
    +2424                value=self.covers,
    +2425                msg_context=dict(e=e),
    +2426                field="covers",
    +2427            )
    +2428        else:
    +2429            self.covers.extend(generated_covers)
    +2430
    +2431        return self
    +2432
    +2433    def get_input_test_arrays(self) -> List[NDArray[Any]]:
    +2434        data = [load_array(ipt.test_tensor.download().path) for ipt in self.inputs]
    +2435        assert all(isinstance(d, np.ndarray) for d in data)
    +2436        return data
    +2437
    +2438    def get_output_test_arrays(self) -> List[NDArray[Any]]:
    +2439        data = [load_array(out.test_tensor.download().path) for out in self.outputs]
    +2440        assert all(isinstance(d, np.ndarray) for d in data)
    +2441        return data
    +2442
    +2443    @staticmethod
    +2444    def get_batch_size(tensor_sizes: Mapping[TensorId, Mapping[AxisId, int]]) -> int:
    +2445        batch_size = 1
    +2446        tensor_with_batchsize: Optional[TensorId] = None
    +2447        for tid in tensor_sizes:
    +2448            for aid, s in tensor_sizes[tid].items():
    +2449                if aid != BATCH_AXIS_ID or s == 1 or s == batch_size:
    +2450                    continue
    +2451
    +2452                if batch_size != 1:
    +2453                    assert tensor_with_batchsize is not None
    +2454                    raise ValueError(
    +2455                        f"batch size mismatch for tensors '{tensor_with_batchsize}' ({batch_size}) and '{tid}' ({s})"
    +2456                    )
    +2457
    +2458                batch_size = s
    +2459                tensor_with_batchsize = tid
    +2460
    +2461        return batch_size
    +2462
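    # Example (hypothetical tensor/axis ids): a single tensor with a batch axis of
    # size 4 resolves to a batch size of 4, while tensors reporting conflicting
    # batch sizes (other than 1) raise a ValueError:
    #
    #   ModelDescr.get_batch_size(
    #       {TensorId("raw"): {AxisId("batch"): 4, AxisId("x"): 256}}
    #   )  # -> 4
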
    +2463    def get_output_tensor_sizes(
    +2464        self, input_sizes: Mapping[TensorId, Mapping[AxisId, int]]
    +2465    ) -> Dict[TensorId, Dict[AxisId, Union[int, _DataDepSize]]]:
    +2466        """Returns the tensor output sizes for given **input_sizes**.
    +2467        The tensor output sizes are exact only if **input_sizes** specifies a valid input shape.
    +2468        Otherwise they might be larger than the actual (valid) output sizes."""
    +2469        batch_size = self.get_batch_size(input_sizes)
    +2470        ns = self.get_ns(input_sizes)
    +2471
    +2472        tensor_sizes = self.get_tensor_sizes(ns, batch_size=batch_size)
    +2473        return tensor_sizes.outputs
    +2474
    +2475    def get_ns(self, input_sizes: Mapping[TensorId, Mapping[AxisId, int]]):
    +2476        """get parameter `n` for each parameterized axis
    +2477        such that the valid input size is >= the given input size"""
    +2478        ret: Dict[Tuple[TensorId, AxisId], ParameterizedSize_N] = {}
    +2479        axes = {t.id: {a.id: a for a in t.axes} for t in self.inputs}
    +2480        for tid in input_sizes:
    +2481            for aid, s in input_sizes[tid].items():
    +2482                size_descr = axes[tid][aid].size
    +2483                if isinstance(size_descr, ParameterizedSize):
    +2484                    ret[(tid, aid)] = size_descr.get_n(s)
    +2485                elif size_descr is None or isinstance(size_descr, (int, SizeReference)):
    +2486                    pass
    +2487                else:
    +2488                    assert_never(size_descr)
    +2489
    +2490        return ret
    +2491
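    # Example (hypothetical values): for an input axis parameterized as
    # `size = 16 + n*8`, an input size of 30 yields n = ceil((30 - 16) / 8) = 2,
    # i.e. the smallest n whose valid size (32) covers the given size.
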
    +2492    def get_tensor_sizes(
    +2493        self, ns: Mapping[Tuple[TensorId, AxisId], ParameterizedSize_N], batch_size: int
    +2494    ) -> _TensorSizes:
    +2495        axis_sizes = self.get_axis_sizes(ns, batch_size=batch_size)
    +2496        return _TensorSizes(
    +2497            {
    +2498                t: {
    +2499                    aa: axis_sizes.inputs[(tt, aa)]
    +2500                    for tt, aa in axis_sizes.inputs
    +2501                    if tt == t
    +2502                }
    +2503                for t in {tt for tt, _ in axis_sizes.inputs}
    +2504            },
    +2505            {
    +2506                t: {
    +2507                    aa: axis_sizes.outputs[(tt, aa)]
    +2508                    for tt, aa in axis_sizes.outputs
    +2509                    if tt == t
    +2510                }
    +2511                for t in {tt for tt, _ in axis_sizes.outputs}
    +2512            },
    +2513        )
    +2514
    +2515    def get_axis_sizes(
    +2516        self,
    +2517        ns: Mapping[Tuple[TensorId, AxisId], ParameterizedSize_N],
    +2518        batch_size: Optional[int] = None,
    +2519        *,
    +2520        max_input_shape: Optional[Mapping[Tuple[TensorId, AxisId], int]] = None,
    +2521    ) -> _AxisSizes:
    +2522        """Determine input and output block shape for scale factors **ns**
    +2523        of parameterized input sizes.
    +2524
    +2525        Args:
    +2526            ns: Scale factor `n` for each axis (keyed by (tensor_id, axis_id))
    +2527                that is parameterized as `size = min + n * step`.
    +2528            batch_size: The desired size of the batch dimension.
    +2529                If given, **batch_size** overrides any batch size present in
    +2530                **max_input_shape**. Defaults to 1.
    +2531            max_input_shape: Limits the derived block shapes.
    +2532                For each axis whose parameterized input size (for the given `n`)
    +2533                exceeds **max_input_shape**, `n` is reduced to the smallest value
    +2534                that still covers **max_input_shape**.
    +2535                Use this for small input samples or large values of **ns**,
    +2536                or simply whenever you know the full input shape.
    +2537
    +2538        Returns:
    +2539            Resolved axis sizes for model inputs and outputs.
    +2540        """
    +2541        max_input_shape = max_input_shape or {}
    +2542        if batch_size is None:
    +2543            for (_t_id, a_id), s in max_input_shape.items():
    +2544                if a_id == BATCH_AXIS_ID:
    +2545                    batch_size = s
    +2546                    break
    +2547            else:
    +2548                batch_size = 1
    +2549
    +2550        all_axes = {
    +2551            t.id: {a.id: a for a in t.axes} for t in chain(self.inputs, self.outputs)
    +2552        }
    +2553
    +2554        inputs: Dict[Tuple[TensorId, AxisId], int] = {}
    +2555        outputs: Dict[Tuple[TensorId, AxisId], Union[int, _DataDepSize]] = {}
    +2556
    +2557        def get_axis_size(a: Union[InputAxis, OutputAxis]):
    +2558            if isinstance(a, BatchAxis):
    +2559                if (t_descr.id, a.id) in ns:
    +2560                    logger.warning(
    +2561                        "Ignoring unexpected size increment factor (n) for batch axis"
    +2562                        + " of tensor '{}'.",
    +2563                        t_descr.id,
    +2564                    )
    +2565                return batch_size
    +2566            elif isinstance(a.size, int):
    +2567                if (t_descr.id, a.id) in ns:
    +2568                    logger.warning(
    +2569                        "Ignoring unexpected size increment factor (n) for fixed size"
    +2570                        + " axis '{}' of tensor '{}'.",
    +2571                        a.id,
    +2572                        t_descr.id,
    +2573                    )
    +2574                return a.size
    +2575            elif isinstance(a.size, ParameterizedSize):
    +2576                if (t_descr.id, a.id) not in ns:
    +2577                    raise ValueError(
    +2578                        "Size increment factor (n) missing for parametrized axis"
    +2579                        + f" '{a.id}' of tensor '{t_descr.id}'."
    +2580                    )
    +2581                n = ns[(t_descr.id, a.id)]
    +2582                s_max = max_input_shape.get((t_descr.id, a.id))
    +2583                if s_max is not None:
    +2584                    n = min(n, a.size.get_n(s_max))
    +2585
    +2586                return a.size.get_size(n)
    +2587
    +2588            elif isinstance(a.size, SizeReference):
    +2589                if (t_descr.id, a.id) in ns:
    +2590                    logger.warning(
    +2591                        "Ignoring unexpected size increment factor (n) for axis '{}'"
    +2592                        + " of tensor '{}' with size reference.",
    +2593                        a.id,
    +2594                        t_descr.id,
    +2595                    )
    +2596                assert not isinstance(a, BatchAxis)
    +2597                ref_axis = all_axes[a.size.tensor_id][a.size.axis_id]
    +2598                assert not isinstance(ref_axis, BatchAxis)
    +2599                ref_key = (a.size.tensor_id, a.size.axis_id)
    +2600                ref_size = inputs.get(ref_key, outputs.get(ref_key))
    +2601                assert ref_size is not None, ref_key
    +2602                assert not isinstance(ref_size, _DataDepSize), ref_key
    +2603                return a.size.get_size(
    +2604                    axis=a,
    +2605                    ref_axis=ref_axis,
    +2606                    ref_size=ref_size,
    +2607                )
    +2608            elif isinstance(a.size, DataDependentSize):
    +2609                if (t_descr.id, a.id) in ns:
    +2610                    logger.warning(
    +2611                        "Ignoring unexpected increment factor (n) for data dependent"
    +2612                        + " size axis '{}' of tensor '{}'.",
    +2613                        a.id,
    +2614                        t_descr.id,
    +2615                    )
    +2616                return _DataDepSize(a.size.min, a.size.max)
    +2617            else:
    +2618                assert_never(a.size)
    +2619
    +2620        # first resolve all input axis sizes except those given as a `SizeReference`
    +2621        for t_descr in self.inputs:
    +2622            for a in t_descr.axes:
    +2623                if not isinstance(a.size, SizeReference):
    +2624                    s = get_axis_size(a)
    +2625                    assert not isinstance(s, _DataDepSize)
    +2626                    inputs[t_descr.id, a.id] = s
    +2627
    +2628        # resolve all other input axis sizes
    +2629        for t_descr in self.inputs:
    +2630            for a in t_descr.axes:
    +2631                if isinstance(a.size, SizeReference):
    +2632                    s = get_axis_size(a)
    +2633                    assert not isinstance(s, _DataDepSize)
    +2634                    inputs[t_descr.id, a.id] = s
    +2635
    +2636        # resolve all output axis sizes
    +2637        for t_descr in self.outputs:
    +2638            for a in t_descr.axes:
    +2639                assert not isinstance(a.size, ParameterizedSize)
    +2640                s = get_axis_size(a)
    +2641                outputs[t_descr.id, a.id] = s
    +2642
    +2643        return _AxisSizes(inputs=inputs, outputs=outputs)
    +2644
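    # Example: combining the helpers above, concrete output sizes for a given
    # input can be derived as
    #
    #   ns = model_descr.get_ns(input_sizes)
    #   batch_size = model_descr.get_batch_size(input_sizes)
    #   sizes = model_descr.get_tensor_sizes(ns, batch_size=batch_size)
    #   sizes.outputs  # axis sizes per output tensor id
    #
    # which is what `get_output_tensor_sizes` does internally
    # (`model_descr` being a hypothetical ModelDescr instance).
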
    +2645    @model_validator(mode="before")
    +2646    @classmethod
    +2647    def _convert(cls, data: Dict[str, Any]) -> Dict[str, Any]:
    +2648        if (
    +2649            data.get("type") == "model"
    +2650            and isinstance(fv := data.get("format_version"), str)
    +2651            and fv.count(".") == 2
    +2652        ):
    +2653            fv_parts = fv.split(".")
    +2654            if any(not p.isdigit() for p in fv_parts):
    +2655                return data
    +2656
    +2657            fv_tuple = tuple(map(int, fv_parts))
    +2658
    +2659            assert cls.implemented_format_version_tuple[0:2] == (0, 5)
    +2660            if fv_tuple[:2] in ((0, 3), (0, 4)):
    +2661                m04 = _ModelDescr_v0_4.load(data)
    +2662                if not isinstance(m04, InvalidDescr):
    +2663                    return _model_conv.convert_as_dict(m04)
    +2664            elif fv_tuple[:2] == (0, 5):
    +2665                # bump patch version
    +2666                data["format_version"] = cls.implemented_format_version
    +2667
    +2668        return data
    +2669
    +2670
    +2671class _ModelConv(Converter[_ModelDescr_v0_4, ModelDescr]):
    +2672    def _convert(
    +2673        self, src: _ModelDescr_v0_4, tgt: "type[ModelDescr] | type[dict[str, Any]]"
    +2674    ) -> "ModelDescr | dict[str, Any]":
    +2675        name = "".join(
    +2676            c if c in string.ascii_letters + string.digits + "_- ()" else " "
    +2677            for c in src.name
    +2678        )
    +2679
    +2680        def conv_authors(auths: Optional[Sequence[_Author_v0_4]]):
    +2681            conv = (
    +2682                _author_conv.convert if TYPE_CHECKING else _author_conv.convert_as_dict
    +2683            )
    +2684            return None if auths is None else [conv(a) for a in auths]
    +2685
    +2686        if TYPE_CHECKING:
    +2687            arch_file_conv = _arch_file_conv.convert
    +2688            arch_lib_conv = _arch_lib_conv.convert
    +2689        else:
    +2690            arch_file_conv = _arch_file_conv.convert_as_dict
    +2691            arch_lib_conv = _arch_lib_conv.convert_as_dict
    +2692
    +2693        input_size_refs = {
    +2694            ipt.name: {
    +2695                a: s
    +2696                for a, s in zip(
    +2697                    ipt.axes,
    +2698                    (
    +2699                        ipt.shape.min
    +2700                        if isinstance(ipt.shape, _ParameterizedInputShape_v0_4)
    +2701                        else ipt.shape
    +2702                    ),
    +2703                )
    +2704            }
    +2705            for ipt in src.inputs
    +2706            if ipt.shape
    +2707        }
    +2708        output_size_refs = {
    +2709            **{
    +2710                out.name: {a: s for a, s in zip(out.axes, out.shape)}
    +2711                for out in src.outputs
    +2712                if not isinstance(out.shape, _ImplicitOutputShape_v0_4)
    +2713            },
    +2714            **input_size_refs,
    +2715        }
    +2716
    +2717        return tgt(
    +2718            attachments=(
    +2719                []
    +2720                if src.attachments is None
    +2721                else [FileDescr(source=f) for f in src.attachments.files]
    +2722            ),
    +2723            authors=[
    +2724                _author_conv.convert_as_dict(a) for a in src.authors
    +2725            ],  # pyright: ignore[reportArgumentType]
    +2726            cite=[
    +2727                {"text": c.text, "doi": c.doi, "url": c.url} for c in src.cite
    +2728            ],  # pyright: ignore[reportArgumentType]
    +2729            config=src.config,
    +2730            covers=src.covers,
    +2731            description=src.description,
    +2732            documentation=src.documentation,
    +2733            format_version="0.5.3",
    +2734            git_repo=src.git_repo,  # pyright: ignore[reportArgumentType]
    +2735            icon=src.icon,
    +2736            id=None if src.id is None else ModelId(src.id),
    +2737            id_emoji=src.id_emoji,
    +2738            license=src.license,  # type: ignore
    +2739            links=src.links,
    +2740            maintainers=[
    +2741                _maintainer_conv.convert_as_dict(m) for m in src.maintainers
    +2742            ],  # pyright: ignore[reportArgumentType]
    +2743            name=name,
    +2744            tags=src.tags,
    +2745            type=src.type,
    +2746            uploader=src.uploader,
    +2747            version=src.version,
    +2748            inputs=[  # pyright: ignore[reportArgumentType]
    +2749                _input_tensor_conv.convert_as_dict(ipt, tt, st, input_size_refs)
    +2750                for ipt, tt, st, in zip(
    +2751                    src.inputs,
    +2752                    src.test_inputs,
    +2753                    src.sample_inputs or [None] * len(src.test_inputs),
    +2754                )
    +2755            ],
    +2756            outputs=[  # pyright: ignore[reportArgumentType]
    +2757                _output_tensor_conv.convert_as_dict(out, tt, st, output_size_refs)
    +2758                for out, tt, st, in zip(
    +2759                    src.outputs,
    +2760                    src.test_outputs,
    +2761                    src.sample_outputs or [None] * len(src.test_outputs),
    +2762                )
    +2763            ],
    +2764            parent=(
    +2765                None
    +2766                if src.parent is None
    +2767                else LinkedModel(
    +2768                    id=ModelId(
    +2769                        str(src.parent.id)
    +2770                        + (
    +2771                            ""
    +2772                            if src.parent.version_number is None
    +2773                            else f"/{src.parent.version_number}"
    +2774                        )
    +2775                    )
    +2776                )
    +2777            ),
    +2778            training_data=(
    +2779                None
    +2780                if src.training_data is None
    +2781                else (
    +2782                    LinkedDataset(
    +2783                        id=DatasetId(
    +2784                            str(src.training_data.id)
    +2785                            + (
    +2786                                ""
    +2787                                if src.training_data.version_number is None
    +2788                                else f"/{src.training_data.version_number}"
    +2789                            )
    +2790                        )
    +2791                    )
    +2792                    if isinstance(src.training_data, LinkedDataset02)
    +2793                    else src.training_data
    +2794                )
    +2795            ),
    +2796            packaged_by=[
    +2797                _author_conv.convert_as_dict(a) for a in src.packaged_by
    +2798            ],  # pyright: ignore[reportArgumentType]
    +2799            run_mode=src.run_mode,
    +2800            timestamp=src.timestamp,
    +2801            weights=(WeightsDescr if TYPE_CHECKING else dict)(
    +2802                keras_hdf5=(w := src.weights.keras_hdf5)
    +2803                and (KerasHdf5WeightsDescr if TYPE_CHECKING else dict)(
    +2804                    authors=conv_authors(w.authors),
    +2805                    source=w.source,
    +2806                    tensorflow_version=w.tensorflow_version or Version("1.15"),
    +2807                    parent=w.parent,
    +2808                ),
    +2809                onnx=(w := src.weights.onnx)
    +2810                and (OnnxWeightsDescr if TYPE_CHECKING else dict)(
    +2811                    source=w.source,
    +2812                    authors=conv_authors(w.authors),
    +2813                    parent=w.parent,
    +2814                    opset_version=w.opset_version or 15,
    +2815                ),
    +2816                pytorch_state_dict=(w := src.weights.pytorch_state_dict)
    +2817                and (PytorchStateDictWeightsDescr if TYPE_CHECKING else dict)(
    +2818                    source=w.source,
    +2819                    authors=conv_authors(w.authors),
    +2820                    parent=w.parent,
    +2821                    architecture=(
    +2822                        arch_file_conv(
    +2823                            w.architecture,
    +2824                            w.architecture_sha256,
    +2825                            w.kwargs,
    +2826                        )
    +2827                        if isinstance(w.architecture, _CallableFromFile_v0_4)
    +2828                        else arch_lib_conv(w.architecture, w.kwargs)
    +2829                    ),
    +2830                    pytorch_version=w.pytorch_version or Version("1.10"),
    +2831                    dependencies=(
    +2832                        None
    +2833                        if w.dependencies is None
    +2834                        else (EnvironmentFileDescr if TYPE_CHECKING else dict)(
    +2835                            source=cast(
    +2836                                ImportantFileSource,
    +2837                                str(deps := w.dependencies)[
    +2838                                    (
    +2839                                        len("conda:")
    +2840                                        if str(deps).startswith("conda:")
    +2841                                        else 0
    +2842                                    ) :
    +2843                                ],
    +2844                            )
    +2845                        )
    +2846                    ),
    +2847                ),
    +2848                tensorflow_js=(w := src.weights.tensorflow_js)
    +2849                and (TensorflowJsWeightsDescr if TYPE_CHECKING else dict)(
    +2850                    source=w.source,
    +2851                    authors=conv_authors(w.authors),
    +2852                    parent=w.parent,
    +2853                    tensorflow_version=w.tensorflow_version or Version("1.15"),
    +2854                ),
    +2855                tensorflow_saved_model_bundle=(
    +2856                    w := src.weights.tensorflow_saved_model_bundle
    +2857                )
    +2858                and (TensorflowSavedModelBundleWeightsDescr if TYPE_CHECKING else dict)(
    +2859                    authors=conv_authors(w.authors),
    +2860                    parent=w.parent,
    +2861                    source=w.source,
    +2862                    tensorflow_version=w.tensorflow_version or Version("1.15"),
    +2863                    dependencies=(
    +2864                        None
    +2865                        if w.dependencies is None
    +2866                        else (EnvironmentFileDescr if TYPE_CHECKING else dict)(
    +2867                            source=cast(
    +2868                                ImportantFileSource,
    +2869                                (
    +2870                                    str(w.dependencies)[len("conda:") :]
    +2871                                    if str(w.dependencies).startswith("conda:")
    +2872                                    else str(w.dependencies)
    +2873                                ),
    +2874                            )
    +2875                        )
    +2876                    ),
    +2877                ),
    +2878                torchscript=(w := src.weights.torchscript)
    +2879                and (TorchscriptWeightsDescr if TYPE_CHECKING else dict)(
    +2880                    source=w.source,
    +2881                    authors=conv_authors(w.authors),
    +2882                    parent=w.parent,
    +2883                    pytorch_version=w.pytorch_version or Version("1.10"),
    +2884                ),
    +2885            ),
    +2886        )
    +2887
    +2888
    +2889_model_conv = _ModelConv(_ModelDescr_v0_4, ModelDescr)
    +2890
    +2891
    +2892# create better cover images for 3d data and non-image outputs
    +2893def generate_covers(
    +2894    inputs: Sequence[Tuple[InputTensorDescr, NDArray[Any]]],
    +2895    outputs: Sequence[Tuple[OutputTensorDescr, NDArray[Any]]],
    +2896) -> List[Path]:
    +2897    def squeeze(
    +2898        data: NDArray[Any], axes: Sequence[AnyAxis]
    +2899    ) -> Tuple[NDArray[Any], List[AnyAxis]]:
    +2900        """apply numpy.ndarray.squeeze while keeping track of the remaining axis descriptions"""
    +2901        if data.ndim != len(axes):
    +2902            raise ValueError(
    +2903                f"tensor shape {data.shape} does not match described axes"
    +2904                + f" {[a.id for a in axes]}"
    +2905            )
    +2906
    +2907        axes = [deepcopy(a) for a, s in zip(axes, data.shape) if s != 1]
    +2908        return data.squeeze(), axes
    +2909
    +2910    def normalize(
    +2911        data: NDArray[Any], axis: Optional[Tuple[int, ...]], eps: float = 1e-7
    +2912    ) -> NDArray[np.float32]:
    +2913        data = data.astype("float32")
    +2914        data -= data.min(axis=axis, keepdims=True)
    +2915        data /= data.max(axis=axis, keepdims=True) + eps
    +2916        return data
    +2917
    +2918    def to_2d_image(data: NDArray[Any], axes: Sequence[AnyAxis]):
    +2919        original_shape = data.shape
    +2920        data, axes = squeeze(data, axes)
    +2921
    +2922        # take a slice from any batch or index axis if needed,
    +2923        # map the first channel axis to 3 (RGB) channels and take a slice from any additional channel axes
    +2924        slices: Tuple[slice, ...] = ()
    +2925        ndim = data.ndim
    +2926        ndim_need = 3 if any(isinstance(a, ChannelAxis) for a in axes) else 2
    +2927        has_c_axis = False
    +2928        for i, a in enumerate(axes):
    +2929            s = data.shape[i]
    +2930            assert s > 1
    +2931            if (
    +2932                isinstance(a, (BatchAxis, IndexInputAxis, IndexOutputAxis))
    +2933                and ndim > ndim_need
    +2934            ):
    +2935                data = data[slices + (slice(s // 2 - 1, s // 2),)]
    +2936                ndim -= 1
    +2937            elif isinstance(a, ChannelAxis):
    +2938                if has_c_axis:
    +2939                    # second channel axis
    +2940                    data = data[slices + (slice(0, 1),)]
    +2941                    ndim -= 1
    +2942                else:
    +2943                    has_c_axis = True
    +2944                    if s == 2:
    +2945                        # visualize two channels with cyan and magenta
    +2946                        data = np.concatenate(
    +2947                            [
    +2948                                data[slices + (slice(1, 2),)],
    +2949                                data[slices + (slice(0, 1),)],
    +2950                                (
    +2951                                    data[slices + (slice(0, 1),)]
    +2952                                    + data[slices + (slice(1, 2),)]
    +2953                                )
    +2954                                / 2,  # TODO: take maximum instead?
    +2955                            ],
    +2956                            axis=i,
    +2957                        )
    +2958                    elif data.shape[i] == 3:
    +2959                        pass  # visualize 3 channels as RGB
    +2960                    else:
    +2961                        # visualize first 3 channels as RGB
    +2962                        data = data[slices + (slice(3),)]
    +2963
    +2964                    assert data.shape[i] == 3
    +2965
    +2966            slices += (slice(None),)
    +2967
    +2968        data, axes = squeeze(data, axes)
    +2969        assert len(axes) == ndim
    +2970        # take slice from z axis if needed
    +2971        slices = ()
    +2972        if ndim > ndim_need:
    +2973            for i, a in enumerate(axes):
    +2974                s = data.shape[i]
    +2975                if a.id == AxisId("z"):
    +2976                    data = data[slices + (slice(s // 2 - 1, s // 2),)]
    +2977                    data, axes = squeeze(data, axes)
    +2978                    ndim -= 1
    +2979                    break
    +2980
    +2981            slices += (slice(None),)
    +2982
    +2983        # take slice from any space or time axis
    +2984        slices = ()
    +2985
    +2986        for i, a in enumerate(axes):
    +2987            if ndim <= ndim_need:
    +2988                break
    +2989
    +2990            s = data.shape[i]
    +2991            assert s > 1
    +2992            if isinstance(
    +2993                a, (SpaceInputAxis, SpaceOutputAxis, TimeInputAxis, TimeOutputAxis)
    +2994            ):
    +2995                data = data[slices + (slice(s // 2 - 1, s // 2),)]
    +2996                ndim -= 1
    +2997
    +2998            slices += (slice(None),)
    +2999
    +3000        del slices
    +3001        data, axes = squeeze(data, axes)
    +3002        assert len(axes) == ndim
    +3003
    +3004        if (has_c_axis and ndim != 3) or (not has_c_axis and ndim != 2):
    +3005            raise ValueError(
    +3006                f"Failed to construct cover image from shape {original_shape}"
    +3007            )
    +3008
    +3009        if not has_c_axis:
    +3010            assert ndim == 2
    +3011            data = np.repeat(data[:, :, None], 3, axis=2)
    +3012            axes.append(ChannelAxis(channel_names=list(map(Identifier, "RGB"))))
    +3013            ndim += 1
    +3014
    +3015        assert ndim == 3
    +3016
    +3017        # transpose axis order such that longest axis comes first...
    +3018        axis_order = list(np.argsort(list(data.shape)))
    +3019        axis_order.reverse()
    +3020        # ... and channel axis is last
    +3021        c = [i for i in range(3) if isinstance(axes[i], ChannelAxis)][0]
    +3022        axis_order.append(axis_order.pop(c))
    +3023        axes = [axes[ao] for ao in axis_order]
    +3024        data = data.transpose(axis_order)
    +3025
    +3026        # h, w = data.shape[:2]
    +3027        # if h / w  in (1.0 or 2.0):
    +3028        #     pass
    +3029        # elif h / w < 2:
    +3030        # TODO: enforce 2:1 or 1:1 aspect ratio for generated cover images
    +3031
    +3032        norm_along = (
    +3033            tuple(i for i, a in enumerate(axes) if a.type in ("space", "time")) or None
    +3034        )
    +3035        # normalize the data and map to 8 bit
    +3036        data = normalize(data, norm_along)
    +3037        data = (data * 255).astype("uint8")
    +3038
    +3039        return data
    +3040
    +3041    def create_diagonal_split_image(im0: NDArray[Any], im1: NDArray[Any]):
    +3042        assert im0.dtype == im1.dtype == np.uint8
    +3043        assert im0.shape == im1.shape
    +3044        assert im0.ndim == 3
    +3045        N, M, C = im0.shape
    +3046        assert C == 3
    +3047        out = np.ones((N, M, C), dtype="uint8")
    +3048        for c in range(C):
    +3049            outc = np.tril(im0[..., c])
    +3050            mask = outc == 0
    +3051            outc[mask] = np.triu(im1[..., c])[mask]
    +3052            out[..., c] = outc
    +3053
    +3054        return out
    +3055
    +3056    ipt_descr, ipt = inputs[0]
    +3057    out_descr, out = outputs[0]
    +3058
    +3059    ipt_img = to_2d_image(ipt, ipt_descr.axes)
    +3060    out_img = to_2d_image(out, out_descr.axes)
    +3061
    +3062    cover_folder = Path(mkdtemp())
    +3063    if ipt_img.shape == out_img.shape:
    +3064        covers = [cover_folder / "cover.png"]
    +3065        imwrite(covers[0], create_diagonal_split_image(ipt_img, out_img))
    +3066    else:
    +3067        covers = [cover_folder / "input.png", cover_folder / "output.png"]
    +3068        imwrite(covers[0], ipt_img)
    +3069        imwrite(covers[1], out_img)
    +3070
    +3071    return covers
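
# Example (sketch): given the first input/output test arrays and their
# descriptions, `generate_covers` writes either a single diagonally split
# "cover.png" (if both 2d projections share a shape) or separate "input.png"
# and "output.png" files to a temporary folder and returns their paths.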
    +
    + + +
    +
    +
    + SpaceUnit = + + typing.Literal['attometer', 'angstrom', 'centimeter', 'decimeter', 'exameter', 'femtometer', 'foot', 'gigameter', 'hectometer', 'inch', 'kilometer', 'megameter', 'meter', 'micrometer', 'mile', 'millimeter', 'nanometer', 'parsec', 'petameter', 'picometer', 'terameter', 'yard', 'yoctometer', 'yottameter', 'zeptometer', 'zettameter'] + + +
    + + + + +
    +
    +
    + TimeUnit = + + typing.Literal['attosecond', 'centisecond', 'day', 'decisecond', 'exasecond', 'femtosecond', 'gigasecond', 'hectosecond', 'hour', 'kilosecond', 'megasecond', 'microsecond', 'millisecond', 'minute', 'nanosecond', 'petasecond', 'picosecond', 'second', 'terasecond', 'yoctosecond', 'yottasecond', 'zeptosecond', 'zettasecond'] + + +
    + + + + +
    +
    +
    + AxisType = +typing.Literal['batch', 'channel', 'index', 'time', 'space'] + + +
    + + + + +
    +
    + +
    + + class + TensorId(bioimageio.spec._internal.types.LowerCaseIdentifier): + + + +
    + +
    193class TensorId(LowerCaseIdentifier):
    +194    root_model: ClassVar[Type[RootModel[Any]]] = RootModel[
    +195        Annotated[LowerCaseIdentifierAnno, MaxLen(32)]
    +196    ]
    +
    + + +


    + +


    +
    + + +
    +
    + root_model: ClassVar[Type[pydantic.root_model.RootModel[Any]]] = + + <class 'pydantic.root_model.RootModel[Annotated[str, MinLen, AfterValidator, AfterValidator, Annotated[TypeVar, Predicate], MaxLen]]'> + + +
    + + +

    the pydantic root model to validate the string

    +
    + + +
    +
    +
    + +
    + + class + AxisId(bioimageio.spec._internal.types.LowerCaseIdentifier): + + + +
    + +
    199class AxisId(LowerCaseIdentifier):
    +200    root_model: ClassVar[Type[RootModel[Any]]] = RootModel[
    +201        Annotated[LowerCaseIdentifierAnno, MaxLen(16)]
    +202    ]
    +
    + + +


    + +


    +
    + + +
    +
    + root_model: ClassVar[Type[pydantic.root_model.RootModel[Any]]] = + + <class 'pydantic.root_model.RootModel[Annotated[str, MinLen, AfterValidator, AfterValidator, Annotated[TypeVar, Predicate], MaxLen]]'> + + +
    + + +

    the pydantic root model to validate the string

    +
    + + +
    +
    +
    +
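A minimal usage sketch for these identifier types (assuming both are importable from `bioimageio.spec.model.v0_5`; the length limits follow from the `MaxLen` annotations shown above, and both types are validated lowercase strings):

```python
from bioimageio.spec.model.v0_5 import AxisId, TensorId

t = TensorId("raw")  # lowercase identifier, at most 32 characters
a = AxisId("x")      # lowercase identifier, at most 16 characters
```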
    + NonBatchAxisId = +typing.Annotated[AxisId, Predicate(_is_not_batch)] + + +
    + + + + +
    +
    +
    + PostprocessingId = + + typing.Literal['binarize', 'clip', 'ensure_dtype', 'fixed_zero_mean_unit_variance', 'scale_linear', 'scale_mean_variance', 'scale_range', 'sigmoid', 'zero_mean_unit_variance'] + + +
    + + + + +
    +
    +
    + PreprocessingId = + + typing.Literal['binarize', 'clip', 'ensure_dtype', 'scale_linear', 'sigmoid', 'zero_mean_unit_variance', 'scale_range'] + + +
    + + + + +
    +
    +
    + SAME_AS_TYPE = +'<same as type>' + + +
    + + + + +
    +
    +
    + ParameterizedSize_N = +<class 'int'> + + +
    + + + + +
    +
    + +
    + + class + ParameterizedSize(bioimageio.spec._internal.node.Node): + + + +
    + +
    243class ParameterizedSize(Node):
    +244    """Describes a range of valid tensor axis sizes as `size = min + n*step`."""
    +245
    +246    N: ClassVar[Type[int]] = ParameterizedSize_N
    +247    """integer to parameterize this axis"""
    +248
    +249    min: Annotated[int, Gt(0)]
    +250    step: Annotated[int, Gt(0)]
    +251
    +252    def validate_size(self, size: int) -> int:
    +253        if size < self.min:
    +254            raise ValueError(f"size {size} < {self.min}")
    +255        if (size - self.min) % self.step != 0:
    +256            raise ValueError(
    +257                f"axis of size {size} is not parameterized by `min + n*step` ="
    +258                + f" `{self.min} + n*{self.step}`"
    +259            )
    +260
    +261        return size
    +262
    +263    def get_size(self, n: ParameterizedSize_N) -> int:
    +264        return self.min + self.step * n
    +265
    +266    def get_n(self, s: int) -> ParameterizedSize_N:
    +267        """return smallest n parameterizing a size greater than or equal to `s`"""
    +268        return ceil((s - self.min) / self.step)
    +
    + + +

    Describes a range of valid tensor axis sizes as size = min + n*step.

    +
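A minimal usage sketch (assuming `ParameterizedSize` is imported from `bioimageio.spec.model.v0_5`; the values are illustrative):

```python
from bioimageio.spec.model.v0_5 import ParameterizedSize

size = ParameterizedSize(min=16, step=8)  # valid sizes: 16, 24, 32, ...
assert size.get_size(n=3) == 40           # 16 + 3*8
assert size.get_n(30) == 2                # smallest n with 16 + n*8 >= 30
assert size.validate_size(32) == 32       # 32 = 16 + 2*8 is on the grid
# size.validate_size(33) would raise ValueError: 33 is not of the form 16 + n*8
```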
    + + +
    +
    + N: ClassVar[Type[int]] = +<class 'int'> + + +
    + + +

    integer to parameterize this axis

    +
    + + +
    +
    +
    + min: Annotated[int, Gt(gt=0)] + + +
    + + + + +
    +
    +
    + step: Annotated[int, Gt(gt=0)] + + +
    + + + + +
    +
    + +
    + + def + validate_size(self, size: int) -> int: + + + +
    + +
    252    def validate_size(self, size: int) -> int:
    +253        if size < self.min:
    +254            raise ValueError(f"size {size} < {self.min}")
    +255        if (size - self.min) % self.step != 0:
    +256            raise ValueError(
    +257                f"axis of size {size} is not parameterized by `min + n*step` ="
    +258                + f" `{self.min} + n*{self.step}`"
    +259            )
    +260
    +261        return size
    +
    + + + + +
    +
    + +
    + + def + get_size(self, n: int) -> int: + + + +
    + +
    263    def get_size(self, n: ParameterizedSize_N) -> int:
    +264        return self.min + self.step * n
    +
    + + + + +
    +
    + +
    + + def + get_n(self, s: int) -> int: + + + +
    + +
    266    def get_n(self, s: int) -> ParameterizedSize_N:
    +267        """return smallest n parameterizing a size greater than or equal to `s`"""
    +268        return ceil((s - self.min) / self.step)
    +
    + + +

    return smallest n parameterizing a size greater than or equal to s

    +
    + + +
    +
    +
    + +
    + + class + DataDependentSize(bioimageio.spec._internal.node.Node): + + + +
    + +
    271class DataDependentSize(Node):
    +272    min: Annotated[int, Gt(0)] = 1
    +273    max: Annotated[Optional[int], Gt(1)] = None
    +274
    +275    @model_validator(mode="after")
    +276    def _validate_max_gt_min(self):
    +277        if self.max is not None and self.min >= self.max:
    +278            raise ValueError(f"expected `min` < `max`, but got {self.min}, {self.max}")
    +279
    +280        return self
    +281
    +282    def validate_size(self, size: int) -> int:
    +283        if size < self.min:
    +284            raise ValueError(f"size {size} < {self.min}")
    +285
    +286        if self.max is not None and size > self.max:
    +287            raise ValueError(f"size {size} > {self.max}")
    +288
    +289        return size
    +
    + + +

    Subpart of a resource description

    +
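A minimal usage sketch (assuming `DataDependentSize` is imported from `bioimageio.spec.model.v0_5`; the values are illustrative):

```python
from bioimageio.spec.model.v0_5 import DataDependentSize

size = DataDependentSize(min=1, max=512)  # actual size only known at runtime
assert size.validate_size(100) == 100     # within [min, max]
# size.validate_size(1000) would raise ValueError: 1000 > 512
```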
    + + +
    +
    + min: Annotated[int, Gt(gt=0)] + + +
    + + + + +
    +
    +
    + max: Annotated[Optional[int], Gt(gt=1)] + + +
    + + + + +
    +
    + +
    + + def + validate_size(self, size: int) -> int: + + + +
    + +
    282    def validate_size(self, size: int) -> int:
    +283        if size < self.min:
    +284            raise ValueError(f"size {size} < {self.min}")
    +285
    +286        if self.max is not None and size > self.max:
    +287            raise ValueError(f"size {size} > {self.max}")
    +288
    +289        return size
    +
    + + + + +
    +
    +
    + +
    + + class + SizeReference(bioimageio.spec._internal.node.Node): + + + +
    + +
    292class SizeReference(Node):
    +293    """A tensor axis size (extent in pixels/frames) defined in relation to a reference axis.
    +294
    +295    `axis.size = reference.size * reference.scale / axis.scale + offset`
    +296
    +297    note:
    +298    1. The axis and the referenced axis need to have the same unit (or no unit).
    +299    2. Batch axes may not be referenced.
    +300    3. Fractions are rounded down.
    +301    4. If the reference axis is `concatenable` the referencing axis is assumed to be
    +302        `concatenable` as well with the same block order.
    +303
    +304    example:
    +305    An unisotropic input image of w*h=100*49 pixels depicts a phsical space of 200*196mm².
    +306    Let's assume that we want to express the image height h in relation to its width w
    +307    instead of only accepting input images of exactly 100*49 pixels
    +308    (for example to express a range of valid image shapes by parametrizing w, see `ParameterizedSize`).
    +309
    +310    >>> w = SpaceInputAxis(id=AxisId("w"), size=100, unit="millimeter", scale=2)
    +311    >>> h = SpaceInputAxis(
    +312    ...     id=AxisId("h"),
    +313    ...     size=SizeReference(tensor_id=TensorId("input"), axis_id=AxisId("w"), offset=-1),
    +314    ...     unit="millimeter",
    +315    ...     scale=4,
    +316    ... )
    +317    >>> print(h.size.compute(h, w))
    +318    49
    +319
    +320    -> h = w * w.scale / h.scale + offset = 100 * 2mm / 4mm - 1 = 49
    +321    """
    +322
    +323    tensor_id: TensorId
    +324    """tensor id of the reference axis"""
    +325
    +326    axis_id: AxisId
    +327    """axis id of the reference axis"""
    +328
    +329    offset: int = 0
    +330
    +331    def get_size(
    +332        self,
    +333        axis: Union[
    +334            ChannelAxis,
    +335            IndexInputAxis,
    +336            IndexOutputAxis,
    +337            TimeInputAxis,
    +338            SpaceInputAxis,
    +339            TimeOutputAxis,
    +340            TimeOutputAxisWithHalo,
    +341            SpaceOutputAxis,
    +342            SpaceOutputAxisWithHalo,
    +343        ],
    +344        ref_axis: Union[
    +345            ChannelAxis,
    +346            IndexInputAxis,
    +347            IndexOutputAxis,
    +348            TimeInputAxis,
    +349            SpaceInputAxis,
    +350            TimeOutputAxis,
    +351            TimeOutputAxisWithHalo,
    +352            SpaceOutputAxis,
    +353            SpaceOutputAxisWithHalo,
    +354        ],
    +355        n: ParameterizedSize_N = 0,
    +356        ref_size: Optional[int] = None,
    +357    ):
    +358        """Compute the concrete size for a given axis and its reference axis.
    +359
    +360        Args:
    +361            axis: The axis this `SizeReference` is the size of.
    +362            ref_axis: The reference axis to compute the size from.
    +363            n: If the **ref_axis** is parameterized (of type `ParameterizedSize`)
    +364                and no fixed **ref_size** is given,
    +365                **n** is used to compute the size of the parameterized **ref_axis**.
    +366            ref_size: Overwrite the reference size instead of deriving it from
    +367                **ref_axis**
    +368                (**ref_axis.scale** is still used; any given **n** is ignored).
    +369        """
    +370        assert (
    +371            axis.size == self
    +372        ), "Given `axis.size` is not defined by this `SizeReference`"
    +373
    +374        assert (
    +375            ref_axis.id == self.axis_id
    +376        ), f"Expected `ref_axis.id` to be {self.axis_id}, but got {ref_axis.id}."
    +377
    +378        assert axis.unit == ref_axis.unit, (
    +379            "`SizeReference` requires `axis` and `ref_axis` to have the same `unit`,"
    +380            f" but {axis.unit}!={ref_axis.unit}"
    +381        )
    +382        if ref_size is None:
    +383            if isinstance(ref_axis.size, (int, float)):
    +384                ref_size = ref_axis.size
    +385            elif isinstance(ref_axis.size, ParameterizedSize):
    +386                ref_size = ref_axis.size.get_size(n)
    +387            elif isinstance(ref_axis.size, DataDependentSize):
    +388                raise ValueError(
    +389                    "Reference axis referenced in `SizeReference` may not be a `DataDependentSize`."
    +390                )
    +391            elif isinstance(ref_axis.size, SizeReference):
    +392                raise ValueError(
    +393                    "Reference axis referenced in `SizeReference` may not be sized by a"
    +394                    + " `SizeReference` itself."
    +395                )
    +396            else:
    +397                assert_never(ref_axis.size)
    +398
    +399        return int(ref_size * ref_axis.scale / axis.scale + self.offset)
    +400
    +401    @staticmethod
    +402    def _get_unit(
    +403        axis: Union[
    +404            ChannelAxis,
    +405            IndexInputAxis,
    +406            IndexOutputAxis,
    +407            TimeInputAxis,
    +408            SpaceInputAxis,
    +409            TimeOutputAxis,
    +410            TimeOutputAxisWithHalo,
    +411            SpaceOutputAxis,
    +412            SpaceOutputAxisWithHalo,
    +413        ],
    +414    ):
    +415        return axis.unit
    +
    + + +

    A tensor axis size (extent in pixels/frames) defined in relation to a reference axis.

    + +

    axis.size = reference.size * reference.scale / axis.scale + offset

    + +

    note:

    + +
      +
    1. The axis and the referenced axis need to have the same unit (or no unit).
    2. Batch axes may not be referenced.
    3. Fractions are rounded down.
    4. If the reference axis is concatenable the referencing axis is assumed to be concatenable as well with the same block order.
    + +

    example:
    An anisotropic input image of w*h=100*49 pixels depicts a physical space of 200*196mm².
    Let's assume that we want to express the image height h in relation to its width w
    instead of only accepting input images of exactly 100*49 pixels
    (for example to express a range of valid image shapes by parametrizing w, see ParameterizedSize).

    + +
    +
    >>> w = SpaceInputAxis(id=AxisId("w"), size=100, unit="millimeter", scale=2)
    +>>> h = SpaceInputAxis(
    +...     id=AxisId("h"),
    +...     size=SizeReference(tensor_id=TensorId("input"), axis_id=AxisId("w"), offset=-1),
    +...     unit="millimeter",
    +...     scale=4,
    +... )
    +>>> print(h.size.compute(h, w))
    +49
    +
    +
    + +

    -> h = w * w.scale / h.scale + offset = 100 * 2mm / 4mm - 1 = 49

    +
    + + +
    +
    + tensor_id: TensorId + + +
    + + +

    tensor id of the reference axis

    +
    + + +
    +
    +
    + axis_id: AxisId + + +
    + + +

    axis id of the reference axis

    +
    + + +
    +
    +
    + offset: int + + +
    + + + + +
    +
    + + + +
    331    def get_size(
    +332        self,
    +333        axis: Union[
    +334            ChannelAxis,
    +335            IndexInputAxis,
    +336            IndexOutputAxis,
    +337            TimeInputAxis,
    +338            SpaceInputAxis,
    +339            TimeOutputAxis,
    +340            TimeOutputAxisWithHalo,
    +341            SpaceOutputAxis,
    +342            SpaceOutputAxisWithHalo,
    +343        ],
    +344        ref_axis: Union[
    +345            ChannelAxis,
    +346            IndexInputAxis,
    +347            IndexOutputAxis,
    +348            TimeInputAxis,
    +349            SpaceInputAxis,
    +350            TimeOutputAxis,
    +351            TimeOutputAxisWithHalo,
    +352            SpaceOutputAxis,
    +353            SpaceOutputAxisWithHalo,
    +354        ],
    +355        n: ParameterizedSize_N = 0,
    +356        ref_size: Optional[int] = None,
    +357    ):
    +358        """Compute the concrete size for a given axis and its reference axis.
    +359
    +360        Args:
    +361            axis: The axis this `SizeReference` is the size of.
    +362            ref_axis: The reference axis to compute the size from.
    +363            n: If the **ref_axis** is parameterized (of type `ParameterizedSize`)
    +364                and no fixed **ref_size** is given,
    +365                **n** is used to compute the size of the parameterized **ref_axis**.
    +366            ref_size: Overwrite the reference size instead of deriving it from
    +367                **ref_axis**
    +368                (**ref_axis.scale** is still used; any given **n** is ignored).
    +369        """
    +370        assert (
    +371            axis.size == self
    +372        ), "Given `axis.size` is not defined by this `SizeReference`"
    +373
    +374        assert (
    +375            ref_axis.id == self.axis_id
    +376        ), f"Expected `ref_axis.id` to be {self.axis_id}, but got {ref_axis.id}."
    +377
    +378        assert axis.unit == ref_axis.unit, (
    +379            "`SizeReference` requires `axis` and `ref_axis` to have the same `unit`,"
    +380            f" but {axis.unit}!={ref_axis.unit}"
    +381        )
    +382        if ref_size is None:
    +383            if isinstance(ref_axis.size, (int, float)):
    +384                ref_size = ref_axis.size
    +385            elif isinstance(ref_axis.size, ParameterizedSize):
    +386                ref_size = ref_axis.size.get_size(n)
    +387            elif isinstance(ref_axis.size, DataDependentSize):
    +388                raise ValueError(
    +389                    "Reference axis referenced in `SizeReference` may not be a `DataDependentSize`."
    +390                )
    +391            elif isinstance(ref_axis.size, SizeReference):
    +392                raise ValueError(
    +393                    "Reference axis referenced in `SizeReference` may not be sized by a"
    +394                    + " `SizeReference` itself."
    +395                )
    +396            else:
    +397                assert_never(ref_axis.size)
    +398
    +399        return int(ref_size * ref_axis.scale / axis.scale + self.offset)
    +
    + + +

    Compute the concrete size for a given axis and its reference axis.

    + +
    Arguments:
    + +
      +
    • axis: The axis this SizeReference is the size of.
    • ref_axis: The reference axis to compute the size from.
    • n: If the ref_axis is parameterized (of type ParameterizedSize) and no fixed
      ref_size is given, n is used to compute the size of the parameterized ref_axis.
    • ref_size: Overwrite the reference size instead of deriving it from ref_axis
      (ref_axis.scale is still used; any given n is ignored).
    +
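    To make the arithmetic concrete, here is a minimal plain-Python sketch of the
    computation performed by `get_size` (the values mirror the w/h example above;
    per note 3, fractions are rounded down by the final integer cast):

    >>> ref_size, ref_scale = 100, 2.0   # reference axis: 100 px at 2 mm/px
    >>> scale, offset = 4.0, -1          # this axis: 4 mm/px, offset -1
    >>> int(ref_size * ref_scale / scale + offset)
    49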
    + + +
    +
    +
    + +
    + + class + AxisBase(bioimageio.spec._internal.common_nodes.NodeWithExplicitlySetFields): + + + +
    + +
    420class AxisBase(NodeWithExplicitlySetFields):
    +421    fields_to_set_explicitly: ClassVar[FrozenSet[LiteralString]] = frozenset({"type"})
    +422
    +423    id: AxisId
    +424    """An axis id unique across all axes of one tensor."""
    +425
    +426    description: Annotated[str, MaxLen(128)] = ""
    +
    + + +

    Subpart of a resource description

    +
    + + +
    +
    + fields_to_set_explicitly: ClassVar[FrozenSet[LiteralString]] = +frozenset({'type'}) + + +
    + + +

    Set these fields explicitly with their default value if they are not set,
    such that they are always included even when dumping with 'exclude_unset'.

    +
    + + +
    +
    +
    + id: AxisId + + +
    + + +

    An axis id unique across all axes of one tensor.

    +
    + + +
    +
    +
    + description: Annotated[str, MaxLen(max_length=128)] + + +
    + + + + +
    +
    +
    + +
    + + class + WithHalo(bioimageio.spec._internal.node.Node): + + + +
    + +
    429class WithHalo(Node):
    +430    halo: Annotated[int, Ge(1)]
    +431    """The halo should be cropped from the output tensor to avoid boundary effects.
    +432    It is to be cropped from both sides, i.e. `size_after_crop = size - 2 * halo`.
    +433    To document a halo that is already cropped by the model use `size.offset` instead."""
    +434
    +435    size: Annotated[
    +436        SizeReference,
    +437        Field(
    +438            examples=[
    +439                10,
    +440                SizeReference(
    +441                    tensor_id=TensorId("t"), axis_id=AxisId("a"), offset=5
    +442                ).model_dump(mode="json"),
    +443            ]
    +444        ),
    +445    ]
    +446    """reference to another axis with an optional offset (see `SizeReference`)"""
    +
    + + +

    Subpart of a resource description

    +
    + + +
    +
    + halo: Annotated[int, Ge(ge=1)] + + +
    + + +

    The halo should be cropped from the output tensor to avoid boundary effects.
    It is to be cropped from both sides, i.e. size_after_crop = size - 2 * halo.
    To document a halo that is already cropped by the model use size.offset instead.

    +
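    As an illustration of the cropping rule `size_after_crop = size - 2 * halo`, a minimal
    numpy sketch (the array and its shape are hypothetical):

    >>> import numpy as np
    >>> out = np.zeros((1, 128, 128))             # hypothetical output block, spatial shape 128x128
    >>> halo = 16
    >>> cropped = out[:, halo:-halo, halo:-halo]  # crop the halo from both sides of each spatial axis
    >>> cropped.shape                             # 128 - 2 * 16 = 96
    (1, 96, 96)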
    + + +
    +
    +
    + size: Annotated[SizeReference, FieldInfo(annotation=NoneType, required=True, examples=[10, {'tensor_id': 't', 'axis_id': 'a', 'offset': 5}])] + + +
    + + +

    reference to another axis with an optional offset (see SizeReference)

    +
    + + +
    +
    +
    +
    + BATCH_AXIS_ID = +'batch' + + +
    + + + + +
    +
    + +
    + + class + BatchAxis(AxisBase): + + + +
    + +
    452class BatchAxis(AxisBase):
    +453    type: Literal["batch"] = "batch"
    +454    id: Annotated[AxisId, Predicate(_is_batch)] = BATCH_AXIS_ID
    +455    size: Optional[Literal[1]] = None
    +456    """The batch size may be fixed to 1,
    +457    otherwise (the default) it may be chosen arbitrarily depending on available memory"""
    +458
    +459    @property
    +460    def scale(self):
    +461        return 1.0
    +462
    +463    @property
    +464    def concatenable(self):
    +465        return True
    +466
    +467    @property
    +468    def unit(self):
    +469        return None
    +
    + + +

    Subpart of a resource description

    +
    + + +
    +
    + type: Literal['batch'] + + +
    + + + + +
    +
    +
    + id: Annotated[AxisId, Predicate(_is_batch)] + + +
    + + +

    An axis id unique across all axes of one tensor.

    +
    + + +
    +
    +
    + size: Optional[Literal[1]] + + +
    + + +

    The batch size may be fixed to 1, +otherwise (the default) it may be chosen arbitrarily depending on available memory

    +
    + + +
    +
    + +
    + scale + + + +
    + +
    459    @property
    +460    def scale(self):
    +461        return 1.0
    +
    + + + + +
    +
    + +
    + concatenable + + + +
    + +
    463    @property
    +464    def concatenable(self):
    +465        return True
    +
    + + + + +
    +
    + +
    + unit + + + +
    + +
    467    @property
    +468    def unit(self):
    +469        return None
    +
    + + + + +
    +
    +
    Inherited Members
    +
    + +
    +
    +
    +
    + +
    + + class + ChannelAxis(AxisBase): + + + +
    + +
    472class ChannelAxis(AxisBase):
    +473    type: Literal["channel"] = "channel"
    +474    id: NonBatchAxisId = AxisId("channel")
    +475    channel_names: NotEmpty[List[Identifier]]
    +476
    +477    @property
    +478    def size(self) -> int:
    +479        return len(self.channel_names)
    +480
    +481    @property
    +482    def concatenable(self):
    +483        return False
    +484
    +485    @property
    +486    def scale(self) -> float:
    +487        return 1.0
    +488
    +489    @property
    +490    def unit(self):
    +491        return None
    +
    + + +

    Subpart of a resource description

    +
    + + +
    +
    + type: Literal['channel'] + + +
    + + + + +
    +
    +
    + id: Annotated[AxisId, Predicate(_is_not_batch)] + + +
    + + +

    An axis id unique across all axes of one tensor.

    +
    + + +
    +
    +
    + channel_names: Annotated[List[bioimageio.spec._internal.types.Identifier], MinLen(min_length=1)] + + +
    + + + + +
    +
    + +
    + size: int + + + +
    + +
    477    @property
    +478    def size(self) -> int:
    +479        return len(self.channel_names)
    +
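    For example, the `size` of a channel axis is simply the number of channel names
    (a minimal sketch; the plain strings are assumed to validate as `Identifier`s):

    >>> rgb = ChannelAxis(channel_names=["red", "green", "blue"])
    >>> rgb.size
    3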
    + + + + +
    +
    + +
    + concatenable + + + +
    + +
    481    @property
    +482    def concatenable(self):
    +483        return False
    +
    + + + + +
    +
    + +
    + scale: float + + + +
    + +
    485    @property
    +486    def scale(self) -> float:
    +487        return 1.0
    +
    + + + + +
    +
    + +
    + unit + + + +
    + +
    489    @property
    +490    def unit(self):
    +491        return None
    +
    + + + + +
    +
    +
    Inherited Members
    +
    + +
    +
    +
    +
    + +
    + + class + IndexAxisBase(AxisBase): + + + +
    + +
    494class IndexAxisBase(AxisBase):
    +495    type: Literal["index"] = "index"
    +496    id: NonBatchAxisId = AxisId("index")
    +497
    +498    @property
    +499    def scale(self) -> float:
    +500        return 1.0
    +501
    +502    @property
    +503    def unit(self):
    +504        return None
    +
    + + +

    Subpart of a resource description

    +
    + + +
    +
    + type: Literal['index'] + + +
    + + + + +
    +
    +
    + id: Annotated[AxisId, Predicate(_is_not_batch)] + + +
    + + +

    An axis id unique across all axes of one tensor.

    +
    + + +
    +
    + +
    + scale: float + + + +
    + +
    498    @property
    +499    def scale(self) -> float:
    +500        return 1.0
    +
    + + + + +
    +
    + +
    + unit + + + +
    + +
    502    @property
    +503    def unit(self):
    +504        return None
    +
    + + + + +
    +
    +
    Inherited Members
    +
    + +
    +
    +
    +
    + +
    + + class + IndexInputAxis(IndexAxisBase, _WithInputAxisSize): + + + +
    + +
    527class IndexInputAxis(IndexAxisBase, _WithInputAxisSize):
    +528    concatenable: bool = False
    +529    """If a model has a `concatenable` input axis, it can be processed blockwise,
    +530    splitting a longer sample axis into blocks matching its input tensor description.
    +531    Output axes are concatenable if they have a `SizeReference` to a concatenable
    +532    input axis.
    +533    """
    +
    + + +

    Subpart of a resource description

    +
    + + +
    +
    + concatenable: bool + + +
    + + +

    If a model has a concatenable input axis, it can be processed blockwise, +splitting a longer sample axis into blocks matching its input tensor description. +Output axes are concatenable if they have a SizeReference to a concatenable +input axis.

    +
    + + +
    +
    +
    Inherited Members
    +
    + + + +
    +
    +
    +
    + +
    + + class + IndexOutputAxis(IndexAxisBase): + + + +
    + +
    536class IndexOutputAxis(IndexAxisBase):
    +537    size: Annotated[
    +538        Union[Annotated[int, Gt(0)], SizeReference, DataDependentSize],
    +539        Field(
    +540            examples=[
    +541                10,
    +542                SizeReference(
    +543                    tensor_id=TensorId("t"), axis_id=AxisId("a"), offset=5
    +544                ).model_dump(mode="json"),
    +545            ]
    +546        ),
    +547    ]
    +548    """The size/length of this axis can be specified as
    +549    - fixed integer
    +550    - reference to another axis with an optional offset (`SizeReference`)
    +551    - data dependent size using `DataDependentSize` (size is only known after model inference)
    +552    """
    +
    + + +

    Subpart of a resource description

    +
    + + +
    +
    + size: Annotated[Union[Annotated[int, Gt(gt=0)], SizeReference, DataDependentSize], FieldInfo(annotation=NoneType, required=True, examples=[10, {'tensor_id': 't', 'axis_id': 'a', 'offset': 5}])] + + +
    + + +

    The size/length of this axis can be specified as

    + +
      +
    • fixed integer
    • reference to another axis with an optional offset (SizeReference)
    • data dependent size using DataDependentSize (size is only known after model inference)
    +
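    A minimal sketch of the first two options (the `SizeReference` arguments mirror the
    field example above; the tensor and axis ids are hypothetical):

    >>> fixed = IndexOutputAxis(size=10)                 # fixed integer
    >>> referenced = IndexOutputAxis(
    ...     size=SizeReference(tensor_id=TensorId("t"), axis_id=AxisId("a"), offset=5)
    ... )                                                # reference to another axis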
    + + +
    +
    +
    Inherited Members
    +
    + + +
    +
    +
    +
    + +
    + + class + TimeAxisBase(AxisBase): + + + +
    + +
    555class TimeAxisBase(AxisBase):
    +556    type: Literal["time"] = "time"
    +557    id: NonBatchAxisId = AxisId("time")
    +558    unit: Optional[TimeUnit] = None
    +559    scale: Annotated[float, Gt(0)] = 1.0
    +
    + + +

    Subpart of a resource description

    +
    + + +
    +
    + type: Literal['time'] + + +
    + + + + +
    +
    +
    + id: Annotated[AxisId, Predicate(_is_not_batch)] + + +
    + + +

    An axis id unique across all axes of one tensor.

    +
    + + +
    +
    +
    + unit: Optional[Literal['attosecond', 'centisecond', 'day', 'decisecond', 'exasecond', 'femtosecond', 'gigasecond', 'hectosecond', 'hour', 'kilosecond', 'megasecond', 'microsecond', 'millisecond', 'minute', 'nanosecond', 'petasecond', 'picosecond', 'second', 'terasecond', 'yoctosecond', 'yottasecond', 'zeptosecond', 'zettasecond']] + + +
    + + + + +
    +
    +
    + scale: Annotated[float, Gt(gt=0)] + + +
    + + + + +
    +
    +
    Inherited Members
    +
    + +
    +
    +
    +
    + +
    + + class + TimeInputAxis(TimeAxisBase, _WithInputAxisSize): + + + +
    + +
    562class TimeInputAxis(TimeAxisBase, _WithInputAxisSize):
    +563    concatenable: bool = False
    +564    """If a model has a `concatenable` input axis, it can be processed blockwise,
    +565    splitting a longer sample axis into blocks matching its input tensor description.
    +566    Output axes are concatenable if they have a `SizeReference` to a concatenable
    +567    input axis.
    +568    """
    +
    + + +

    Subpart of a resource description

    +
    + + +
    +
    + concatenable: bool + + +
    + + +

    If a model has a concatenable input axis, it can be processed blockwise, +splitting a longer sample axis into blocks matching its input tensor description. +Output axes are concatenable if they have a SizeReference to a concatenable +input axis.

    +
    + + +
    +
    +
    Inherited Members
    +
    + + + +
    +
    +
    +
    + +
    + + class + SpaceAxisBase(AxisBase): + + + +
    + +
    571class SpaceAxisBase(AxisBase):
    +572    type: Literal["space"] = "space"
    +573    id: Annotated[NonBatchAxisId, Field(examples=["x", "y", "z"])] = AxisId("x")
    +574    unit: Optional[SpaceUnit] = None
    +575    scale: Annotated[float, Gt(0)] = 1.0
    +
    + + +

    Subpart of a resource description

    +
    + + +
    +
    + type: Literal['space'] + + +
    + + + + +
    +
    +
    + id: Annotated[AxisId, Predicate(_is_not_batch), FieldInfo(annotation=NoneType, required=True, examples=['x', 'y', 'z'])] + + +
    + + +

    An axis id unique across all axes of one tensor.

    +
    + + +
    +
    +
    + unit: Optional[Literal['attometer', 'angstrom', 'centimeter', 'decimeter', 'exameter', 'femtometer', 'foot', 'gigameter', 'hectometer', 'inch', 'kilometer', 'megameter', 'meter', 'micrometer', 'mile', 'millimeter', 'nanometer', 'parsec', 'petameter', 'picometer', 'terameter', 'yard', 'yoctometer', 'yottameter', 'zeptometer', 'zettameter']] + + +
    + + + + +
    +
    +
    + scale: Annotated[float, Gt(gt=0)] + + +
    + + + + +
    +
    +
    Inherited Members
    +
    + +
    +
    +
    +
    + +
    + + class + SpaceInputAxis(SpaceAxisBase, _WithInputAxisSize): + + + +
    + +
    578class SpaceInputAxis(SpaceAxisBase, _WithInputAxisSize):
    +579    concatenable: bool = False
    +580    """If a model has a `concatenable` input axis, it can be processed blockwise,
    +581    splitting a longer sample axis into blocks matching its input tensor description.
    +582    Output axes are concatenable if they have a `SizeReference` to a concatenable
    +583    input axis.
    +584    """
    +
    + + +

    Subpart of a resource description

    +
    + + +
    +
    + concatenable: bool + + +
    + + +

    If a model has a concatenable input axis, it can be processed blockwise, +splitting a longer sample axis into blocks matching its input tensor description. +Output axes are concatenable if they have a SizeReference to a concatenable +input axis.

    +
    + + +
    +
    +
    Inherited Members
    +
    + + + +
    +
    +
    +
    +
    + InputAxis = + + typing.Annotated[typing.Union[BatchAxis, ChannelAxis, IndexInputAxis, TimeInputAxis, SpaceInputAxis], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)] + + +
    + + + + +
    +
    + +
    + + class + TimeOutputAxis(TimeAxisBase, _WithOutputAxisSize): + + + +
    + +
    611class TimeOutputAxis(TimeAxisBase, _WithOutputAxisSize):
    +612    pass
    +
    + + +

    Subpart of a resource description

    +
    + + +
    +
    Inherited Members
    +
    + + + +
    +
    +
    +
    + +
    + + class + TimeOutputAxisWithHalo(TimeAxisBase, WithHalo): + + + +
    + +
    615class TimeOutputAxisWithHalo(TimeAxisBase, WithHalo):
    +616    pass
    +
    + + +

    Subpart of a resource description

    +
    + + +
    +
    Inherited Members
    +
    + + +
    WithHalo
    +
    halo
    +
    size
    + +
    +
    +
    +
    +
    + +
    + + class + SpaceOutputAxis(SpaceAxisBase, _WithOutputAxisSize): + + + +
    + +
    635class SpaceOutputAxis(SpaceAxisBase, _WithOutputAxisSize):
    +636    pass
    +
    + + +

    Subpart of a resource description

    +
    + + +
    +
    Inherited Members
    +
    + + + +
    +
    +
    +
    + +
    + + class + SpaceOutputAxisWithHalo(SpaceAxisBase, WithHalo): + + + +
    + +
    639class SpaceOutputAxisWithHalo(SpaceAxisBase, WithHalo):
    +640    pass
    +
    + + +

    Subpart of a resource description

    +
    + + +
    +
    Inherited Members
    +
    + + +
    WithHalo
    +
    halo
    +
    size
    + +
    +
    +
    +
    +
    +
    + OutputAxis = + + typing.Annotated[typing.Union[BatchAxis, ChannelAxis, IndexOutputAxis, typing.Annotated[typing.Union[typing.Annotated[TimeOutputAxis, Tag(tag='wo_halo')], typing.Annotated[TimeOutputAxisWithHalo, Tag(tag='with_halo')]], Discriminator(discriminator=<function _get_halo_axis_discriminator_value>, custom_error_type=None, custom_error_message=None, custom_error_context=None)], typing.Annotated[typing.Union[typing.Annotated[SpaceOutputAxis, Tag(tag='wo_halo')], typing.Annotated[SpaceOutputAxisWithHalo, Tag(tag='with_halo')]], Discriminator(discriminator=<function _get_halo_axis_discriminator_value>, custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)] + + +
    + + + + +
    +
    +
    + AnyAxis = + + typing.Union[typing.Annotated[typing.Union[BatchAxis, ChannelAxis, IndexInputAxis, TimeInputAxis, SpaceInputAxis], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], typing.Annotated[typing.Union[BatchAxis, ChannelAxis, IndexOutputAxis, typing.Annotated[typing.Union[typing.Annotated[TimeOutputAxis, Tag(tag='wo_halo')], typing.Annotated[TimeOutputAxisWithHalo, Tag(tag='with_halo')]], Discriminator(discriminator=<function _get_halo_axis_discriminator_value>, custom_error_type=None, custom_error_message=None, custom_error_context=None)], typing.Annotated[typing.Union[typing.Annotated[SpaceOutputAxis, Tag(tag='wo_halo')], typing.Annotated[SpaceOutputAxisWithHalo, Tag(tag='with_halo')]], Discriminator(discriminator=<function _get_halo_axis_discriminator_value>, custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)]] + + +
    + + + + +
    +
    +
    + TVs = + + typing.Union[typing.Annotated[typing.List[int], MinLen(min_length=1)], typing.Annotated[typing.List[float], MinLen(min_length=1)], typing.Annotated[typing.List[bool], MinLen(min_length=1)], typing.Annotated[typing.List[str], MinLen(min_length=1)]] + + +
    + + + + +
    +
    +
    + NominalOrOrdinalDType = + + typing.Literal['float32', 'float64', 'uint8', 'int8', 'uint16', 'int16', 'uint32', 'int32', 'uint64', 'int64', 'bool'] + + +
    + + + + +
    +
    + +
    + + class + NominalOrOrdinalDataDescr(bioimageio.spec._internal.node.Node): + + + +
    + +
    682class NominalOrOrdinalDataDescr(Node):
    +683    values: TVs
    +684    """A fixed set of nominal or an ascending sequence of ordinal values.
    +685    In this case `data_type` is required to be an unsigned integer type, e.g. 'uint8'.
    +686    String `values` are interpreted as labels for tensor values 0, ..., N.
    +687    Note: as YAML 1.2 does not natively support a "set" datatype,
    +688    nominal values should be given as a sequence (aka list/array) as well.
    +689    """
    +690
    +691    type: Annotated[
    +692        NominalOrOrdinalDType,
    +693        Field(
    +694            examples=[
    +695                "float32",
    +696                "uint8",
    +697                "uint16",
    +698                "int64",
    +699                "bool",
    +700            ],
    +701        ),
    +702    ] = "uint8"
    +703
    +704    @model_validator(mode="after")
    +705    def _validate_values_match_type(
    +706        self,
    +707    ) -> Self:
    +708        incompatible: List[Any] = []
    +709        for v in self.values:
    +710            if self.type == "bool":
    +711                if not isinstance(v, bool):
    +712                    incompatible.append(v)
    +713            elif self.type in DTYPE_LIMITS:
    +714                if (
    +715                    isinstance(v, (int, float))
    +716                    and (
    +717                        v < DTYPE_LIMITS[self.type].min
    +718                        or v > DTYPE_LIMITS[self.type].max
    +719                    )
    +720                    or (isinstance(v, str) and "uint" not in self.type)
    +721                    or (isinstance(v, float) and "int" in self.type)
    +722                ):
    +723                    incompatible.append(v)
    +724            else:
    +725                incompatible.append(v)
    +726
    +727            if len(incompatible) == 5:
    +728                incompatible.append("...")
    +729                break
    +730
    +731        if incompatible:
    +732            raise ValueError(
    +733                f"data type '{self.type}' incompatible with values {incompatible}"
    +734            )
    +735
    +736        return self
    +737
    +738    unit: Optional[Union[Literal["arbitrary unit"], SiUnit]] = None
    +739
    +740    @property
    +741    def range(self):
    +742        if isinstance(self.values[0], str):
    +743            return 0, len(self.values) - 1
    +744        else:
    +745            return min(self.values), max(self.values)
    +
    + + +

    Subpart of a resource description

    +
    + + +
    +
    + values: Union[Annotated[List[int], MinLen(min_length=1)], Annotated[List[float], MinLen(min_length=1)], Annotated[List[bool], MinLen(min_length=1)], Annotated[List[str], MinLen(min_length=1)]] + + +
    + + +

    A fixed set of nominal or an ascending sequence of ordinal values.
    In this case data_type is required to be an unsigned integer type, e.g. 'uint8'.
    String values are interpreted as labels for tensor values 0, ..., N.
    Note: as YAML 1.2 does not natively support a "set" datatype,
    nominal values should be given as a sequence (aka list/array) as well.

    +
    + + +
    +
    +
    + type: Annotated[Literal['float32', 'float64', 'uint8', 'int8', 'uint16', 'int16', 'uint32', 'int32', 'uint64', 'int64', 'bool'], FieldInfo(annotation=NoneType, required=True, examples=['float32', 'uint8', 'uint16', 'int64', 'bool'])] + + +
    + + + + +
    +
    +
    + unit: Union[Literal['arbitrary unit'], bioimageio.spec._internal.types.SiUnit, NoneType] + + +
    + + + + +
    +
    + +
    + range + + + +
    + +
    740    @property
    +741    def range(self):
    +742        if isinstance(self.values[0], str):
    +743            return 0, len(self.values) - 1
    +744        else:
    +745            return min(self.values), max(self.values)
    +
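    To illustrate the `range` property: for string `values` (labels) it spans the label
    indices 0, ..., N-1, while for numeric `values` it is their minimum and maximum
    (a minimal sketch with hypothetical values):

    >>> labels = NominalOrOrdinalDataDescr(values=["background", "foreground"], type="uint8")
    >>> labels.range
    (0, 1)
    >>> ordinal = NominalOrOrdinalDataDescr(values=[1, 2, 4], type="uint8")
    >>> ordinal.range
    (1, 4)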
    + + + + +
    +
    +
    +
    + IntervalOrRatioDType = + + typing.Literal['float32', 'float64', 'uint8', 'int8', 'uint16', 'int16', 'uint32', 'int32', 'uint64', 'int64'] + + +
    + + + + +
    +
    + +
    + + class + IntervalOrRatioDataDescr(bioimageio.spec._internal.node.Node): + + + +
    + +
    762class IntervalOrRatioDataDescr(Node):
    +763    type: Annotated[  # todo: rename to dtype
    +764        IntervalOrRatioDType,
    +765        Field(
    +766            examples=["float32", "float64", "uint8", "uint16"],
    +767        ),
    +768    ] = "float32"
    +769    range: Tuple[Optional[float], Optional[float]] = (
    +770        None,
    +771        None,
    +772    )
    +773    """Tuple `(minimum, maximum)` specifying the allowed range of the data in this tensor.
    +774    `None` corresponds to min/max of what can be expressed by `data_type`."""
    +775    unit: Union[Literal["arbitrary unit"], SiUnit] = "arbitrary unit"
    +776    scale: float = 1.0
    +777    """Scale for data on an interval (or ratio) scale."""
    +778    offset: Optional[float] = None
    +779    """Offset for data on a ratio scale."""
    +
    + + +

    Subpart of a resource description

    +
    + + +
    +
    + type: Annotated[Literal['float32', 'float64', 'uint8', 'int8', 'uint16', 'int16', 'uint32', 'int32', 'uint64', 'int64'], FieldInfo(annotation=NoneType, required=True, examples=['float32', 'float64', 'uint8', 'uint16'])] + + +
    + + + + +
    +
    +
    + range: Tuple[Optional[float], Optional[float]] + + +
    + + +

    Tuple (minimum, maximum) specifying the allowed range of the data in this tensor. +None corresponds to min/max of what can be expressed by data_type.

    +
    + + +
    +
    +
    + unit: Union[Literal['arbitrary unit'], bioimageio.spec._internal.types.SiUnit] + + +
    + + + + +
    +
    +
    + scale: float + + +
    + + +

    Scale for data on an interval (or ratio) scale.

    +
    + + +
    +
    +
    + offset: Optional[float] + + +
    + + +

    Offset for data on a ratio scale.

    +
    + + +
    +
    +
    +
    + TensorDataDescr = + + typing.Union[NominalOrOrdinalDataDescr, IntervalOrRatioDataDescr] + + +
    + + + + +
    +
    + +
    + + class + ProcessingDescrBase(bioimageio.spec._internal.common_nodes.NodeWithExplicitlySetFields, abc.ABC): + + + +
    + +
    785class ProcessingDescrBase(NodeWithExplicitlySetFields, ABC):
    +786    """processing base class"""
    +787
    +788    # id: Literal[PreprocessingId, PostprocessingId]  # make abstract field
    +789    fields_to_set_explicitly: ClassVar[FrozenSet[LiteralString]] = frozenset({"id"})
    +
    + + +

    processing base class

    +
    + + +
    +
    + fields_to_set_explicitly: ClassVar[FrozenSet[LiteralString]] = +frozenset({'id'}) + + +
    + + +

    Set these fields explicitly with their default value if they are not set,
    such that they are always included even when dumping with 'exclude_unset'.

    +
    + + +
    +
    +
    + +
    + + class + BinarizeKwargs(bioimageio.spec.model.v0_4.ProcessingKwargs): + + + +
    + +
    792class BinarizeKwargs(ProcessingKwargs):
    +793    """key word arguments for `BinarizeDescr`"""
    +794
    +795    threshold: float
    +796    """The fixed threshold"""
    +
    + + +

    key word arguments for BinarizeDescr

    +
    + + +
    +
    + threshold: float + + +
    + + +

    The fixed threshold

    +
    + + +
    +
    +
    + +
    + + class + BinarizeAlongAxisKwargs(bioimageio.spec.model.v0_4.ProcessingKwargs): + + + +
    + +
    799class BinarizeAlongAxisKwargs(ProcessingKwargs):
    +800    """key word arguments for `BinarizeDescr`"""
    +801
    +802    threshold: NotEmpty[List[float]]
    +803    """The fixed threshold values along `axis`"""
    +804
    +805    axis: Annotated[NonBatchAxisId, Field(examples=["channel"])]
    +806    """The `threshold` axis"""
    +
    + + +

    key word arguments for BinarizeDescr

    +
    + + +
    +
    + threshold: Annotated[List[float], MinLen(min_length=1)] + + +
    + + +

    The fixed threshold values along axis

    +
    + + +
    +
    +
    + axis: Annotated[AxisId, Predicate(_is_not_batch), FieldInfo(annotation=NoneType, required=True, examples=['channel'])] + + +
    + + +

    The threshold axis

    +
    + + +
    +
    +
    + +
    + + class + BinarizeDescr(ProcessingDescrBase): + + + +
    + +
    809class BinarizeDescr(ProcessingDescrBase):
    +810    """Binarize the tensor with a fixed threshold.
    +811
    +812    Values above `BinarizeKwargs.threshold`/`BinarizeAlongAxisKwargs.threshold`
    +813    will be set to one, values below the threshold to zero.
    +814    """
    +815
    +816    id: Literal["binarize"] = "binarize"
    +817    kwargs: Union[BinarizeKwargs, BinarizeAlongAxisKwargs]
    +
    + + +

    Binarize the tensor with a fixed threshold.

    + +

    Values above BinarizeKwargs.threshold/BinarizeAlongAxisKwargs.threshold
    will be set to one, values below the threshold to zero.

    +
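    The effect can be sketched with numpy (a minimal, hypothetical illustration of the
    rule above, not the package's prediction implementation):

    >>> import numpy as np
    >>> x = np.array([0.1, 0.5, 0.9], dtype="float32")
    >>> threshold = 0.5
    >>> (x > threshold).astype(x.dtype)
    array([0., 0., 1.], dtype=float32)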
    + + +
    +
    + id: Literal['binarize'] + + +
    + + + + +
    +
    +
    + kwargs: Union[BinarizeKwargs, BinarizeAlongAxisKwargs] + + +
    + + + + +
    +
    +
    Inherited Members
    +
    + +
    +
    +
    +
    + +
    + + class + ClipDescr(ProcessingDescrBase): + + + +
    + +
    820class ClipDescr(ProcessingDescrBase):
    +821    """Set tensor values below min to min and above max to max."""
    +822
    +823    id: Literal["clip"] = "clip"
    +824    kwargs: ClipKwargs
    +
    + + +

    Set tensor values below min to min and above max to max.

    +
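    A minimal numpy sketch of the clipping rule (the `min`/`max` bounds are assumed to be
    provided via `ClipKwargs`):

    >>> import numpy as np
    >>> np.clip(np.array([-1.0, 0.5, 7.0]), 0.0, 1.0)   # clip to [min=0.0, max=1.0]
    array([0. , 0.5, 1. ])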
    + + +
    +
    + id: Literal['clip'] + + +
    + + + + +
    +
    + + + + + +
    +
    +
    Inherited Members
    +
    + +
    +
    +
    +
    + +
    + + class + EnsureDtypeKwargs(bioimageio.spec.model.v0_4.ProcessingKwargs): + + + +
    + +
    827class EnsureDtypeKwargs(ProcessingKwargs):
    +828    """key word arguments for `EnsureDtypeDescr`"""
    +829
    +830    dtype: Literal[
    +831        "float32",
    +832        "float64",
    +833        "uint8",
    +834        "int8",
    +835        "uint16",
    +836        "int16",
    +837        "uint32",
    +838        "int32",
    +839        "uint64",
    +840        "int64",
    +841        "bool",
    +842    ]
    +
    + + +

    key word arguments for EnsureDtypeDescr

    +
    + + +
    +
    + dtype: Literal['float32', 'float64', 'uint8', 'int8', 'uint16', 'int16', 'uint32', 'int32', 'uint64', 'int64', 'bool'] + + +
    + + + + +
    +
    +
    + +
    + + class + EnsureDtypeDescr(ProcessingDescrBase): + + + +
    + +
    845class EnsureDtypeDescr(ProcessingDescrBase):
    +846    """cast the tensor data type to `EnsureDtypeKwargs.dtype` (if not matching)"""
    +847
    +848    id: Literal["ensure_dtype"] = "ensure_dtype"
    +849    kwargs: EnsureDtypeKwargs
    +
    + + +

    cast the tensor data type to EnsureDtypeKwargs.dtype (if not matching)

    +
    + + +
    +
    + id: Literal['ensure_dtype'] + + +
    + + + + +
    +
    +
    + kwargs: EnsureDtypeKwargs + + +
    + + + + +
    +
    +
    Inherited Members
    +
    + +
    +
    +
    +
    + +
    + + class + ScaleLinearKwargs(bioimageio.spec.model.v0_4.ProcessingKwargs): + + + +
    + +
    852class ScaleLinearKwargs(ProcessingKwargs):
    +853    """key word arguments for `ScaleLinearDescr`"""
    +854
    +855    gain: float = 1.0
    +856    """multiplicative factor"""
    +857
    +858    offset: float = 0.0
    +859    """additive term"""
    +860
    +861    @model_validator(mode="after")
    +862    def _validate(self) -> Self:
    +863        if self.gain == 1.0 and self.offset == 0.0:
    +864            raise ValueError(
    +865                "Redundant linear scaling not allowed. Set `gain` != 1.0 and/or `offset`
    +866                + " != 0.0."
    +867            )
    +868
    +869        return self
    +
    + + +

    key word arguments for ScaleLinearDescr

    +
    + + +
    +
    + gain: float + + +
    + + +

    multiplicative factor

    +
    + + +
    +
    +
    + offset: float + + +
    + + +

    additive term

    +
    + + +
    +
    +
    + +
    + + class + ScaleLinearAlongAxisKwargs(bioimageio.spec.model.v0_4.ProcessingKwargs): + + + +
    + +
    872class ScaleLinearAlongAxisKwargs(ProcessingKwargs):
    +873    """key word arguments for `ScaleLinearDescr`"""
    +874
    +875    axis: Annotated[NonBatchAxisId, Field(examples=["channel"])]
    +876    """The axis of gains/offsets values."""
    +877
    +878    gain: Union[float, NotEmpty[List[float]]] = 1.0
    +879    """multiplicative factor"""
    +880
    +881    offset: Union[float, NotEmpty[List[float]]] = 0.0
    +882    """additive term"""
    +883
    +884    @model_validator(mode="after")
    +885    def _validate(self) -> Self:
    +886
    +887        if isinstance(self.gain, list):
    +888            if isinstance(self.offset, list):
    +889                if len(self.gain) != len(self.offset):
    +890                    raise ValueError(
    +891                        f"Size of `gain` ({len(self.gain)}) and `offset` ({len(self.offset)}) must match."
    +892                    )
    +893            else:
    +894                self.offset = [float(self.offset)] * len(self.gain)
    +895        elif isinstance(self.offset, list):
    +896            self.gain = [float(self.gain)] * len(self.offset)
    +897        else:
    +898            raise ValueError(
    +899                "Do not specify an `axis` for scalar gain and offset values."
    +900            )
    +901
    +902        if all(g == 1.0 for g in self.gain) and all(off == 0.0 for off in self.offset):
    +903            raise ValueError(
    +904                "Redundant linear scaling not allowed. Set `gain` != 1.0 and/or `offset`
    +905                + " != 0.0."
    +906            )
    +907
    +908        return self
    +
    + + +

    key word arguments for ScaleLinearDescr

    +
    + + +
    +
    + axis: Annotated[AxisId, Predicate(_is_not_batch), FieldInfo(annotation=NoneType, required=True, examples=['channel'])] + + +
    + + +

    The axis of gains/offsets values.

    +
    + + +
    +
    +
    + gain: Union[float, Annotated[List[float], MinLen(min_length=1)]] + + +
    + + +

    multiplicative factor

    +
    + + +
    +
    +
    + offset: Union[float, Annotated[List[float], MinLen(min_length=1)]] + + +
    + + +

    additive term

    +
    + + +
    +
    +
    + +
    + + class + ScaleLinearDescr(ProcessingDescrBase): + + + +
    + +
    911class ScaleLinearDescr(ProcessingDescrBase):
    +912    """Fixed linear scaling."""
    +913
    +914    id: Literal["scale_linear"] = "scale_linear"
    +915    kwargs: Union[ScaleLinearKwargs, ScaleLinearAlongAxisKwargs]
    +
    + + +

    Fixed linear scaling.

    +
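    The operation applies the multiplicative `gain` and additive `offset`; with
    `ScaleLinearAlongAxisKwargs` one gain/offset per entry along the given axis is used.
    A minimal numpy sketch (the channel-last layout is a hypothetical choice):

    >>> import numpy as np
    >>> x = np.ones((2, 2, 3))                  # hypothetical ('y', 'x', 'channel') tensor
    >>> gain = np.array([1.0, 2.0, 0.5])        # one gain per channel
    >>> offset = np.array([0.0, -1.0, 3.0])     # one offset per channel
    >>> out = gain * x + offset                 # broadcasts along the channel axis
    >>> out[0, 0]
    array([1. , 1. , 3.5])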
    + + +
    +
    + id: Literal['scale_linear'] + + +
    + + + + +
    +
    + + + + + +
    +
    +
    Inherited Members
    +
    + +
    +
    +
    +
    + +
    + + class + SigmoidDescr(ProcessingDescrBase): + + + +
    + +
    918class SigmoidDescr(ProcessingDescrBase):
    +919    """The logistic sigmoid function, a.k.a. expit function."""
    +920
    +921    id: Literal["sigmoid"] = "sigmoid"
    +922
    +923    @property
    +924    def kwargs(self) -> ProcessingKwargs:
    +925        """empty kwargs"""
    +926        return ProcessingKwargs()
    +
    + + +

    The logistic sigmoid function, a.k.a. expit function.

    +
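    A minimal numpy sketch of the sigmoid (expit) function:

    >>> import numpy as np
    >>> x = np.array([-2.0, 0.0, 2.0])
    >>> 1.0 / (1.0 + np.exp(-x))
    array([0.11920292, 0.5       , 0.88079708])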
    + + +
    +
    + id: Literal['sigmoid'] + + +
    + + + + +
    +
    + +
    + kwargs: bioimageio.spec.model.v0_4.ProcessingKwargs + + + +
    + +
    923    @property
    +924    def kwargs(self) -> ProcessingKwargs:
    +925        """empty kwargs"""
    +926        return ProcessingKwargs()
    +
    + + +

    empty kwargs

    +
    + + +
    +
    +
    Inherited Members
    +
    + +
    +
    +
    +
    + +
    + + class + FixedZeroMeanUnitVarianceKwargs(bioimageio.spec.model.v0_4.ProcessingKwargs): + + + +
    + +
    929class FixedZeroMeanUnitVarianceKwargs(ProcessingKwargs):
    +930    """key word arguments for `FixedZeroMeanUnitVarianceDescr`"""
    +931
    +932    mean: float
    +933    """The mean value to normalize with."""
    +934
    +935    std: Annotated[float, Ge(1e-6)]
    +936    """The standard deviation value to normalize with."""
    +
    + + +

    key word arguments for FixedZeroMeanUnitVarianceDescr

    +
    + + +
    +
    + mean: float + + +
    + + +

    The mean value to normalize with.

    +
    + + +
    +
    +
    + std: Annotated[float, Ge(ge=1e-06)] + + +
    + + +

    The standard deviation value to normalize with.

    +
    + + +
    +
    +
    + +
    + + class + FixedZeroMeanUnitVarianceAlongAxisKwargs(bioimageio.spec.model.v0_4.ProcessingKwargs): + + + +
    + +
    939class FixedZeroMeanUnitVarianceAlongAxisKwargs(ProcessingKwargs):
    +940    """key word arguments for `FixedZeroMeanUnitVarianceDescr`"""
    +941
    +942    mean: NotEmpty[List[float]]
    +943    """The mean value(s) to normalize with."""
    +944
    +945    std: NotEmpty[List[Annotated[float, Ge(1e-6)]]]
    +946    """The standard deviation value(s) to normalize with.
    +947    Size must match `mean` values."""
    +948
    +949    axis: Annotated[NonBatchAxisId, Field(examples=["channel", "index"])]
    +950    """The axis of the mean/std values to normalize each entry along that dimension
    +951    separately."""
    +952
    +953    @model_validator(mode="after")
    +954    def _mean_and_std_match(self) -> Self:
    +955        if len(self.mean) != len(self.std):
    +956            raise ValueError(
    +957                f"Size of `mean` ({len(self.mean)}) and `std` ({len(self.std)})"
    +958                + " must match."
    +959            )
    +960
    +961        return self
    +
    + + +

    key word arguments for FixedZeroMeanUnitVarianceDescr

    +
    + + +
    +
    + mean: Annotated[List[float], MinLen(min_length=1)] + + +
    + + +

    The mean value(s) to normalize with.

    +
    + + +
    +
    +
    + std: Annotated[List[Annotated[float, Ge(ge=1e-06)]], MinLen(min_length=1)] + + +
    + + +

    The standard deviation value(s) to normalize with. +Size must match mean values.

    +
    + + +
    +
    +
    + axis: Annotated[AxisId, Predicate(_is_not_batch), FieldInfo(annotation=NoneType, required=True, examples=['channel', 'index'])] + + +
    + + +

    The axis of the mean/std values to normalize each entry along that dimension +separately.

    +
    + + +
    +
    +
    + +
    + + class + FixedZeroMeanUnitVarianceDescr(ProcessingDescrBase): + + + +
    + +
    964class FixedZeroMeanUnitVarianceDescr(ProcessingDescrBase):
    +965    """Subtract a given mean and divide by the standard deviation.
    +966
    +967    Normalize with fixed, precomputed values for
    +968    `FixedZeroMeanUnitVarianceKwargs.mean` and `FixedZeroMeanUnitVarianceKwargs.std`
    +969    Use `FixedZeroMeanUnitVarianceAlongAxisKwargs` for independent scaling along given
    +970    axes.
    +971    """
    +972
    +973    id: Literal["fixed_zero_mean_unit_variance"] = "fixed_zero_mean_unit_variance"
    +974    kwargs: Union[
    +975        FixedZeroMeanUnitVarianceKwargs, FixedZeroMeanUnitVarianceAlongAxisKwargs
    +976    ]
    +
    + + +

    Subtract a given mean and divide by the standard deviation.

    + +

    Normalize with fixed, precomputed values for
    FixedZeroMeanUnitVarianceKwargs.mean and FixedZeroMeanUnitVarianceKwargs.std.
    Use FixedZeroMeanUnitVarianceAlongAxisKwargs for independent scaling along given
    axes.

    +
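    A minimal numpy sketch of the normalization with fixed values, i.e.
    `out = (tensor - mean) / std` (the numbers are hypothetical):

    >>> import numpy as np
    >>> x = np.array([10.0, 20.0, 30.0])
    >>> mean, std = 20.0, 10.0              # precomputed, fixed values
    >>> (x - mean) / std
    array([-1.,  0.,  1.])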
    + + +
    +
    + id: Literal['fixed_zero_mean_unit_variance'] + + +
    + + + + +
    + +
    +
    Inherited Members
    +
    + +
    +
    +
    +
    + +
    + + class + ZeroMeanUnitVarianceKwargs(bioimageio.spec.model.v0_4.ProcessingKwargs): + + + +
    + +
    979class ZeroMeanUnitVarianceKwargs(ProcessingKwargs):
    +980    """key word arguments for `ZeroMeanUnitVarianceDescr`"""
    +981
    +982    axes: Annotated[
    +983        Optional[Sequence[AxisId]], Field(examples=[("batch", "x", "y")])
    +984    ] = None
    +985    """The subset of axes to normalize jointly, i.e. axes to reduce to compute mean/std.
    +986    For example to normalize 'batch', 'x' and 'y' jointly in a tensor ('batch', 'channel', 'y', 'x')
    +987    resulting in a tensor of equal shape normalized per channel, specify `axes=('batch', 'x', 'y')`.
    +988    To normalize each sample independently leave out the 'batch' axis.
    +989    Default: Scale all axes jointly."""
    +990
    +991    eps: Annotated[float, Interval(gt=0, le=0.1)] = 1e-6
    +992    """epsilon for numeric stability: `out = (tensor - mean) / (std + eps)`."""
    +
    + + +

    key word arguments for ZeroMeanUnitVarianceDescr

    +
    + + +
    +
    + axes: Annotated[Optional[Sequence[AxisId]], FieldInfo(annotation=NoneType, required=True, examples=[('batch', 'x', 'y')])] + + +
    + + +

    The subset of axes to normalize jointly, i.e. axes to reduce to compute mean/std.
    For example to normalize 'batch', 'x' and 'y' jointly in a tensor ('batch', 'channel', 'y', 'x')
    resulting in a tensor of equal shape normalized per channel, specify axes=('batch', 'x', 'y').
    To normalize each sample independently leave out the 'batch' axis.
    Default: Scale all axes jointly.

    +
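    A minimal numpy sketch of reducing over `axes=('batch', 'x', 'y')` for a
    ('batch', 'channel', 'y', 'x') tensor so that mean/std are computed per channel
    (the mapping of axis ids to array dimensions is a hypothetical choice):

    >>> import numpy as np
    >>> rng = np.random.default_rng(0)
    >>> t = rng.normal(size=(2, 3, 8, 8))        # ('batch', 'channel', 'y', 'x')
    >>> reduce_dims = (0, 2, 3)                  # 'batch', 'y', 'x' -> keep 'channel'
    >>> mean = t.mean(axis=reduce_dims, keepdims=True)
    >>> std = t.std(axis=reduce_dims, keepdims=True)
    >>> out = (t - mean) / (std + 1e-6)          # per-channel zero mean, unit variance
    >>> out.shape
    (2, 3, 8, 8)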
    + + +
    +
    +
    + eps: Annotated[float, Interval(gt=0, ge=None, lt=None, le=0.1)] + + +
    + + +

    epsilon for numeric stability: out = (tensor - mean) / (std + eps).

    +
    + + +
    +
    +
    + +
    + + class + ZeroMeanUnitVarianceDescr(ProcessingDescrBase): + + + +
    + +
    995class ZeroMeanUnitVarianceDescr(ProcessingDescrBase):
    +996    """Subtract mean and divide by variance."""
    +997
    +998    id: Literal["zero_mean_unit_variance"] = "zero_mean_unit_variance"
    +999    kwargs: ZeroMeanUnitVarianceKwargs
    +
    + + +

    Subtract mean and divide by variance.

    +
    + + +
    +
    + id: Literal['zero_mean_unit_variance'] + + +
    + + + + +
    +
    +
    + kwargs: ZeroMeanUnitVarianceKwargs + + +
    + + + + +
    +
    +
    Inherited Members
    +
    + +
    +
    +
    +
    + +
    + + class + ScaleRangeKwargs(bioimageio.spec.model.v0_4.ProcessingKwargs): + + + +
    + +
    1002class ScaleRangeKwargs(ProcessingKwargs):
    +1003    """key word arguments for `ScaleRangeDescr`
    +1004
    +1005    For `min_percentile`=0.0 (the default) and `max_percentile`=100 (the default)
    +1006    this processing step normalizes data to the [0, 1] interval.
    +1007    For other percentiles the normalized values will partially be outside the [0, 1]
    +1008    interval. Use `ScaleRange` followed by `ClipDescr` if you want to limit the
    +1009    normalized values to a range.
    +1010    """
    +1011
    +1012    axes: Annotated[
    +1013        Optional[Sequence[AxisId]], Field(examples=[("batch", "x", "y")])
    +1014    ] = None
    +1015    """The subset of axes to normalize jointly, i.e. axes to reduce to compute the min/max percentile value.
    +1016    For example to normalize 'batch', 'x' and 'y' jointly in a tensor ('batch', 'channel', 'y', 'x')
    +1017    resulting in a tensor of equal shape normalized per channel, specify `axes=('batch', 'x', 'y')`.
    +1018    To normalize samples independently, leave out the "batch" axis.
    +1019    Default: Scale all axes jointly."""
    +1020
    +1021    min_percentile: Annotated[float, Interval(ge=0, lt=100)] = 0.0
    +1022    """The lower percentile used to determine the value to align with zero."""
    +1023
    +1024    max_percentile: Annotated[float, Interval(gt=1, le=100)] = 100.0
    +1025    """The upper percentile used to determine the value to align with one.
    +1026    Has to be bigger than `min_percentile`.
    +1027    The range is 1 to 100 instead of 0 to 100 to avoid mistakenly
    +1028    accepting percentiles specified in the range 0.0 to 1.0."""
    +1029
    +1030    eps: Annotated[float, Interval(gt=0, le=0.1)] = 1e-6
    +1031    """Epsilon for numeric stability.
    +1032    `out = (tensor - v_lower) / (v_upper - v_lower + eps)`;
    +1033    with `v_lower,v_upper` values at the respective percentiles."""
    +1034
    +1035    reference_tensor: Optional[TensorId] = None
    +1036    """Tensor ID to compute the percentiles from. Default: The tensor itself.
    +1037    For any tensor in `inputs` only input tensor references are allowed."""
    +1038
    +1039    @field_validator("max_percentile", mode="after")
    +1040    @classmethod
    +1041    def min_smaller_max(cls, value: float, info: ValidationInfo) -> float:
    +1042        if (min_p := info.data["min_percentile"]) >= value:
    +1043            raise ValueError(f"min_percentile {min_p} >= max_percentile {value}")
    +1044
    +1045        return value
    +
    + + +

    key word arguments for ScaleRangeDescr

    + +

    For min_percentile=0.0 (the default) and max_percentile=100 (the default)
    this processing step normalizes data to the [0, 1] interval.
    For other percentiles the normalized values will partially be outside the [0, 1]
    interval. Use ScaleRange followed by ClipDescr if you want to limit the
    normalized values to a range.

    +
    + + +
    +
    + axes: Annotated[Optional[Sequence[AxisId]], FieldInfo(annotation=NoneType, required=True, examples=[('batch', 'x', 'y')])] + + +
    + + +

    The subset of axes to normalize jointly, i.e. axes to reduce to compute the min/max percentile value.
    For example to normalize 'batch', 'x' and 'y' jointly in a tensor ('batch', 'channel', 'y', 'x')
    resulting in a tensor of equal shape normalized per channel, specify axes=('batch', 'x', 'y').
    To normalize samples independently, leave out the "batch" axis.
    Default: Scale all axes jointly.

    +
    + + +
    +
    +
    + min_percentile: Annotated[float, Interval(gt=None, ge=0, lt=100, le=None)] + + +
    + + +

    The lower percentile used to determine the value to align with zero.

    +
    + + +
    +
    +
    + max_percentile: Annotated[float, Interval(gt=1, ge=None, lt=None, le=100)] + + +
    + + +

    The upper percentile used to determine the value to align with one. +Has to be bigger than min_percentile. +The range is 1 to 100 instead of 0 to 100 to avoid mistakenly +accepting percentiles specified in the range 0.0 to 1.0.

    +
    + + +
    +
    +
    + eps: Annotated[float, Interval(gt=0, ge=None, lt=None, le=0.1)] + + +
    + + +

    Epsilon for numeric stability.
    out = (tensor - v_lower) / (v_upper - v_lower + eps);
    with v_lower, v_upper values at the respective percentiles.

    +
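    A minimal numpy sketch of the percentile normalization described above (with the
    default percentiles 0 and 100 the data is mapped to [0, 1]):

    >>> import numpy as np
    >>> t = np.array([2.0, 4.0, 6.0, 10.0])
    >>> v_lower, v_upper = np.percentile(t, [0.0, 100.0])   # min_percentile, max_percentile
    >>> out = (t - v_lower) / (v_upper - v_lower + 1e-6)    # maps t into [0, 1]
    >>> bool(out.min() == 0.0), bool(out.max() <= 1.0)
    (True, True)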
    + + +
    +
    +
    + reference_tensor: Optional[TensorId] + + +
    + + +

    Tensor ID to compute the percentiles from. Default: The tensor itself. +For any tensor in inputs only input tensor references are allowed.

    +
    + + +
    +
    + +
    +
    @field_validator('max_percentile', mode='after')
    +
    @classmethod
    + + def + min_smaller_max( cls, value: float, info: pydantic_core.core_schema.ValidationInfo) -> float: + + + +
    + +
    1039    @field_validator("max_percentile", mode="after")
    +1040    @classmethod
    +1041    def min_smaller_max(cls, value: float, info: ValidationInfo) -> float:
    +1042        if (min_p := info.data["min_percentile"]) >= value:
    +1043            raise ValueError(f"min_percentile {min_p} >= max_percentile {value}")
    +1044
    +1045        return value
    +
    + + + + +
    +
    +
    + +
    + + class + ScaleRangeDescr(ProcessingDescrBase): + + + +
    + +
    1048class ScaleRangeDescr(ProcessingDescrBase):
    +1049    """Scale with percentiles."""
    +1050
    +1051    id: Literal["scale_range"] = "scale_range"
    +1052    kwargs: ScaleRangeKwargs
    +
    + + +

    Scale with percentiles.

    +
    + + +
    +
    + id: Literal['scale_range'] + + +
    + + + + +
    +
    +
    + kwargs: ScaleRangeKwargs + + +
    + + + + +
    +
    +
    Inherited Members
    +
    + +
    +
    +
    +
    + +
    + + class + ScaleMeanVarianceKwargs(bioimageio.spec.model.v0_4.ProcessingKwargs): + + + +
    + +
    1055class ScaleMeanVarianceKwargs(ProcessingKwargs):
    +1056    """key word arguments for `ScaleMeanVarianceDescr`"""
    +1057
    +1058    reference_tensor: TensorId
    +1059    """Name of tensor to match."""
    +1060
    +1061    axes: Annotated[
    +1062        Optional[Sequence[AxisId]], Field(examples=[("batch", "x", "y")])
    +1063    ] = None
    +1064    """The subset of axes to normalize jointly, i.e. axes to reduce to compute mean/std.
    +1065    For example to normalize 'batch', 'x' and 'y' jointly in a tensor ('batch', 'channel', 'y', 'x')
    +1066    resulting in a tensor of equal shape normalized per channel, specify `axes=('batch', 'x', 'y')`.
    +1067    To normalize samples independently, leave out the 'batch' axis.
    +1068    Default: Scale all axes jointly."""
    +1069
    +1070    eps: Annotated[float, Interval(gt=0, le=0.1)] = 1e-6
    +1071    """Epsilon for numeric stability:
    +1072    `out  = (tensor - mean) / (std + eps) * (ref_std + eps) + ref_mean.`"""
    +
    + + +

    key word arguments for ScaleMeanVarianceDescr

    +
    + + +
    +
    + reference_tensor: TensorId + + +
    + + +

    Name of tensor to match.

    +
    + + +
    +
    +
    + axes: Annotated[Optional[Sequence[AxisId]], FieldInfo(annotation=NoneType, required=True, examples=[('batch', 'x', 'y')])] + + +
    + + +

    The subset of axes to normalize jointly, i.e. axes to reduce to compute mean/std. +For example to normalize 'batch', 'x' and 'y' jointly in a tensor ('batch', 'channel', 'y', 'x') +resulting in a tensor of equal shape normalized per channel, specify axes=('batch', 'x', 'y'). +To normalize samples independently, leave out the 'batch' axis. +Default: Scale all axes jointly.

    +
    + + +
    +
    +
    + eps: Annotated[float, Interval(gt=0, ge=None, lt=None, le=0.1)] + + +
    + + +

    Epsilon for numeric stability: +out = (tensor - mean) / (std + eps) * (ref_std + eps) + ref_mean.

    +
    + + +
    +
    +
    + +
    + + class + ScaleMeanVarianceDescr(ProcessingDescrBase): + + + +
    + +
    1075class ScaleMeanVarianceDescr(ProcessingDescrBase):
    +1076    """Scale a tensor's data distribution to match another tensor's mean/std.
    +1077    `out  = (tensor - mean) / (std + eps) * (ref_std + eps) + ref_mean.`
    +1078    """
    +1079
    +1080    id: Literal["scale_mean_variance"] = "scale_mean_variance"
    +1081    kwargs: ScaleMeanVarianceKwargs
    +
    + + +

    Scale a tensor's data distribution to match another tensor's mean/std.
    out = (tensor - mean) / (std + eps) * (ref_std + eps) + ref_mean.

    +
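    A minimal numpy sketch of the formula, matching a tensor's statistics to those of a
    (hypothetical) reference tensor:

    >>> import numpy as np
    >>> rng = np.random.default_rng(0)
    >>> tensor = rng.normal(loc=5.0, scale=2.0, size=1000)
    >>> ref = rng.normal(loc=0.0, scale=1.0, size=1000)      # reference_tensor
    >>> eps = 1e-6
    >>> out = (tensor - tensor.mean()) / (tensor.std() + eps) * (ref.std() + eps) + ref.mean()
    >>> bool(abs(out.mean() - ref.mean()) < 1e-6)
    True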
    + + +
    +
    + id: Literal['scale_mean_variance'] + + +
    + + + + +
    +
    +
    + kwargs: ScaleMeanVarianceKwargs + + +
    + + + + +
    +
    +
    Inherited Members
    +
    + +
    +
    +
    +
    +
    + PreprocessingDescr = + + typing.Annotated[typing.Union[BinarizeDescr, ClipDescr, EnsureDtypeDescr, ScaleLinearDescr, SigmoidDescr, FixedZeroMeanUnitVarianceDescr, ZeroMeanUnitVarianceDescr, ScaleRangeDescr], Discriminator(discriminator='id', custom_error_type=None, custom_error_message=None, custom_error_context=None)] + + +
    + + + + +
    +
    +
    + PostprocessingDescr = + + typing.Annotated[typing.Union[BinarizeDescr, ClipDescr, EnsureDtypeDescr, ScaleLinearDescr, SigmoidDescr, FixedZeroMeanUnitVarianceDescr, ZeroMeanUnitVarianceDescr, ScaleRangeDescr, ScaleMeanVarianceDescr], Discriminator(discriminator='id', custom_error_type=None, custom_error_message=None, custom_error_context=None)] + + +
    + + + + +
    +
    + +
    + + class + TensorDescrBase(bioimageio.spec._internal.node.Node, typing.Generic[~IO_AxisT]): + + + +
    + +
    1115class TensorDescrBase(Node, Generic[IO_AxisT]):
    +1116    id: TensorId
    +1117    """Tensor id. No duplicates are allowed."""
    +1118
    +1119    description: Annotated[str, MaxLen(128)] = ""
    +1120    """free text description"""
    +1121
    +1122    axes: NotEmpty[Sequence[IO_AxisT]]
    +1123    """tensor axes"""
    +1124
    +1125    @property
    +1126    def shape(self):
    +1127        return tuple(a.size for a in self.axes)
    +1128
    +1129    @field_validator("axes", mode="after", check_fields=False)
    +1130    @classmethod
    +1131    def _validate_axes(cls, axes: Sequence[AnyAxis]) -> Sequence[AnyAxis]:
    +1132        batch_axes = [a for a in axes if a.type == "batch"]
    +1133        if len(batch_axes) > 1:
    +1134            raise ValueError(
    +1135                f"Only one batch axis (per tensor) allowed, but got {batch_axes}"
    +1136            )
    +1137
    +1138        seen_ids: Set[AxisId] = set()
    +1139        duplicate_axes_ids: Set[AxisId] = set()
    +1140        for a in axes:
    +1141            (duplicate_axes_ids if a.id in seen_ids else seen_ids).add(a.id)
    +1142
    +1143        if duplicate_axes_ids:
    +1144            raise ValueError(f"Duplicate axis ids: {duplicate_axes_ids}")
    +1145
    +1146        return axes
    +1147
    +1148    test_tensor: FileDescr
    +1149    """An example tensor to use for testing.
    +1150    Using the model with the test input tensors is expected to yield the test output tensors.
    +1151    Each test tensor has to be an ndarray in the
    +1152    [numpy.lib file format](https://numpy.org/doc/stable/reference/generated/numpy.lib.format.html#module-numpy.lib.format).
    +1153    The file extension must be '.npy'."""
    +1154
    +1155    sample_tensor: Optional[FileDescr] = None
    +1156    """A sample tensor to illustrate a possible input/output for the model.
    +1157    The sample image primarily serves to inform a human user about an example use case
    +1158    and is typically stored as .hdf5, .png or .tiff.
    +1159    It has to be readable by the [imageio library](https://imageio.readthedocs.io/en/stable/formats/index.html#supported-formats)
    +1160    (numpy's `.npy` format is not supported).
    +1161    The image dimensionality has to match the number of axes specified in this tensor description.
    +1162    """
    +1163
    +1164    @model_validator(mode="after")
    +1165    def _validate_sample_tensor(self) -> Self:
    +1166        if (
    +1167            self.sample_tensor is None
    +1168            or not validation_context_var.get().perform_io_checks
    +1169        ):
    +1170            return self
    +1171
    +1172        local = download(self.sample_tensor.source, sha256=self.sample_tensor.sha256)
    +1173        tensor: NDArray[Any] = imread(
    +1174            local.path.read_bytes(),
    +1175            extension=PurePosixPath(local.original_file_name).suffix,
    +1176        )
    +1177        n_dims = len(tensor.squeeze().shape)
    +1178        n_dims_min = n_dims_max = len(self.axes)
    +1179
    +1180        for a in self.axes:
    +1181            if isinstance(a, BatchAxis):
    +1182                n_dims_min -= 1
    +1183            elif isinstance(a.size, int):
    +1184                if a.size == 1:
    +1185                    n_dims_min -= 1
    +1186            elif isinstance(a.size, (ParameterizedSize, DataDependentSize)):
    +1187                if a.size.min == 1:
    +1188                    n_dims_min -= 1
    +1189            elif isinstance(a.size, SizeReference):
    +1190                if a.size.offset < 2:
    +1191                    # size reference may result in singleton axis
    +1192                    n_dims_min -= 1
    +1193            else:
    +1194                assert_never(a.size)
    +1195
    +1196        n_dims_min = max(0, n_dims_min)
    +1197        if n_dims < n_dims_min or n_dims > n_dims_max:
    +1198            raise ValueError(
    +1199                f"Expected sample tensor to have {n_dims_min} to"
    +1200                + f" {n_dims_max} dimensions, but found {n_dims} (shape: {tensor.shape})."
    +1201            )
    +1202
    +1203        return self
    +1204
    +1205    data: Union[TensorDataDescr, NotEmpty[Sequence[TensorDataDescr]]] = (
    +1206        IntervalOrRatioDataDescr()
    +1207    )
    +1208    """Description of the tensor's data values, optionally per channel.
    +1209    If specified per channel, the data `type` needs to match across channels."""
    +1210
    +1211    @property
    +1212    def dtype(
    +1213        self,
    +1214    ) -> Literal[
    +1215        "float32",
    +1216        "float64",
    +1217        "uint8",
    +1218        "int8",
    +1219        "uint16",
    +1220        "int16",
    +1221        "uint32",
    +1222        "int32",
    +1223        "uint64",
    +1224        "int64",
    +1225        "bool",
    +1226    ]:
    +1227        """dtype as specified under `data.type` or `data[i].type`"""
    +1228        if isinstance(self.data, collections.abc.Sequence):
    +1229            return self.data[0].type
    +1230        else:
    +1231            return self.data.type
    +1232
    +1233    @field_validator("data", mode="after")
    +1234    @classmethod
    +1235    def _check_data_type_across_channels(
    +1236        cls, value: Union[TensorDataDescr, NotEmpty[Sequence[TensorDataDescr]]]
    +1237    ) -> Union[TensorDataDescr, NotEmpty[Sequence[TensorDataDescr]]]:
    +1238        if not isinstance(value, list):
    +1239            return value
    +1240
    +1241        dtypes = {t.type for t in value}
    +1242        if len(dtypes) > 1:
    +1243            raise ValueError(
    +1244                "Tensor data descriptions per channel need to agree in their data"
    +1245                + f" `type`, but found {dtypes}."
    +1246            )
    +1247
    +1248        return value
    +1249
    +1250    @model_validator(mode="after")
    +1251    def _check_data_matches_channelaxis(self) -> Self:
    +1252        if not isinstance(self.data, (list, tuple)):
    +1253            return self
    +1254
    +1255        for a in self.axes:
    +1256            if isinstance(a, ChannelAxis):
    +1257                size = a.size
    +1258                assert isinstance(size, int)
    +1259                break
    +1260        else:
    +1261            return self
    +1262
    +1263        if len(self.data) != size:
    +1264            raise ValueError(
    +1265                f"Got tensor data descriptions for {len(self.data)} channels, but"
    +1266                + f" '{a.id}' axis has size {size}."
    +1267            )
    +1268
    +1269        return self
    +1270
    +1271    def get_axis_sizes_for_array(self, array: NDArray[Any]) -> Dict[AxisId, int]:
    +1272        if len(array.shape) != len(self.axes):
    +1273            raise ValueError(
    +1274                f"Dimension mismatch: array shape {array.shape} (#{len(array.shape)})"
    +1275                + f" incompatible with {len(self.axes)} axes."
    +1276            )
    +1277        return {a.id: array.shape[i] for i, a in enumerate(self.axes)}
    +
    + + +

    Subpart of a resource description

    +
    + + +
    +
    + id: TensorId + + +
    + + +

    Tensor id. No duplicates are allowed.

    +
    + + +
    +
    +
    + description: Annotated[str, MaxLen(max_length=128)] + + +
    + + +

    free text description

    +
    + + +
    +
    +
    + axes: Annotated[Sequence[~IO_AxisT], MinLen(min_length=1)] + + +
    + + +

    tensor axes

    +
    + + +
    +
    + +
    + shape + + + +
    + +
    1125    @property
    +1126    def shape(self):
    +1127        return tuple(a.size for a in self.axes)
    +
    + + + + +
    +
    +
    + test_tensor: bioimageio.spec._internal.io.FileDescr + + +
    + + +

An example tensor to use for testing. +Using the model with the test input tensors is expected to yield the test output tensors. +Each test tensor has to be an ndarray in the +numpy.lib file format. +The file extension must be '.npy'.

    +
    + + +
    +
    +
    + sample_tensor: Optional[bioimageio.spec._internal.io.FileDescr] + + +
    + + +

A sample tensor to illustrate a possible input/output for the model. +The sample image primarily serves to inform a human user about an example use case +and is typically stored as .hdf5, .png or .tiff. +It has to be readable by the imageio library +(numpy's .npy format is not supported). +The image dimensionality has to match the number of axes specified in this tensor description.

    +
    + + +
    +
    +
    + data: Union[NominalOrOrdinalDataDescr, IntervalOrRatioDataDescr, Annotated[Sequence[Union[NominalOrOrdinalDataDescr, IntervalOrRatioDataDescr]], MinLen(min_length=1)]] + + +
    + + +

    Description of the tensor's data values, optionally per channel. +If specified per channel, the data type needs to match across channels.

    +
    + + +
    +
    + +
    + dtype: Literal['float32', 'float64', 'uint8', 'int8', 'uint16', 'int16', 'uint32', 'int32', 'uint64', 'int64', 'bool'] + + + +
    + +
    1211    @property
    +1212    def dtype(
    +1213        self,
    +1214    ) -> Literal[
    +1215        "float32",
    +1216        "float64",
    +1217        "uint8",
    +1218        "int8",
    +1219        "uint16",
    +1220        "int16",
    +1221        "uint32",
    +1222        "int32",
    +1223        "uint64",
    +1224        "int64",
    +1225        "bool",
    +1226    ]:
    +1227        """dtype as specified under `data.type` or `data[i].type`"""
    +1228        if isinstance(self.data, collections.abc.Sequence):
    +1229            return self.data[0].type
    +1230        else:
    +1231            return self.data.type
    +
    + + +

    dtype as specified under data.type or data[i].type

    +
    + + +
    +
    + +
    + + def + get_axis_sizes_for_array( self, array: numpy.ndarray[typing.Any, numpy.dtype[typing.Any]]) -> Dict[AxisId, int]: + + + +
    + +
    1271    def get_axis_sizes_for_array(self, array: NDArray[Any]) -> Dict[AxisId, int]:
    +1272        if len(array.shape) != len(self.axes):
    +1273            raise ValueError(
    +1274                f"Dimension mismatch: array shape {array.shape} (#{len(array.shape)})"
    +1275                + f" incompatible with {len(self.axes)} axes."
    +1276            )
    +1277        return {a.id: array.shape[i] for i, a in enumerate(self.axes)}
    +
    + + + + +
    +
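To make the members defined on `TensorDescrBase` concrete, here is a hedged sketch that builds a minimal tensor description using the `InputTensorDescr` subclass documented directly below and then queries `shape`, `dtype` and `get_axis_sizes_for_array`. The import paths and the example URL are assumptions; `ValidationContext(perform_io_checks=False)` is used so the test tensor is not actually downloaded.

```python
import numpy as np

from bioimageio.spec import ValidationContext  # assumed public import
from bioimageio.spec.model.v0_5 import (       # assumed import path
    AxisId,
    BatchAxis,
    ChannelAxis,
    FileDescr,
    Identifier,
    InputTensorDescr,
    SpaceInputAxis,
    TensorId,
)

with ValidationContext(perform_io_checks=False):  # skip IO checks for the test tensor
    descr = InputTensorDescr(
        id=TensorId("raw"),
        axes=[
            BatchAxis(),
            ChannelAxis(channel_names=[Identifier("c0")]),
            SpaceInputAxis(id=AxisId("y"), size=256),
            SpaceInputAxis(id=AxisId("x"), size=256),
        ],
        test_tensor=FileDescr(source="https://example.com/test_input.npy"),  # hypothetical URL
    )

print(descr.shape)  # per-axis sizes as declared (the batch axis size may be unspecified)
print(descr.dtype)  # data type from the default IntervalOrRatioDataDescr (typically 'float32')
print(descr.get_axis_sizes_for_array(np.zeros((1, 1, 256, 256), dtype="float32")))
```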
    +
    + +
    + + class + InputTensorDescr(bioimageio.spec._internal.node.Node, typing.Generic[~IO_AxisT]): + + + +
    + +
    1280class InputTensorDescr(TensorDescrBase[InputAxis]):
    +1281    id: TensorId = TensorId("input")
    +1282    """Input tensor id.
    +1283    No duplicates are allowed across all inputs and outputs."""
    +1284
    +1285    optional: bool = False
    +1286    """indicates that this tensor may be `None`"""
    +1287
    +1288    preprocessing: List[PreprocessingDescr] = Field(default_factory=list)
    +1289    """Description of how this input should be preprocessed.
    +1290
    +1291    notes:
    +1292    - If preprocessing does not start with an 'ensure_dtype' entry, it is added
    +1293      to ensure an input tensor's data type matches the input tensor's data description.
    +1294    - If preprocessing does not end with an 'ensure_dtype' or 'binarize' entry, an
    +1295      'ensure_dtype' step is added to ensure preprocessing steps are not unintentionally
    +1296      changing the data type.
    +1297    """
    +1298
    +1299    @model_validator(mode="after")
    +1300    def _validate_preprocessing_kwargs(self) -> Self:
    +1301        axes_ids = [a.id for a in self.axes]
    +1302        for p in self.preprocessing:
    +1303            kwargs_axes: Optional[Sequence[Any]] = p.kwargs.get("axes")
    +1304            if kwargs_axes is None:
    +1305                continue
    +1306
    +1307            if not isinstance(kwargs_axes, collections.abc.Sequence):
    +1308                raise ValueError(
    +1309                    f"Expected `preprocessing.i.kwargs.axes` to be a sequence, but got {type(kwargs_axes)}"
    +1310                )
    +1311
    +1312            if any(a not in axes_ids for a in kwargs_axes):
    +1313                raise ValueError(
    +1314                    "`preprocessing.i.kwargs.axes` needs to be subset of axes ids"
    +1315                )
    +1316
    +1317        if isinstance(self.data, (NominalOrOrdinalDataDescr, IntervalOrRatioDataDescr)):
    +1318            dtype = self.data.type
    +1319        else:
    +1320            dtype = self.data[0].type
    +1321
    +1322        # ensure `preprocessing` begins with `EnsureDtypeDescr`
    +1323        if not self.preprocessing or not isinstance(
    +1324            self.preprocessing[0], EnsureDtypeDescr
    +1325        ):
    +1326            self.preprocessing.insert(
    +1327                0, EnsureDtypeDescr(kwargs=EnsureDtypeKwargs(dtype=dtype))
    +1328            )
    +1329
    +1330        # ensure `preprocessing` ends with `EnsureDtypeDescr` or `BinarizeDescr`
    +1331        if not isinstance(self.preprocessing[-1], (EnsureDtypeDescr, BinarizeDescr)):
    +1332            self.preprocessing.append(
    +1333                EnsureDtypeDescr(kwargs=EnsureDtypeKwargs(dtype=dtype))
    +1334            )
    +1335
    +1336        return self
    +
    + + +

    Subpart of a resource description

    +
    + + +
    +
    + id: TensorId + + +
    + + +

    Input tensor id. +No duplicates are allowed across all inputs and outputs.

    +
    + + +
    +
    +
    + optional: bool + + +
    + + +

    indicates that this tensor may be None

    +
    + + +
    +
    +
    + preprocessing: List[Annotated[Union[BinarizeDescr, ClipDescr, EnsureDtypeDescr, ScaleLinearDescr, SigmoidDescr, FixedZeroMeanUnitVarianceDescr, ZeroMeanUnitVarianceDescr, ScaleRangeDescr], Discriminator(discriminator='id', custom_error_type=None, custom_error_message=None, custom_error_context=None)]] + + +
    + + +

    Description of how this input should be preprocessed.

    + +

    notes:

    + +
      +
    • If preprocessing does not start with an 'ensure_dtype' entry, it is added +to ensure an input tensor's data type matches the input tensor's data description.
    • +
    • If preprocessing does not end with an 'ensure_dtype' or 'binarize' entry, an +'ensure_dtype' step is added to ensure preprocessing steps are not unintentionally +changing the data type.
    • +
    +
    + + +
    + +
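Continuing the hypothetical `descr` from the sketch further above: although no `preprocessing` was passed at construction time, the validator shown here inserts an 'ensure_dtype' step so that the chain starts (and ends) at the tensor's declared data type.

```python
# descr.preprocessing was left empty, but the model validator above has
# prepended an 'ensure_dtype' step matching descr.dtype
first = descr.preprocessing[0]
print(type(first).__name__, first.kwargs.dtype)  # EnsureDtypeDescr, e.g. 'float32'
```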
    +
    + +
    + + def + convert_axes( axes: str, *, shape: Union[Sequence[int], bioimageio.spec.model.v0_4.ParameterizedInputShape, bioimageio.spec.model.v0_4.ImplicitOutputShape], tensor_type: Literal['input', 'output'], halo: Optional[Sequence[int]], size_refs: Mapping[bioimageio.spec.model.v0_4.TensorName, Mapping[str, int]]): + + + +
    + +
    1339def convert_axes(
    +1340    axes: str,
    +1341    *,
    +1342    shape: Union[
    +1343        Sequence[int], _ParameterizedInputShape_v0_4, _ImplicitOutputShape_v0_4
    +1344    ],
    +1345    tensor_type: Literal["input", "output"],
    +1346    halo: Optional[Sequence[int]],
    +1347    size_refs: Mapping[_TensorName_v0_4, Mapping[str, int]],
    +1348):
    +1349    ret: List[AnyAxis] = []
    +1350    for i, a in enumerate(axes):
    +1351        axis_type = _AXIS_TYPE_MAP.get(a, a)
    +1352        if axis_type == "batch":
    +1353            ret.append(BatchAxis())
    +1354            continue
    +1355
    +1356        scale = 1.0
    +1357        if isinstance(shape, _ParameterizedInputShape_v0_4):
    +1358            if shape.step[i] == 0:
    +1359                size = shape.min[i]
    +1360            else:
    +1361                size = ParameterizedSize(min=shape.min[i], step=shape.step[i])
    +1362        elif isinstance(shape, _ImplicitOutputShape_v0_4):
    +1363            ref_t = str(shape.reference_tensor)
    +1364            if ref_t.count(".") == 1:
    +1365                t_id, orig_a_id = ref_t.split(".")
    +1366            else:
    +1367                t_id = ref_t
    +1368                orig_a_id = a
    +1369
    +1370            a_id = _AXIS_ID_MAP.get(orig_a_id, a)
    +1371            if not (orig_scale := shape.scale[i]):
    +1372                # old way to insert a new axis dimension
    +1373                size = int(2 * shape.offset[i])
    +1374            else:
    +1375                scale = 1 / orig_scale
    +1376                if axis_type in ("channel", "index"):
    +1377                    # these axes no longer have a scale
    +1378                    offset_from_scale = orig_scale * size_refs.get(
    +1379                        _TensorName_v0_4(t_id), {}
    +1380                    ).get(orig_a_id, 0)
    +1381                else:
    +1382                    offset_from_scale = 0
    +1383                size = SizeReference(
    +1384                    tensor_id=TensorId(t_id),
    +1385                    axis_id=AxisId(a_id),
    +1386                    offset=int(offset_from_scale + 2 * shape.offset[i]),
    +1387                )
    +1388        else:
    +1389            size = shape[i]
    +1390
    +1391        if axis_type == "time":
    +1392            if tensor_type == "input":
    +1393                ret.append(TimeInputAxis(size=size, scale=scale))
    +1394            else:
    +1395                assert not isinstance(size, ParameterizedSize)
    +1396                if halo is None:
    +1397                    ret.append(TimeOutputAxis(size=size, scale=scale))
    +1398                else:
    +1399                    assert not isinstance(size, int)
    +1400                    ret.append(
    +1401                        TimeOutputAxisWithHalo(size=size, scale=scale, halo=halo[i])
    +1402                    )
    +1403
    +1404        elif axis_type == "index":
    +1405            if tensor_type == "input":
    +1406                ret.append(IndexInputAxis(size=size))
    +1407            else:
    +1408                if isinstance(size, ParameterizedSize):
    +1409                    size = DataDependentSize(min=size.min)
    +1410
    +1411                ret.append(IndexOutputAxis(size=size))
    +1412        elif axis_type == "channel":
    +1413            assert not isinstance(size, ParameterizedSize)
    +1414            if isinstance(size, SizeReference):
    +1415                warnings.warn(
    +1416                    "Conversion of channel size from an implicit output shape may be"
    +1417                    + " wrong"
    +1418                )
    +1419                ret.append(
    +1420                    ChannelAxis(
    +1421                        channel_names=[
    +1422                            Identifier(f"channel{i}") for i in range(size.offset)
    +1423                        ]
    +1424                    )
    +1425                )
    +1426            else:
    +1427                ret.append(
    +1428                    ChannelAxis(
    +1429                        channel_names=[Identifier(f"channel{i}") for i in range(size)]
    +1430                    )
    +1431                )
    +1432        elif axis_type == "space":
    +1433            if tensor_type == "input":
    +1434                ret.append(SpaceInputAxis(id=AxisId(a), size=size, scale=scale))
    +1435            else:
    +1436                assert not isinstance(size, ParameterizedSize)
    +1437                if halo is None or halo[i] == 0:
    +1438                    ret.append(SpaceOutputAxis(id=AxisId(a), size=size, scale=scale))
    +1439                elif isinstance(size, int):
    +1440                    raise NotImplementedError(
    +1441                        f"output axis with halo and fixed size (here {size}) not allowed"
    +1442                    )
    +1443                else:
    +1444                    ret.append(
    +1445                        SpaceOutputAxisWithHalo(
    +1446                            id=AxisId(a), size=size, scale=scale, halo=halo[i]
    +1447                        )
    +1448                    )
    +1449
    +1450    return ret
    +
    + + + + +
    +
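A short, hedged usage sketch for this converter: it translates a format 0.4 axes string plus an explicit shape into the 0.5 axis objects used above. The import path is assumed to be the module documented here.

```python
from bioimageio.spec.model.v0_5 import convert_axes  # assumed import path

axes = convert_axes(
    "bcyx",                    # format 0.4 axes string
    shape=[1, 3, 512, 512],    # explicit shape -> fixed axis sizes
    tensor_type="input",
    halo=None,
    size_refs={},
)
for a in axes:
    print(type(a).__name__)
# BatchAxis, ChannelAxis (3 generated channel names), SpaceInputAxis (y), SpaceInputAxis (x)
```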
    + +
    + + class + OutputTensorDescr(bioimageio.spec._internal.node.Node, typing.Generic[~IO_AxisT]): + + + +
    + +
    1625class OutputTensorDescr(TensorDescrBase[OutputAxis]):
    +1626    id: TensorId = TensorId("output")
    +1627    """Output tensor id.
    +1628    No duplicates are allowed across all inputs and outputs."""
    +1629
    +1630    postprocessing: List[PostprocessingDescr] = Field(default_factory=list)
    +1631    """Description of how this output should be postprocessed.
    +1632
    +1633    note: `postprocessing` always ends with an 'ensure_dtype' operation.
    +1634          If not given this is added to cast to this tensor's `data.type`.
    +1635    """
    +1636
    +1637    @model_validator(mode="after")
    +1638    def _validate_postprocessing_kwargs(self) -> Self:
    +1639        axes_ids = [a.id for a in self.axes]
    +1640        for p in self.postprocessing:
    +1641            kwargs_axes: Optional[Sequence[Any]] = p.kwargs.get("axes")
    +1642            if kwargs_axes is None:
    +1643                continue
    +1644
    +1645            if not isinstance(kwargs_axes, collections.abc.Sequence):
    +1646                raise ValueError(
    +1647                    f"expected `axes` sequence, but got {type(kwargs_axes)}"
    +1648                )
    +1649
    +1650            if any(a not in axes_ids for a in kwargs_axes):
    +1651                raise ValueError("`kwargs.axes` needs to be subset of axes ids")
    +1652
    +1653        if isinstance(self.data, (NominalOrOrdinalDataDescr, IntervalOrRatioDataDescr)):
    +1654            dtype = self.data.type
    +1655        else:
    +1656            dtype = self.data[0].type
    +1657
    +1658        # ensure `postprocessing` ends with `EnsureDtypeDescr` or `BinarizeDescr`
    +1659        if not self.postprocessing or not isinstance(
    +1660            self.postprocessing[-1], (EnsureDtypeDescr, BinarizeDescr)
    +1661        ):
    +1662            self.postprocessing.append(
    +1663                EnsureDtypeDescr(kwargs=EnsureDtypeKwargs(dtype=dtype))
    +1664            )
    +1665        return self
    +
    + + +

    Subpart of a resource description

    +
    + + +
    +
    + id: TensorId + + +
    + + +

    Output tensor id. +No duplicates are allowed across all inputs and outputs.

    +
    + + +
    +
    +
    + postprocessing: List[Annotated[Union[BinarizeDescr, ClipDescr, EnsureDtypeDescr, ScaleLinearDescr, SigmoidDescr, FixedZeroMeanUnitVarianceDescr, ZeroMeanUnitVarianceDescr, ScaleRangeDescr, ScaleMeanVarianceDescr], Discriminator(discriminator='id', custom_error_type=None, custom_error_message=None, custom_error_context=None)]] + + +
    + + +

    Description of how this output should be postprocessed.

    + +

    note: postprocessing always ends with an 'ensure_dtype' operation. + If not given this is added to cast to this tensor's data.type.

    +
    + + +
    + +
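Analogous to the input case, a minimal sketch (same assumed imports and validation context as in the input tensor sketch above) shows that an empty `postprocessing` list ends up with a trailing 'ensure_dtype' step:

```python
from bioimageio.spec import ValidationContext  # assumed public import
from bioimageio.spec.model.v0_5 import (       # assumed import path
    AxisId,
    BatchAxis,
    FileDescr,
    OutputTensorDescr,
    SpaceOutputAxis,
    TensorId,
)

with ValidationContext(perform_io_checks=False):
    out = OutputTensorDescr(
        id=TensorId("mask"),
        axes=[
            BatchAxis(),
            SpaceOutputAxis(id=AxisId("y"), size=256),
            SpaceOutputAxis(id=AxisId("x"), size=256),
        ],
        test_tensor=FileDescr(source="https://example.com/test_output.npy"),  # hypothetical URL
    )

print(type(out.postprocessing[-1]).__name__)  # EnsureDtypeDescr, appended by the validator
```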
    +
    +
    + TensorDescr = + + typing.Union[InputTensorDescr, OutputTensorDescr] + + +
    + + + + +
    +
    + +
    + + def + validate_tensors( tensors: Mapping[TensorId, Tuple[Union[InputTensorDescr, OutputTensorDescr], numpy.ndarray[Any, numpy.dtype[Any]]]], tensor_origin: str): + + + +
    + +
    1715def validate_tensors(
    +1716    tensors: Mapping[TensorId, Tuple[TensorDescr, NDArray[Any]]],
    +1717    tensor_origin: str,  # for more precise error messages, e.g. 'test_tensor'
    +1718):
    +1719    all_tensor_axes: Dict[TensorId, Dict[AxisId, Tuple[AnyAxis, int]]] = {}
    +1720
    +1721    def e_msg(d: TensorDescr):
    +1722        return f"{'inputs' if isinstance(d, InputTensorDescr) else 'outputs'}[{d.id}]"
    +1723
    +1724    for descr, array in tensors.values():
    +1725        try:
    +1726            axis_sizes = descr.get_axis_sizes_for_array(array)
    +1727        except ValueError as e:
    +1728            raise ValueError(f"{e_msg(descr)} {e}")
    +1729        else:
    +1730            all_tensor_axes[descr.id] = {
    +1731                a.id: (a, axis_sizes[a.id]) for a in descr.axes
    +1732            }
    +1733
    +1734    for descr, array in tensors.values():
    +1735        if array.dtype.name != descr.dtype:
    +1736            raise ValueError(
    +1737                f"{e_msg(descr)}.{tensor_origin}.dtype '{array.dtype.name}' does not"
    +1738                + f" match described dtype '{descr.dtype}'"
    +1739            )
    +1740
    +1741        for a in descr.axes:
    +1742            actual_size = all_tensor_axes[descr.id][a.id][1]
    +1743            if a.size is None:
    +1744                continue
    +1745
    +1746            if isinstance(a.size, int):
    +1747                if actual_size != a.size:
    +1748                    raise ValueError(
    +1749                        f"{e_msg(descr)}.{tensor_origin}: axis '{a.id}' "
    +1750                        + f"has incompatible size {actual_size}, expected {a.size}"
    +1751                    )
    +1752            elif isinstance(a.size, ParameterizedSize):
    +1753                _ = a.size.validate_size(actual_size)
    +1754            elif isinstance(a.size, DataDependentSize):
    +1755                _ = a.size.validate_size(actual_size)
    +1756            elif isinstance(a.size, SizeReference):
    +1757                ref_tensor_axes = all_tensor_axes.get(a.size.tensor_id)
    +1758                if ref_tensor_axes is None:
    +1759                    raise ValueError(
    +1760                        f"{e_msg(descr)}.axes[{a.id}].size.tensor_id: Unknown tensor"
    +1761                        + f" reference '{a.size.tensor_id}'"
    +1762                    )
    +1763
    +1764                ref_axis, ref_size = ref_tensor_axes.get(a.size.axis_id, (None, None))
    +1765                if ref_axis is None or ref_size is None:
    +1766                    raise ValueError(
    +1767                        f"{e_msg(descr)}.axes[{a.id}].size.axis_id: Unknown tensor axis"
    +1768                        + f" reference '{a.size.tensor_id}.{a.size.axis_id}"
    +1769                    )
    +1770
    +1771                if a.unit != ref_axis.unit:
    +1772                    raise ValueError(
    +1773                        f"{e_msg(descr)}.axes[{a.id}].size: `SizeReference` requires"
    +1774                        + " axis and reference axis to have the same `unit`, but"
    +1775                        + f" {a.unit}!={ref_axis.unit}"
    +1776                    )
    +1777
    +1778                if actual_size != (
    +1779                    expected_size := (
    +1780                        ref_size * ref_axis.scale / a.scale + a.size.offset
    +1781                    )
    +1782                ):
    +1783                    raise ValueError(
    +1784                        f"{e_msg(descr)}.{tensor_origin}: axis '{a.id}' of size"
    +1785                        + f" {actual_size} invalid for referenced size {ref_size};"
    +1786                        + f" expected {expected_size}"
    +1787                    )
    +1788            else:
    +1789                assert_never(a.size)
    +
    + + + + +
    +
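A hedged sketch of how this helper might be called, reusing the hypothetical `descr` input description from the earlier sketch; the array has to match the described dtype and axis sizes, otherwise a `ValueError` is raised.

```python
import numpy as np

from bioimageio.spec.model.v0_5 import validate_tensors  # assumed import path

# reusing the hypothetical `descr` (InputTensorDescr) from the sketch above
array = np.zeros((1, 1, 256, 256), dtype="float32")  # matches descr.dtype and axis sizes
validate_tensors({descr.id: (descr, array)}, tensor_origin="test_tensor")  # raises on mismatch
```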
    + +
    + + class + EnvironmentFileDescr(bioimageio.spec._internal.io.FileDescr): + + + +
    + +
    1792class EnvironmentFileDescr(FileDescr):
    +1793    source: Annotated[
    +1794        ImportantFileSource,
    +1795        WithSuffix((".yaml", ".yml"), case_sensitive=True),
    +1796        Field(
    +1797            examples=["environment.yaml"],
    +1798        ),
    +1799    ]
    +1800    """∈📦 Conda environment file.
+1801    Allows specifying custom dependencies; see the conda docs:
    +1802    - [Exporting an environment file across platforms](https://conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html#exporting-an-environment-file-across-platforms)
    +1803    - [Creating an environment file manually](https://conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html#creating-an-environment-file-manually)
    +1804    """
    +
    + + +

    Subpart of a resource description

    +
    + + +
    +
    + source: Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')]), AfterValidator(func=<function wo_special_file_name at 0x7f538192cea0>), PlainSerializer(func=<function _package at 0x7f538192d620>, return_type=PydanticUndefined, when_used='unless-none'), WithSuffix(suffix=('.yaml', '.yml'), case_sensitive=True), FieldInfo(annotation=NoneType, required=True, examples=['environment.yaml'])] + + +
    + + +

∈📦 Conda environment file. +Allows specifying custom dependencies; see the conda docs:

    + + +
    + + +
    +
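A minimal, hypothetical example of describing a custom conda environment file; the relative path is an assumption and is only resolvable once such a file is packaged alongside the description, which is why the sketch disables IO checks.

```python
from bioimageio.spec import ValidationContext              # assumed public import
from bioimageio.spec.model.v0_5 import EnvironmentFileDescr  # assumed import path

with ValidationContext(perform_io_checks=False):  # the file is not resolved here
    env = EnvironmentFileDescr(source="environment.yaml")  # suffix must be .yaml or .yml
```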
    +
    + +
    + + class + ArchitectureFromFileDescr(_ArchitectureCallableDescr, bioimageio.spec._internal.io.FileDescr): + + + +
    + +
    1815class ArchitectureFromFileDescr(_ArchitectureCallableDescr, FileDescr):
    +1816    pass
    +
    + + +

    Subpart of a resource description

    +
    + + +
    +
    Inherited Members
    +
    + +
    +
    +
    +
    + +
    + + class + ArchitectureFromLibraryDescr(_ArchitectureCallableDescr): + + + +
    + +
    1819class ArchitectureFromLibraryDescr(_ArchitectureCallableDescr):
    +1820    import_from: str
    +1821    """Where to import the callable from, i.e. `from <import_from> import <callable>`"""
    +
    + + +

    Subpart of a resource description

    +
    + + +
    +
    + import_from: str + + +
    + + +

    Where to import the callable from, i.e. from <import_from> import <callable>

    +
    + + +
    +
    +
    Inherited Members
    +
    + +
    +
    +
    +
    +
    + ArchitectureDescr = + + typing.Annotated[typing.Union[ArchitectureFromFileDescr, ArchitectureFromLibraryDescr], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])] + + +
    + + + + +
    +
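For illustration, a hedged sketch of an `ArchitectureDescr` referencing a callable importable from a library. The `callable` and `kwargs` fields come from `_ArchitectureCallableDescr`, which is not shown on this page, so they are assumptions here, as are the package and class names.

```python
from bioimageio.spec.model.v0_5 import ArchitectureFromLibraryDescr, Identifier  # assumed import path

# hypothetical package/callable names; `callable` and `kwargs` are assumed fields
arch = ArchitectureFromLibraryDescr(
    callable=Identifier("UNet2d"),
    import_from="my_model_zoo.nets",  # i.e. `from my_model_zoo.nets import UNet2d`
    kwargs={"in_channels": 1, "out_channels": 1},
)
```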
    + +
    + + class + WeightsEntryDescrBase(bioimageio.spec._internal.io.FileDescr): + + + +
    + +
    1887class WeightsEntryDescrBase(FileDescr):
    +1888    type: ClassVar[WeightsFormat]
    +1889    weights_format_name: ClassVar[str]  # human readable
    +1890
    +1891    source: ImportantFileSource
    +1892    """∈📦 The weights file."""
    +1893
    +1894    authors: Optional[List[Author]] = None
    +1895    """Authors
    +1896    Either the person(s) that have trained this model resulting in the original weights file.
    +1897        (If this is the initial weights entry, i.e. it does not have a `parent`)
    +1898    Or the person(s) who have converted the weights to this weights format.
    +1899        (If this is a child weight, i.e. it has a `parent` field)
    +1900    """
    +1901
    +1902    parent: Annotated[
    +1903        Optional[WeightsFormat], Field(examples=["pytorch_state_dict"])
    +1904    ] = None
    +1905    """The source weights these weights were converted from.
    +1906    For example, if a model's weights were converted from the `pytorch_state_dict` format to `torchscript`,
+1907    the `pytorch_state_dict` weights entry has no `parent` and is the parent of the `torchscript` weights.
+1908    All weight entries except one (the initial set of weights resulting from training the model)
    +1909    need to have this field."""
    +1910
    +1911    @model_validator(mode="after")
    +1912    def check_parent_is_not_self(self) -> Self:
    +1913        if self.type == self.parent:
+1914            raise ValueError("Weights entry can't be its own parent.")
    +1915
    +1916        return self
    +
    + + +

    Subpart of a resource description

    +
    + + +
    +
    + type: ClassVar[Literal['keras_hdf5', 'onnx', 'pytorch_state_dict', 'tensorflow_js', 'tensorflow_saved_model_bundle', 'torchscript']] + + +
    + + + + +
    +
    +
    + weights_format_name: ClassVar[str] + + +
    + + + + +
    +
    +
    + source: Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')]), AfterValidator(func=<function wo_special_file_name at 0x7f538192cea0>), PlainSerializer(func=<function _package at 0x7f538192d620>, return_type=PydanticUndefined, when_used='unless-none')] + + +
    + + +

    ∈📦 The weights file.

    +
    + + +
    +
    +
    + authors: Optional[List[bioimageio.spec.generic.v0_3.Author]] + + +
    + + +

    Authors +Either the person(s) that have trained this model resulting in the original weights file. + (If this is the initial weights entry, i.e. it does not have a parent) +Or the person(s) who have converted the weights to this weights format. + (If this is a child weight, i.e. it has a parent field)

    +
    + + +
    +
    +
    + parent: Annotated[Optional[Literal['keras_hdf5', 'onnx', 'pytorch_state_dict', 'tensorflow_js', 'tensorflow_saved_model_bundle', 'torchscript']], FieldInfo(annotation=NoneType, required=True, examples=['pytorch_state_dict'])] + + +
    + + +

The source weights these weights were converted from. +For example, if a model's weights were converted from the pytorch_state_dict format to torchscript, +the pytorch_state_dict weights entry has no parent and is the parent of the torchscript weights. +All weight entries except one (the initial set of weights resulting from training the model) +need to have this field.

    +
    + + +
    +
    + +
    +
    @model_validator(mode='after')
    + + def + check_parent_is_not_self(self) -> Self: + + + +
    + +
    1911    @model_validator(mode="after")
    +1912    def check_parent_is_not_self(self) -> Self:
    +1913        if self.type == self.parent:
+1914            raise ValueError("Weights entry can't be its own parent.")
    +1915
    +1916        return self
    +
    + + + + +
    +
    +
    + +
    + + class + KerasHdf5WeightsDescr(WeightsEntryDescrBase): + + + +
    + +
    1919class KerasHdf5WeightsDescr(WeightsEntryDescrBase):
    +1920    type = "keras_hdf5"
    +1921    weights_format_name: ClassVar[str] = "Keras HDF5"
    +1922    tensorflow_version: Version
    +1923    """TensorFlow version used to create these weights."""
    +
    + + +

    Subpart of a resource description

    +
    + + +
    +
    + type = +'keras_hdf5' + + +
    + + + + +
    +
    +
    + weights_format_name: ClassVar[str] = +'Keras HDF5' + + +
    + + + + +
    +
    +
    + tensorflow_version: bioimageio.spec._internal.version_type.Version + + +
    + + +

    TensorFlow version used to create these weights.

    +
    + + +
    +
    +
    Inherited Members
    +
    + +
    +
    +
    +
    + +
    + + class + OnnxWeightsDescr(WeightsEntryDescrBase): + + + +
    + +
    1926class OnnxWeightsDescr(WeightsEntryDescrBase):
    +1927    type = "onnx"
    +1928    weights_format_name: ClassVar[str] = "ONNX"
    +1929    opset_version: Annotated[int, Ge(7)]
    +1930    """ONNX opset version"""
    +
    + + +

    Subpart of a resource description

    +
    + + +
    +
    + type = +'onnx' + + +
    + + + + +
    +
    +
    + weights_format_name: ClassVar[str] = +'ONNX' + + +
    + + + + +
    +
    +
    + opset_version: Annotated[int, Ge(ge=7)] + + +
    + + +

    ONNX opset version

    +
    + + +
    +
    +
    Inherited Members
    +
    + +
    +
    +
    +
    + +
    + + class + PytorchStateDictWeightsDescr(WeightsEntryDescrBase): + + + +
    + +
    1933class PytorchStateDictWeightsDescr(WeightsEntryDescrBase):
    +1934    type = "pytorch_state_dict"
    +1935    weights_format_name: ClassVar[str] = "Pytorch State Dict"
    +1936    architecture: ArchitectureDescr
    +1937    pytorch_version: Version
    +1938    """Version of the PyTorch library used.
+1939    If `architecture.dependencies` is specified it has to include pytorch and any version pinning has to be compatible.
    +1940    """
    +1941    dependencies: Optional[EnvironmentFileDescr] = None
+1942    """Custom dependencies beyond pytorch.
    +1943    The conda environment file should include pytorch and any version pinning has to be compatible with
    +1944    `pytorch_version`.
    +1945    """
    +
    + + +

    Subpart of a resource description

    +
    + + +
    +
    + type = +'pytorch_state_dict' + + +
    + + + + +
    +
    +
    + weights_format_name: ClassVar[str] = +'Pytorch State Dict' + + +
    + + + + +
    +
    +
    + architecture: Annotated[Union[ArchitectureFromFileDescr, ArchitectureFromLibraryDescr], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])] + + +
    + + + + +
    +
    +
    + pytorch_version: bioimageio.spec._internal.version_type.Version + + +
    + + +

Version of the PyTorch library used. +If architecture.dependencies is specified it has to include pytorch and any version pinning has to be compatible.

    +
    + + +
    +
    +
    + dependencies: Optional[EnvironmentFileDescr] + + +
    + + +

Custom dependencies beyond pytorch. +The conda environment file should include pytorch and any version pinning has to be compatible with +pytorch_version.

    +
    + + +
    +
    +
    Inherited Members
    +
    + +
    +
    +
    +
    + +
    + + class + TensorflowJsWeightsDescr(WeightsEntryDescrBase): + + + +
    + +
    1948class TensorflowJsWeightsDescr(WeightsEntryDescrBase):
    +1949    type = "tensorflow_js"
    +1950    weights_format_name: ClassVar[str] = "Tensorflow.js"
    +1951    tensorflow_version: Version
    +1952    """Version of the TensorFlow library used."""
    +1953
    +1954    source: ImportantFileSource
    +1955    """∈📦 The multi-file weights.
+1956    All required files/folders should be packaged in a zip archive."""
    +
    + + +

    Subpart of a resource description

    +
    + + +
    +
    + type = +'tensorflow_js' + + +
    + + + + +
    +
    +
    + weights_format_name: ClassVar[str] = +'Tensorflow.js' + + +
    + + + + +
    +
    +
    + tensorflow_version: bioimageio.spec._internal.version_type.Version + + +
    + + +

    Version of the TensorFlow library used.

    +
    + + +
    +
    +
    + source: Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')]), AfterValidator(func=<function wo_special_file_name at 0x7f538192cea0>), PlainSerializer(func=<function _package at 0x7f538192d620>, return_type=PydanticUndefined, when_used='unless-none')] + + +
    + + +

∈📦 The multi-file weights. +All required files/folders should be packaged in a zip archive.

    +
    + + +
    +
    +
    Inherited Members
    +
    + +
    +
    +
    +
    + +
    + + class + TensorflowSavedModelBundleWeightsDescr(WeightsEntryDescrBase): + + + +
    + +
    1959class TensorflowSavedModelBundleWeightsDescr(WeightsEntryDescrBase):
    +1960    type = "tensorflow_saved_model_bundle"
    +1961    weights_format_name: ClassVar[str] = "Tensorflow Saved Model"
    +1962    tensorflow_version: Version
    +1963    """Version of the TensorFlow library used."""
    +1964
    +1965    dependencies: Optional[EnvironmentFileDescr] = None
    +1966    """Custom dependencies beyond tensorflow.
    +1967    Should include tensorflow and any version pinning has to be compatible with `tensorflow_version`."""
    +1968
    +1969    source: ImportantFileSource
    +1970    """∈📦 The multi-file weights.
+1971    All required files/folders should be packaged in a zip archive."""
    +
    + + +

    Subpart of a resource description

    +
    + + +
    +
    + type = +'tensorflow_saved_model_bundle' + + +
    + + + + +
    +
    +
    + weights_format_name: ClassVar[str] = +'Tensorflow Saved Model' + + +
    + + + + +
    +
    +
    + tensorflow_version: bioimageio.spec._internal.version_type.Version + + +
    + + +

    Version of the TensorFlow library used.

    +
    + + +
    +
    +
    + dependencies: Optional[EnvironmentFileDescr] + + +
    + + +

    Custom dependencies beyond tensorflow. +Should include tensorflow and any version pinning has to be compatible with tensorflow_version.

    +
    + + +
    +
    +
    + source: Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')]), AfterValidator(func=<function wo_special_file_name at 0x7f538192cea0>), PlainSerializer(func=<function _package at 0x7f538192d620>, return_type=PydanticUndefined, when_used='unless-none')] + + +
    + + +

∈📦 The multi-file weights. +All required files/folders should be packaged in a zip archive.

    +
    + + +
    +
    +
    Inherited Members
    +
    + +
    +
    +
    +
    + +
    + + class + TorchscriptWeightsDescr(WeightsEntryDescrBase): + + + +
    + +
    1974class TorchscriptWeightsDescr(WeightsEntryDescrBase):
    +1975    type = "torchscript"
    +1976    weights_format_name: ClassVar[str] = "TorchScript"
    +1977    pytorch_version: Version
    +1978    """Version of the PyTorch library used."""
    +
    + + +

    Subpart of a resource description

    +
    + + +
    +
    + type = +'torchscript' + + +
    + + + + +
    +
    +
    + weights_format_name: ClassVar[str] = +'TorchScript' + + +
    + + + + +
    +
    +
    + pytorch_version: bioimageio.spec._internal.version_type.Version + + +
    + + +

    Version of the PyTorch library used.

    +
    + + +
    +
    +
    Inherited Members
    +
    + +
    +
    +
    +
    + +
    + + class + WeightsDescr(bioimageio.spec._internal.node.Node): + + + +
    + +
    1981class WeightsDescr(Node):
    +1982    keras_hdf5: Optional[KerasHdf5WeightsDescr] = None
    +1983    onnx: Optional[OnnxWeightsDescr] = None
    +1984    pytorch_state_dict: Optional[PytorchStateDictWeightsDescr] = None
    +1985    tensorflow_js: Optional[TensorflowJsWeightsDescr] = None
    +1986    tensorflow_saved_model_bundle: Optional[TensorflowSavedModelBundleWeightsDescr] = (
    +1987        None
    +1988    )
    +1989    torchscript: Optional[TorchscriptWeightsDescr] = None
    +1990
    +1991    @model_validator(mode="after")
    +1992    def check_entries(self) -> Self:
    +1993        entries = {wtype for wtype, entry in self if entry is not None}
    +1994
    +1995        if not entries:
    +1996            raise ValueError("Missing weights entry")
    +1997
    +1998        entries_wo_parent = {
    +1999            wtype
    +2000            for wtype, entry in self
    +2001            if entry is not None and hasattr(entry, "parent") and entry.parent is None
    +2002        }
    +2003        if len(entries_wo_parent) != 1:
    +2004            issue_warning(
    +2005                "Exactly one weights entry may not specify the `parent` field (got"
    +2006                + " {value}). That entry is considered the original set of model weights."
+2007                + " Other weight formats are created through conversion of the original or"
    +2008                + " already converted weights. They have to reference the weights format"
    +2009                + " they were converted from as their `parent`.",
    +2010                value=len(entries_wo_parent),
    +2011                field="weights",
    +2012            )
    +2013
    +2014        for wtype, entry in self:
    +2015            if entry is None:
    +2016                continue
    +2017
    +2018            assert hasattr(entry, "type")
    +2019            assert hasattr(entry, "parent")
    +2020            assert wtype == entry.type
    +2021            if (
    +2022                entry.parent is not None and entry.parent not in entries
    +2023            ):  # self reference checked for `parent` field
    +2024                raise ValueError(
    +2025                    f"`weights.{wtype}.parent={entry.parent} not in specified weight"
    +2026                    + f" formats: {entries}"
    +2027                )
    +2028
    +2029        return self
    +
    + + +

    Subpart of a resource description

    +
    + + +
    +
    + keras_hdf5: Optional[KerasHdf5WeightsDescr] + + +
    + + + + +
    +
    +
    + onnx: Optional[OnnxWeightsDescr] + + +
    + + + + +
    +
    +
    + pytorch_state_dict: Optional[PytorchStateDictWeightsDescr] + + +
    + + + + +
    +
    +
    + tensorflow_js: Optional[TensorflowJsWeightsDescr] + + +
    + + + + +
    +
    +
    + tensorflow_saved_model_bundle: Optional[TensorflowSavedModelBundleWeightsDescr] + + +
    + + + + +
    +
    +
    + torchscript: Optional[TorchscriptWeightsDescr] + + +
    + + + + +
    +
    + +
    +
    @model_validator(mode='after')
    + + def + check_entries(self) -> Self: + + + +
    + +
    1991    @model_validator(mode="after")
    +1992    def check_entries(self) -> Self:
    +1993        entries = {wtype for wtype, entry in self if entry is not None}
    +1994
    +1995        if not entries:
    +1996            raise ValueError("Missing weights entry")
    +1997
    +1998        entries_wo_parent = {
    +1999            wtype
    +2000            for wtype, entry in self
    +2001            if entry is not None and hasattr(entry, "parent") and entry.parent is None
    +2002        }
    +2003        if len(entries_wo_parent) != 1:
    +2004            issue_warning(
    +2005                "Exactly one weights entry may not specify the `parent` field (got"
    +2006                + " {value}). That entry is considered the original set of model weights."
+2007                + " Other weight formats are created through conversion of the original or"
    +2008                + " already converted weights. They have to reference the weights format"
    +2009                + " they were converted from as their `parent`.",
    +2010                value=len(entries_wo_parent),
    +2011                field="weights",
    +2012            )
    +2013
    +2014        for wtype, entry in self:
    +2015            if entry is None:
    +2016                continue
    +2017
    +2018            assert hasattr(entry, "type")
    +2019            assert hasattr(entry, "parent")
    +2020            assert wtype == entry.type
    +2021            if (
    +2022                entry.parent is not None and entry.parent not in entries
    +2023            ):  # self reference checked for `parent` field
    +2024                raise ValueError(
    +2025                    f"`weights.{wtype}.parent={entry.parent} not in specified weight"
    +2026                    + f" formats: {entries}"
    +2027                )
    +2028
    +2029        return self
    +
    + + + + +
    +
    +
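A hedged sketch of the `parent` bookkeeping this validator enforces: exactly one entry (the originally trained weights) carries no `parent`, and every converted entry names the format it was converted from. URLs and versions are placeholders, the architecture fields are the same assumptions as in the earlier sketch, and `perform_io_checks=False` avoids resolving any of the files.

```python
from bioimageio.spec import ValidationContext  # assumed public import
from bioimageio.spec.model.v0_5 import (       # assumed import path
    ArchitectureFromLibraryDescr,
    Identifier,
    PytorchStateDictWeightsDescr,
    TorchscriptWeightsDescr,
    WeightsDescr,
)

with ValidationContext(perform_io_checks=False):
    weights = WeightsDescr(
        pytorch_state_dict=PytorchStateDictWeightsDescr(   # original weights: no `parent`
            source="https://example.com/weights.pt",       # placeholder URL
            architecture=ArchitectureFromLibraryDescr(     # `callable` field assumed, see above
                callable=Identifier("UNet2d"),
                import_from="my_model_zoo.nets",
            ),
            pytorch_version="2.1",                          # placeholder version
        ),
        torchscript=TorchscriptWeightsDescr(               # converted from the state dict
            source="https://example.com/weights.torchscript.pt",
            pytorch_version="2.1",
            parent="pytorch_state_dict",
        ),
    )
```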
    + +
    + + class + ModelId(bioimageio.spec.generic.v0_3.ResourceId): + + + +
    + +
    2032class ModelId(ResourceId):
    +2033    pass
    +
    + + +

    str(object='') -> str +str(bytes_or_buffer[, encoding[, errors]]) -> str

    + +

    Create a new string object from the given object. If encoding or +errors is specified, then the object must expose a data buffer +that will be decoded using the given encoding and error handler. +Otherwise, returns the result of object.__str__() (if defined) +or repr(object). +encoding defaults to sys.getdefaultencoding(). +errors defaults to 'strict'.

    +
    + + +
    +
    Inherited Members
    +
    + +
    +
    +
    +
    + +
    + + class + LinkedModel(bioimageio.spec.generic.v0_3.LinkedResourceNode): + + + +
    + +
    2036class LinkedModel(LinkedResourceNode):
    +2037    """Reference to a bioimage.io model."""
    +2038
    +2039    id: ModelId
    +2040    """A valid model `id` from the bioimage.io collection."""
    +
    + + +

    Reference to a bioimage.io model.

    +
    + + +
    +
    + id: ModelId + + +
    + + +

    A valid model id from the bioimage.io collection.

    +
    + + +
    +
    +
    Inherited Members
    +
    + +
    +
    +
    +
    + +
    + + class + ModelDescr(bioimageio.spec.generic.v0_3.GenericModelDescrBase): + + + +
    + +
    2062class ModelDescr(GenericModelDescrBase, title="bioimage.io model specification"):
    +2063    """Specification of the fields used in a bioimage.io-compliant RDF to describe AI models with pretrained weights.
    +2064    These fields are typically stored in a YAML file which we call a model resource description file (model RDF).
    +2065    """
    +2066
    +2067    format_version: Literal["0.5.3"] = "0.5.3"
    +2068    """Version of the bioimage.io model description specification used.
    +2069    When creating a new model always use the latest micro/patch version described here.
    +2070    The `format_version` is important for any consumer software to understand how to parse the fields.
    +2071    """
    +2072
    +2073    type: Literal["model"] = "model"
    +2074    """Specialized resource type 'model'"""
    +2075
    +2076    id: Optional[ModelId] = None
    +2077    """bioimage.io-wide unique resource identifier
    +2078    assigned by bioimage.io; version **un**specific."""
    +2079
    +2080    authors: NotEmpty[List[Author]]
    +2081    """The authors are the creators of the model RDF and the primary points of contact."""
    +2082
    +2083    documentation: Annotated[
    +2084        DocumentationSource,
    +2085        Field(
    +2086            examples=[
    +2087                "https://raw.githubusercontent.com/bioimage-io/spec-bioimage-io/main/example_descriptions/models/unet2d_nuclei_broad/README.md",
    +2088                "README.md",
    +2089            ],
    +2090        ),
    +2091    ]
    +2092    """∈📦 URL or relative path to a markdown file with additional documentation.
    +2093    The recommended documentation file name is `README.md`. An `.md` suffix is mandatory.
    +2094    The documentation should include a '#[#] Validation' (sub)section
    +2095    with details on how to quantitatively validate the model on unseen data."""
    +2096
    +2097    @field_validator("documentation", mode="after")
    +2098    @classmethod
    +2099    def _validate_documentation(cls, value: DocumentationSource) -> DocumentationSource:
    +2100        if not validation_context_var.get().perform_io_checks:
    +2101            return value
    +2102
    +2103        doc_path = download(value).path
    +2104        doc_content = doc_path.read_text(encoding="utf-8")
    +2105        assert isinstance(doc_content, str)
    +2106        if not re.match("#.*[vV]alidation", doc_content):
    +2107            issue_warning(
    +2108                "No '# Validation' (sub)section found in {value}.",
    +2109                value=value,
    +2110                field="documentation",
    +2111            )
    +2112
    +2113        return value
    +2114
    +2115    inputs: NotEmpty[Sequence[InputTensorDescr]]
    +2116    """Describes the input tensors expected by this model."""
    +2117
    +2118    @field_validator("inputs", mode="after")
    +2119    @classmethod
    +2120    def _validate_input_axes(
    +2121        cls, inputs: Sequence[InputTensorDescr]
    +2122    ) -> Sequence[InputTensorDescr]:
    +2123        input_size_refs = cls._get_axes_with_independent_size(inputs)
    +2124
    +2125        for i, ipt in enumerate(inputs):
    +2126            valid_independent_refs: Dict[
    +2127                Tuple[TensorId, AxisId],
    +2128                Tuple[TensorDescr, AnyAxis, Union[int, ParameterizedSize]],
    +2129            ] = {
    +2130                **{
    +2131                    (ipt.id, a.id): (ipt, a, a.size)
    +2132                    for a in ipt.axes
    +2133                    if not isinstance(a, BatchAxis)
    +2134                    and isinstance(a.size, (int, ParameterizedSize))
    +2135                },
    +2136                **input_size_refs,
    +2137            }
    +2138            for a, ax in enumerate(ipt.axes):
    +2139                cls._validate_axis(
    +2140                    "inputs",
    +2141                    i=i,
    +2142                    tensor_id=ipt.id,
    +2143                    a=a,
    +2144                    axis=ax,
    +2145                    valid_independent_refs=valid_independent_refs,
    +2146                )
    +2147        return inputs
    +2148
    +2149    @staticmethod
    +2150    def _validate_axis(
    +2151        field_name: str,
    +2152        i: int,
    +2153        tensor_id: TensorId,
    +2154        a: int,
    +2155        axis: AnyAxis,
    +2156        valid_independent_refs: Dict[
    +2157            Tuple[TensorId, AxisId],
    +2158            Tuple[TensorDescr, AnyAxis, Union[int, ParameterizedSize]],
    +2159        ],
    +2160    ):
    +2161        if isinstance(axis, BatchAxis) or isinstance(
    +2162            axis.size, (int, ParameterizedSize, DataDependentSize)
    +2163        ):
    +2164            return
    +2165        elif not isinstance(axis.size, SizeReference):
    +2166            assert_never(axis.size)
    +2167
    +2168        # validate axis.size SizeReference
    +2169        ref = (axis.size.tensor_id, axis.size.axis_id)
    +2170        if ref not in valid_independent_refs:
    +2171            raise ValueError(
    +2172                "Invalid tensor axis reference at"
    +2173                + f" {field_name}[{i}].axes[{a}].size: {axis.size}."
    +2174            )
    +2175        if ref == (tensor_id, axis.id):
    +2176            raise ValueError(
    +2177                "Self-referencing not allowed for"
    +2178                + f" {field_name}[{i}].axes[{a}].size: {axis.size}"
    +2179            )
    +2180        if axis.type == "channel":
    +2181            if valid_independent_refs[ref][1].type != "channel":
    +2182                raise ValueError(
    +2183                    "A channel axis' size may only reference another fixed size"
    +2184                    + " channel axis."
    +2185                )
    +2186            if isinstance(axis.channel_names, str) and "{i}" in axis.channel_names:
    +2187                ref_size = valid_independent_refs[ref][2]
    +2188                assert isinstance(ref_size, int), (
    +2189                    "channel axis ref (another channel axis) has to specify fixed"
    +2190                    + " size"
    +2191                )
    +2192                generated_channel_names = [
    +2193                    Identifier(axis.channel_names.format(i=i))
    +2194                    for i in range(1, ref_size + 1)
    +2195                ]
    +2196                axis.channel_names = generated_channel_names
    +2197
    +2198        if (ax_unit := getattr(axis, "unit", None)) != (
    +2199            ref_unit := getattr(valid_independent_refs[ref][1], "unit", None)
    +2200        ):
    +2201            raise ValueError(
    +2202                "The units of an axis and its reference axis need to match, but"
    +2203                + f" '{ax_unit}' != '{ref_unit}'."
    +2204            )
    +2205        ref_axis = valid_independent_refs[ref][1]
    +2206        if isinstance(ref_axis, BatchAxis):
    +2207            raise ValueError(
    +2208                f"Invalid reference axis '{ref_axis.id}' for {tensor_id}.{axis.id}"
    +2209                + " (a batch axis is not allowed as reference)."
    +2210            )
    +2211
    +2212        if isinstance(axis, WithHalo):
    +2213            min_size = axis.size.get_size(axis, ref_axis, n=0)
    +2214            if (min_size - 2 * axis.halo) < 1:
    +2215                raise ValueError(
    +2216                    f"axis {axis.id} with minimum size {min_size} is too small for halo"
    +2217                    + f" {axis.halo}."
    +2218                )
    +2219
    +2220            input_halo = axis.halo * axis.scale / ref_axis.scale
    +2221            if input_halo != int(input_halo) or input_halo % 2 == 1:
    +2222                raise ValueError(
    +2223                    f"input_halo {input_halo} (output_halo {axis.halo} *"
    +2224                    + f" output_scale {axis.scale} / input_scale {ref_axis.scale})"
    +2225                    + f" is not an even integer for {tensor_id}.{axis.id}."
    +2226                )
    +2227
    +2228    @model_validator(mode="after")
    +2229    def _validate_test_tensors(self) -> Self:
    +2230        if not validation_context_var.get().perform_io_checks:
    +2231            return self
    +2232
    +2233        test_arrays = [
    +2234            load_array(descr.test_tensor.download().path)
    +2235            for descr in chain(self.inputs, self.outputs)
    +2236        ]
    +2237        tensors = {
    +2238            descr.id: (descr, array)
    +2239            for descr, array in zip(chain(self.inputs, self.outputs), test_arrays)
    +2240        }
    +2241        validate_tensors(tensors, tensor_origin="test_tensor")
    +2242        return self
    +2243
    +2244    @model_validator(mode="after")
    +2245    def _validate_tensor_references_in_proc_kwargs(self, info: ValidationInfo) -> Self:
    +2246        ipt_refs = {t.id for t in self.inputs}
    +2247        out_refs = {t.id for t in self.outputs}
    +2248        for ipt in self.inputs:
    +2249            for p in ipt.preprocessing:
    +2250                ref = p.kwargs.get("reference_tensor")
    +2251                if ref is None:
    +2252                    continue
    +2253                if ref not in ipt_refs:
    +2254                    raise ValueError(
    +2255                        f"`reference_tensor` '{ref}' not found. Valid input tensor"
    +2256                        + f" references are: {ipt_refs}."
    +2257                    )
    +2258
    +2259        for out in self.outputs:
    +2260            for p in out.postprocessing:
    +2261                ref = p.kwargs.get("reference_tensor")
    +2262                if ref is None:
    +2263                    continue
    +2264
    +2265                if ref not in ipt_refs and ref not in out_refs:
    +2266                    raise ValueError(
    +2267                        f"`reference_tensor` '{ref}' not found. Valid tensor references"
    +2268                        + f" are: {ipt_refs | out_refs}."
    +2269                    )
    +2270
    +2271        return self
    +2272
    +2273    # TODO: use validate funcs in validate_test_tensors
    +2274    # def validate_inputs(self, input_tensors: Mapping[TensorId, NDArray[Any]]) -> Mapping[TensorId, NDArray[Any]]:
    +2275
    +2276    name: Annotated[
    +2277        Annotated[
    +2278            str, RestrictCharacters(string.ascii_letters + string.digits + "_- ()")
    +2279        ],
    +2280        MinLen(5),
    +2281        MaxLen(128),
    +2282        warn(MaxLen(64), "Name longer than 64 characters.", INFO),
    +2283    ]
    +2284    """A human-readable name of this model.
    +2285    It should be no longer than 64 characters
+2286    and may only contain letters, numbers, underscores, minus signs, parentheses and spaces.
+2287    We recommend choosing a name that refers to the model's task and image modality.
    +2288    """
    +2289
    +2290    outputs: NotEmpty[Sequence[OutputTensorDescr]]
    +2291    """Describes the output tensors."""
    +2292
    +2293    @field_validator("outputs", mode="after")
    +2294    @classmethod
    +2295    def _validate_tensor_ids(
    +2296        cls, outputs: Sequence[OutputTensorDescr], info: ValidationInfo
    +2297    ) -> Sequence[OutputTensorDescr]:
    +2298        tensor_ids = [
    +2299            t.id for t in info.data.get("inputs", []) + info.data.get("outputs", [])
    +2300        ]
    +2301        duplicate_tensor_ids: List[str] = []
    +2302        seen: Set[str] = set()
    +2303        for t in tensor_ids:
    +2304            if t in seen:
    +2305                duplicate_tensor_ids.append(t)
    +2306
    +2307            seen.add(t)
    +2308
    +2309        if duplicate_tensor_ids:
    +2310            raise ValueError(f"Duplicate tensor ids: {duplicate_tensor_ids}")
    +2311
    +2312        return outputs
    +2313
    +2314    @staticmethod
    +2315    def _get_axes_with_parameterized_size(
    +2316        io: Union[Sequence[InputTensorDescr], Sequence[OutputTensorDescr]],
    +2317    ):
    +2318        return {
    +2319            f"{t.id}.{a.id}": (t, a, a.size)
    +2320            for t in io
    +2321            for a in t.axes
    +2322            if not isinstance(a, BatchAxis) and isinstance(a.size, ParameterizedSize)
    +2323        }
    +2324
    +2325    @staticmethod
    +2326    def _get_axes_with_independent_size(
    +2327        io: Union[Sequence[InputTensorDescr], Sequence[OutputTensorDescr]],
    +2328    ):
    +2329        return {
    +2330            (t.id, a.id): (t, a, a.size)
    +2331            for t in io
    +2332            for a in t.axes
    +2333            if not isinstance(a, BatchAxis)
    +2334            and isinstance(a.size, (int, ParameterizedSize))
    +2335        }
    +2336
    +2337    @field_validator("outputs", mode="after")
    +2338    @classmethod
    +2339    def _validate_output_axes(
    +2340        cls, outputs: List[OutputTensorDescr], info: ValidationInfo
    +2341    ) -> List[OutputTensorDescr]:
    +2342        input_size_refs = cls._get_axes_with_independent_size(
    +2343            info.data.get("inputs", [])
    +2344        )
    +2345        output_size_refs = cls._get_axes_with_independent_size(outputs)
    +2346
    +2347        for i, out in enumerate(outputs):
    +2348            valid_independent_refs: Dict[
    +2349                Tuple[TensorId, AxisId],
    +2350                Tuple[TensorDescr, AnyAxis, Union[int, ParameterizedSize]],
    +2351            ] = {
    +2352                **{
    +2353                    (out.id, a.id): (out, a, a.size)
    +2354                    for a in out.axes
    +2355                    if not isinstance(a, BatchAxis)
    +2356                    and isinstance(a.size, (int, ParameterizedSize))
    +2357                },
    +2358                **input_size_refs,
    +2359                **output_size_refs,
    +2360            }
    +2361            for a, ax in enumerate(out.axes):
    +2362                cls._validate_axis(
    +2363                    "outputs",
    +2364                    i,
    +2365                    out.id,
    +2366                    a,
    +2367                    ax,
    +2368                    valid_independent_refs=valid_independent_refs,
    +2369                )
    +2370
    +2371        return outputs
    +2372
    +2373    packaged_by: List[Author] = Field(default_factory=list)
    +2374    """The persons that have packaged and uploaded this model.
    +2375    Only required if those persons differ from the `authors`."""
    +2376
    +2377    parent: Optional[LinkedModel] = None
    +2378    """The model from which this model is derived, e.g. by fine-tuning the weights."""
    +2379
    +2380    # todo: add parent self check once we have `id`
    +2381    # @model_validator(mode="after")
    +2382    # def validate_parent_is_not_self(self) -> Self:
    +2383    #     if self.parent is not None and self.parent == self.id:
    +2384    #         raise ValueError("The model may not reference itself as parent model")
    +2385
    +2386    #     return self
    +2387
    +2388    run_mode: Annotated[
    +2389        Optional[RunMode],
+2390        warn(None, "Run mode '{value}' has limited support across consumer software."),
    +2391    ] = None
    +2392    """Custom run mode for this model: for more complex prediction procedures like test time
    +2393    data augmentation that currently cannot be expressed in the specification.
    +2394    No standard run modes are defined yet."""
    +2395
    +2396    timestamp: Datetime = Datetime(datetime.now())
    +2397    """Timestamp in [ISO 8601](#https://en.wikipedia.org/wiki/ISO_8601) format
    +2398    with a few restrictions listed [here](https://docs.python.org/3/library/datetime.html#datetime.datetime.fromisoformat).
    +2399    (In Python a datetime object is valid, too)."""
    +2400
    +2401    training_data: Annotated[
    +2402        Union[None, LinkedDataset, DatasetDescr, DatasetDescr02],
    +2403        Field(union_mode="left_to_right"),
    +2404    ] = None
    +2405    """The dataset used to train this model"""
    +2406
    +2407    weights: Annotated[WeightsDescr, WrapSerializer(package_weights)]
    +2408    """The weights for this model.
    +2409    Weights can be given for different formats, but should otherwise be equivalent.
    +2410    The available weight formats determine which consumers can use this model."""
    +2411
    +2412    @model_validator(mode="after")
    +2413    def _add_default_cover(self) -> Self:
    +2414        if not validation_context_var.get().perform_io_checks or self.covers:
    +2415            return self
    +2416
    +2417        try:
    +2418            generated_covers = generate_covers(
    +2419                [(t, load_array(t.test_tensor.download().path)) for t in self.inputs],
    +2420                [(t, load_array(t.test_tensor.download().path)) for t in self.outputs],
    +2421            )
    +2422        except Exception as e:
    +2423            issue_warning(
    +2424                "Failed to generate cover image(s): {e}",
    +2425                value=self.covers,
    +2426                msg_context=dict(e=e),
    +2427                field="covers",
    +2428            )
    +2429        else:
    +2430            self.covers.extend(generated_covers)
    +2431
    +2432        return self
    +2433
    +2434    def get_input_test_arrays(self) -> List[NDArray[Any]]:
    +2435        data = [load_array(ipt.test_tensor.download().path) for ipt in self.inputs]
    +2436        assert all(isinstance(d, np.ndarray) for d in data)
    +2437        return data
    +2438
    +2439    def get_output_test_arrays(self) -> List[NDArray[Any]]:
    +2440        data = [load_array(out.test_tensor.download().path) for out in self.outputs]
    +2441        assert all(isinstance(d, np.ndarray) for d in data)
    +2442        return data
    +2443
    +2444    @staticmethod
    +2445    def get_batch_size(tensor_sizes: Mapping[TensorId, Mapping[AxisId, int]]) -> int:
    +2446        batch_size = 1
    +2447        tensor_with_batchsize: Optional[TensorId] = None
    +2448        for tid in tensor_sizes:
    +2449            for aid, s in tensor_sizes[tid].items():
    +2450                if aid != BATCH_AXIS_ID or s == 1 or s == batch_size:
    +2451                    continue
    +2452
    +2453                if batch_size != 1:
    +2454                    assert tensor_with_batchsize is not None
    +2455                    raise ValueError(
    +2456                        f"batch size mismatch for tensors '{tensor_with_batchsize}' ({batch_size}) and '{tid}' ({s})"
    +2457                    )
    +2458
    +2459                batch_size = s
    +2460                tensor_with_batchsize = tid
    +2461
    +2462        return batch_size
    +2463
    +2464    def get_output_tensor_sizes(
    +2465        self, input_sizes: Mapping[TensorId, Mapping[AxisId, int]]
    +2466    ) -> Dict[TensorId, Dict[AxisId, Union[int, _DataDepSize]]]:
    +2467        """Returns the tensor output sizes for given **input_sizes**.
+2468        The tensor output sizes are exact only if **input_sizes** is a valid input shape.
+2469        Otherwise they might be larger than the actual (valid) output sizes."""
    +2470        batch_size = self.get_batch_size(input_sizes)
    +2471        ns = self.get_ns(input_sizes)
    +2472
    +2473        tensor_sizes = self.get_tensor_sizes(ns, batch_size=batch_size)
    +2474        return tensor_sizes.outputs
    +2475
    +2476    def get_ns(self, input_sizes: Mapping[TensorId, Mapping[AxisId, int]]):
    +2477        """get parameter `n` for each parameterized axis
    +2478        such that the valid input size is >= the given input size"""
    +2479        ret: Dict[Tuple[TensorId, AxisId], ParameterizedSize_N] = {}
    +2480        axes = {t.id: {a.id: a for a in t.axes} for t in self.inputs}
    +2481        for tid in input_sizes:
    +2482            for aid, s in input_sizes[tid].items():
    +2483                size_descr = axes[tid][aid].size
    +2484                if isinstance(size_descr, ParameterizedSize):
    +2485                    ret[(tid, aid)] = size_descr.get_n(s)
    +2486                elif size_descr is None or isinstance(size_descr, (int, SizeReference)):
    +2487                    pass
    +2488                else:
    +2489                    assert_never(size_descr)
    +2490
    +2491        return ret
    +2492
    +2493    def get_tensor_sizes(
    +2494        self, ns: Mapping[Tuple[TensorId, AxisId], ParameterizedSize_N], batch_size: int
    +2495    ) -> _TensorSizes:
    +2496        axis_sizes = self.get_axis_sizes(ns, batch_size=batch_size)
    +2497        return _TensorSizes(
    +2498            {
    +2499                t: {
    +2500                    aa: axis_sizes.inputs[(tt, aa)]
    +2501                    for tt, aa in axis_sizes.inputs
    +2502                    if tt == t
    +2503                }
    +2504                for t in {tt for tt, _ in axis_sizes.inputs}
    +2505            },
    +2506            {
    +2507                t: {
    +2508                    aa: axis_sizes.outputs[(tt, aa)]
    +2509                    for tt, aa in axis_sizes.outputs
    +2510                    if tt == t
    +2511                }
    +2512                for t in {tt for tt, _ in axis_sizes.outputs}
    +2513            },
    +2514        )
    +2515
    +2516    def get_axis_sizes(
    +2517        self,
    +2518        ns: Mapping[Tuple[TensorId, AxisId], ParameterizedSize_N],
    +2519        batch_size: Optional[int] = None,
    +2520        *,
    +2521        max_input_shape: Optional[Mapping[Tuple[TensorId, AxisId], int]] = None,
    +2522    ) -> _AxisSizes:
    +2523        """Determine input and output block shape for scale factors **ns**
    +2524        of parameterized input sizes.
    +2525
    +2526        Args:
    +2527            ns: Scale factor `n` for each axis (keyed by (tensor_id, axis_id))
    +2528                that is parameterized as `size = min + n * step`.
    +2529            batch_size: The desired size of the batch dimension.
    +2530                If given **batch_size** overwrites any batch size present in
    +2531                **max_input_shape**. Default 1.
    +2532            max_input_shape: Limits the derived block shapes.
    +2533                Each axis for which the input size, parameterized by `n`, is larger
    +2534                than **max_input_shape** is set to the minimal value `n_min` for which
    +2535                this is still true.
    +2536                Use this for small input samples or large values of **ns**.
    +2537                Or simply whenever you know the full input shape.
    +2538
    +2539        Returns:
    +2540            Resolved axis sizes for model inputs and outputs.
    +2541        """
    +2542        max_input_shape = max_input_shape or {}
    +2543        if batch_size is None:
    +2544            for (_t_id, a_id), s in max_input_shape.items():
    +2545                if a_id == BATCH_AXIS_ID:
    +2546                    batch_size = s
    +2547                    break
    +2548            else:
    +2549                batch_size = 1
    +2550
    +2551        all_axes = {
    +2552            t.id: {a.id: a for a in t.axes} for t in chain(self.inputs, self.outputs)
    +2553        }
    +2554
    +2555        inputs: Dict[Tuple[TensorId, AxisId], int] = {}
    +2556        outputs: Dict[Tuple[TensorId, AxisId], Union[int, _DataDepSize]] = {}
    +2557
    +2558        def get_axis_size(a: Union[InputAxis, OutputAxis]):
    +2559            if isinstance(a, BatchAxis):
    +2560                if (t_descr.id, a.id) in ns:
    +2561                    logger.warning(
    +2562                        "Ignoring unexpected size increment factor (n) for batch axis"
    +2563                        + " of tensor '{}'.",
    +2564                        t_descr.id,
    +2565                    )
    +2566                return batch_size
    +2567            elif isinstance(a.size, int):
    +2568                if (t_descr.id, a.id) in ns:
    +2569                    logger.warning(
    +2570                        "Ignoring unexpected size increment factor (n) for fixed size"
    +2571                        + " axis '{}' of tensor '{}'.",
    +2572                        a.id,
    +2573                        t_descr.id,
    +2574                    )
    +2575                return a.size
    +2576            elif isinstance(a.size, ParameterizedSize):
    +2577                if (t_descr.id, a.id) not in ns:
    +2578                    raise ValueError(
    +2579                        "Size increment factor (n) missing for parametrized axis"
    +2580                        + f" '{a.id}' of tensor '{t_descr.id}'."
    +2581                    )
    +2582                n = ns[(t_descr.id, a.id)]
    +2583                s_max = max_input_shape.get((t_descr.id, a.id))
    +2584                if s_max is not None:
    +2585                    n = min(n, a.size.get_n(s_max))
    +2586
    +2587                return a.size.get_size(n)
    +2588
    +2589            elif isinstance(a.size, SizeReference):
    +2590                if (t_descr.id, a.id) in ns:
    +2591                    logger.warning(
    +2592                        "Ignoring unexpected size increment factor (n) for axis '{}'"
    +2593                        + " of tensor '{}' with size reference.",
    +2594                        a.id,
    +2595                        t_descr.id,
    +2596                    )
    +2597                assert not isinstance(a, BatchAxis)
    +2598                ref_axis = all_axes[a.size.tensor_id][a.size.axis_id]
    +2599                assert not isinstance(ref_axis, BatchAxis)
    +2600                ref_key = (a.size.tensor_id, a.size.axis_id)
    +2601                ref_size = inputs.get(ref_key, outputs.get(ref_key))
    +2602                assert ref_size is not None, ref_key
    +2603                assert not isinstance(ref_size, _DataDepSize), ref_key
    +2604                return a.size.get_size(
    +2605                    axis=a,
    +2606                    ref_axis=ref_axis,
    +2607                    ref_size=ref_size,
    +2608                )
    +2609            elif isinstance(a.size, DataDependentSize):
    +2610                if (t_descr.id, a.id) in ns:
    +2611                    logger.warning(
    +2612                        "Ignoring unexpected increment factor (n) for data dependent"
    +2613                        + " size axis '{}' of tensor '{}'.",
    +2614                        a.id,
    +2615                        t_descr.id,
    +2616                    )
    +2617                return _DataDepSize(a.size.min, a.size.max)
    +2618            else:
    +2619                assert_never(a.size)
    +2620
+2621        # first resolve all input sizes except those given as a `SizeReference`
    +2622        for t_descr in self.inputs:
    +2623            for a in t_descr.axes:
    +2624                if not isinstance(a.size, SizeReference):
    +2625                    s = get_axis_size(a)
    +2626                    assert not isinstance(s, _DataDepSize)
    +2627                    inputs[t_descr.id, a.id] = s
    +2628
    +2629        # resolve all other input axis sizes
    +2630        for t_descr in self.inputs:
    +2631            for a in t_descr.axes:
    +2632                if isinstance(a.size, SizeReference):
    +2633                    s = get_axis_size(a)
    +2634                    assert not isinstance(s, _DataDepSize)
    +2635                    inputs[t_descr.id, a.id] = s
    +2636
    +2637        # resolve all output axis sizes
    +2638        for t_descr in self.outputs:
    +2639            for a in t_descr.axes:
    +2640                assert not isinstance(a.size, ParameterizedSize)
    +2641                s = get_axis_size(a)
    +2642                outputs[t_descr.id, a.id] = s
    +2643
    +2644        return _AxisSizes(inputs=inputs, outputs=outputs)
    +2645
    +2646    @model_validator(mode="before")
    +2647    @classmethod
    +2648    def _convert(cls, data: Dict[str, Any]) -> Dict[str, Any]:
    +2649        if (
    +2650            data.get("type") == "model"
    +2651            and isinstance(fv := data.get("format_version"), str)
    +2652            and fv.count(".") == 2
    +2653        ):
    +2654            fv_parts = fv.split(".")
    +2655            if any(not p.isdigit() for p in fv_parts):
    +2656                return data
    +2657
    +2658            fv_tuple = tuple(map(int, fv_parts))
    +2659
    +2660            assert cls.implemented_format_version_tuple[0:2] == (0, 5)
    +2661            if fv_tuple[:2] in ((0, 3), (0, 4)):
    +2662                m04 = _ModelDescr_v0_4.load(data)
    +2663                if not isinstance(m04, InvalidDescr):
    +2664                    return _model_conv.convert_as_dict(m04)
    +2665            elif fv_tuple[:2] == (0, 5):
    +2666                # bump patch version
    +2667                data["format_version"] = cls.implemented_format_version
    +2668
    +2669        return data
    +
    + + +

    Specification of the fields used in a bioimage.io-compliant RDF to describe AI models with pretrained weights. +These fields are typically stored in a YAML file which we call a model resource description file (model RDF).

    +
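For example (an illustrative sketch; the source path is a placeholder), a `ModelDescr` is typically obtained by loading a model RDF with `bioimageio.spec.load_description`:

```python
from bioimageio.spec import load_description
from bioimageio.spec.model.v0_5 import ModelDescr

# placeholder source: a local rdf.yaml, a zipped package or an RDF URL should all work
descr = load_description("path/to/rdf.yaml")
if isinstance(descr, ModelDescr):
    print(descr.name)
    print([(t.id, [a.id for a in t.axes]) for t in descr.inputs])
```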
    + + +
    +
    + format_version: Literal['0.5.3'] + + +
    + + +

    Version of the bioimage.io model description specification used. +When creating a new model always use the latest micro/patch version described here. +The format_version is important for any consumer software to understand how to parse the fields.

    +
    + + +
    +
    +
    + type: Literal['model'] + + +
    + + +

    Specialized resource type 'model'

    +
    + + +
    +
    +
    + id: Optional[ModelId] + + +
    + + +

    bioimage.io-wide unique resource identifier +assigned by bioimage.io; version unspecific.

    +
    + + +
    +
    +
    + authors: Annotated[List[bioimageio.spec.generic.v0_3.Author], MinLen(min_length=1)] + + +
    + + +

    The authors are the creators of the model RDF and the primary points of contact.

    +
    + + +
    +
    +
    + documentation: Annotated[Union[Annotated[pathlib.Path, PathType(path_type='file'), Predicate(is_absolute)], bioimageio.spec._internal.io.RelativeFilePath, bioimageio.spec._internal.url.HttpUrl], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')]), AfterValidator(func=<function _validate_md_suffix at 0x7f5380dc7e20>), PlainSerializer(func=<function _package at 0x7f538192d620>, return_type=PydanticUndefined, when_used='unless-none'), FieldInfo(annotation=NoneType, required=True, examples=['https://raw.githubusercontent.com/bioimage-io/spec-bioimage-io/main/example_descriptions/models/unet2d_nuclei_broad/README.md', 'README.md'])] + + +
    + + +

    ∈📦 URL or relative path to a markdown file with additional documentation. +The recommended documentation file name is README.md. An .md suffix is mandatory. +The documentation should include a '#[#] Validation' (sub)section +with details on how to quantitatively validate the model on unseen data.

    +
    + + +
    +
    +
    + inputs: Annotated[Sequence[InputTensorDescr], MinLen(min_length=1)] + + +
    + + +

    Describes the input tensors expected by this model.

    +
    + + +
    +
    +
    + name: Annotated[str, RestrictCharacters(alphabet='abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_- ()'), MinLen(min_length=5), MaxLen(max_length=128), AfterWarner(func=<function as_warning.<locals>.wrapper at 0x7f5370de8040>, severity=20, msg='Name longer than 64 characters.', context={'typ': Annotated[Any, MaxLen(max_length=64)]})] + + +
    + + +

A human-readable name of this model. +It should be no longer than 64 characters +and may only contain letters, numbers, underscores, minus signs, parentheses and spaces. +We recommend choosing a name that refers to the model's task and image modality.

    +
    + + +
    +
    +
    + outputs: Annotated[Sequence[OutputTensorDescr], MinLen(min_length=1)] + + +
    + + +

    Describes the output tensors.

    +
    + + +
    +
    +
    + packaged_by: List[bioimageio.spec.generic.v0_3.Author] + + +
    + + +

    The persons that have packaged and uploaded this model. +Only required if those persons differ from the authors.

    +
    + + +
    +
    +
    + parent: Optional[LinkedModel] + + +
    + + +

    The model from which this model is derived, e.g. by fine-tuning the weights.

    +
    + + +
    +
    +
    + run_mode: Annotated[Optional[bioimageio.spec.model.v0_4.RunMode], AfterWarner(func=<function as_warning.<locals>.wrapper at 0x7f5372331ee0>, severity=30, msg="Run mode '{value}' has limited support across consumer softwares.", context={'typ': None})] + + +
    + + +

    Custom run mode for this model: for more complex prediction procedures like test time +data augmentation that currently cannot be expressed in the specification. +No standard run modes are defined yet.

    +
    + + +
    +
    +
    + timestamp: bioimageio.spec._internal.types.Datetime + + +
    + + +

    Timestamp in ISO 8601 format +with a few restrictions listed here. +(In Python a datetime object is valid, too).

    +
    + + +
    +
    +
    + training_data: Annotated[Union[NoneType, bioimageio.spec.dataset.v0_3.LinkedDataset, bioimageio.spec.DatasetDescr, bioimageio.spec.dataset.v0_2.DatasetDescr], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])] + + +
    + + +

    The dataset used to train this model

    +
    + + +
    +
    +
    + weights: Annotated[WeightsDescr, WrapSerializer(func=<function package_weights at 0x7f537f154360>, return_type=PydanticUndefined, when_used='always')] + + +
    + + +

    The weights for this model. +Weights can be given for different formats, but should otherwise be equivalent. +The available weight formats determine which consumers can use this model.

    +
    + + +
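A sketch of inspecting which weight formats are provided, assuming (as in the v0.5 weight entry descriptions) that each present entry exposes a `source` file reference:

```python
# `descr` is a loaded ModelDescr (see the loading sketch above)
for format_name in type(descr.weights).model_fields:
    entry = getattr(descr.weights, format_name)
    if entry is not None:  # only the provided weight formats are set
        print(format_name, "->", entry.source)
```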
    +
    + +
    + + def + get_input_test_arrays(self) -> List[numpy.ndarray[Any, numpy.dtype[Any]]]: + + + +
    + +
    2434    def get_input_test_arrays(self) -> List[NDArray[Any]]:
    +2435        data = [load_array(ipt.test_tensor.download().path) for ipt in self.inputs]
    +2436        assert all(isinstance(d, np.ndarray) for d in data)
    +2437        return data
    +
    + + + + +
    +
    + +
    + + def + get_output_test_arrays(self) -> List[numpy.ndarray[Any, numpy.dtype[Any]]]: + + + +
    + +
    2439    def get_output_test_arrays(self) -> List[NDArray[Any]]:
    +2440        data = [load_array(out.test_tensor.download().path) for out in self.outputs]
    +2441        assert all(isinstance(d, np.ndarray) for d in data)
    +2442        return data
    +
    + + + + +
    +
    + +
    +
    @staticmethod
    + + def + get_batch_size( tensor_sizes: Mapping[TensorId, Mapping[AxisId, int]]) -> int: + + + +
    + +
    2444    @staticmethod
    +2445    def get_batch_size(tensor_sizes: Mapping[TensorId, Mapping[AxisId, int]]) -> int:
    +2446        batch_size = 1
    +2447        tensor_with_batchsize: Optional[TensorId] = None
    +2448        for tid in tensor_sizes:
    +2449            for aid, s in tensor_sizes[tid].items():
    +2450                if aid != BATCH_AXIS_ID or s == 1 or s == batch_size:
    +2451                    continue
    +2452
    +2453                if batch_size != 1:
    +2454                    assert tensor_with_batchsize is not None
    +2455                    raise ValueError(
    +2456                        f"batch size mismatch for tensors '{tensor_with_batchsize}' ({batch_size}) and '{tid}' ({s})"
    +2457                    )
    +2458
    +2459                batch_size = s
    +2460                tensor_with_batchsize = tid
    +2461
    +2462        return batch_size
    +
    + + + + +
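Usage sketch with hypothetical tensor ids, assuming the batch axis id is `batch`:

```python
from bioimageio.spec.model.v0_5 import AxisId, ModelDescr, TensorId

sizes = {
    TensorId("raw"): {AxisId("batch"): 2, AxisId("y"): 512, AxisId("x"): 512},
    TensorId("mask"): {AxisId("batch"): 2, AxisId("y"): 512, AxisId("x"): 512},
}
# returns the common batch size; raises ValueError if the tensors disagree
assert ModelDescr.get_batch_size(sizes) == 2
```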
    +
    + +
    + + def + get_output_tensor_sizes( self, input_sizes: Mapping[TensorId, Mapping[AxisId, int]]) -> Dict[TensorId, Dict[AxisId, Union[int, bioimageio.spec.model.v0_5._DataDepSize]]]: + + + +
    + +
    2464    def get_output_tensor_sizes(
    +2465        self, input_sizes: Mapping[TensorId, Mapping[AxisId, int]]
    +2466    ) -> Dict[TensorId, Dict[AxisId, Union[int, _DataDepSize]]]:
    +2467        """Returns the tensor output sizes for given **input_sizes**.
+2468        The tensor output sizes are exact only if **input_sizes** is a valid input shape.
+2469        Otherwise they might be larger than the actual (valid) output sizes."""
    +2470        batch_size = self.get_batch_size(input_sizes)
    +2471        ns = self.get_ns(input_sizes)
    +2472
    +2473        tensor_sizes = self.get_tensor_sizes(ns, batch_size=batch_size)
    +2474        return tensor_sizes.outputs
    +
    + + +

Returns the tensor output sizes for given input_sizes. +The tensor output sizes are exact only if input_sizes is a valid input shape. +Otherwise they might be larger than the actual (valid) output sizes.

    +
    + + +
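Continuing the sketch above (tensor and axis ids are hypothetical):

```python
input_sizes = {TensorId("raw"): {AxisId("batch"): 1, AxisId("y"): 512, AxisId("x"): 512}}
output_sizes = descr.get_output_tensor_sizes(input_sizes)
# e.g. {TensorId("segmentation"): {AxisId("batch"): 1, AxisId("y"): 512, AxisId("x"): 512}}
```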
    +
    + +
    + + def + get_ns( self, input_sizes: Mapping[TensorId, Mapping[AxisId, int]]): + + + +
    + +
    2476    def get_ns(self, input_sizes: Mapping[TensorId, Mapping[AxisId, int]]):
    +2477        """get parameter `n` for each parameterized axis
    +2478        such that the valid input size is >= the given input size"""
    +2479        ret: Dict[Tuple[TensorId, AxisId], ParameterizedSize_N] = {}
    +2480        axes = {t.id: {a.id: a for a in t.axes} for t in self.inputs}
    +2481        for tid in input_sizes:
    +2482            for aid, s in input_sizes[tid].items():
    +2483                size_descr = axes[tid][aid].size
    +2484                if isinstance(size_descr, ParameterizedSize):
    +2485                    ret[(tid, aid)] = size_descr.get_n(s)
    +2486                elif size_descr is None or isinstance(size_descr, (int, SizeReference)):
    +2487                    pass
    +2488                else:
    +2489                    assert_never(size_descr)
    +2490
    +2491        return ret
    +
    + + +

    get parameter n for each parameterized axis +such that the valid input size is >= the given input size

    +
    + + +
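For an axis described by a (hypothetical) `ParameterizedSize(min=64, step=16)`, asking for 100 pixels yields the smallest `n` whose size covers the request:

```python
ns = descr.get_ns({TensorId("raw"): {AxisId("y"): 100, AxisId("x"): 100}})
# e.g. {(TensorId("raw"), AxisId("y")): 3, (TensorId("raw"), AxisId("x")): 3}
# since 64 + 3 * 16 = 112 is the smallest valid size >= 100
```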
    +
    + +
    + + def + get_tensor_sizes( self, ns: Mapping[Tuple[TensorId, AxisId], int], batch_size: int) -> bioimageio.spec.model.v0_5._TensorSizes: + + + +
    + +
    2493    def get_tensor_sizes(
    +2494        self, ns: Mapping[Tuple[TensorId, AxisId], ParameterizedSize_N], batch_size: int
    +2495    ) -> _TensorSizes:
    +2496        axis_sizes = self.get_axis_sizes(ns, batch_size=batch_size)
    +2497        return _TensorSizes(
    +2498            {
    +2499                t: {
    +2500                    aa: axis_sizes.inputs[(tt, aa)]
    +2501                    for tt, aa in axis_sizes.inputs
    +2502                    if tt == t
    +2503                }
    +2504                for t in {tt for tt, _ in axis_sizes.inputs}
    +2505            },
    +2506            {
    +2507                t: {
    +2508                    aa: axis_sizes.outputs[(tt, aa)]
    +2509                    for tt, aa in axis_sizes.outputs
    +2510                    if tt == t
    +2511                }
    +2512                for t in {tt for tt, _ in axis_sizes.outputs}
    +2513            },
    +2514        )
    +
    + + + + +
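The returned `_TensorSizes` regroups the flat `(tensor_id, axis_id)` mapping of `get_axis_sizes` per tensor; continuing the sketch:

```python
tensor_sizes = descr.get_tensor_sizes(ns, batch_size=1)
tensor_sizes.inputs   # e.g. {TensorId("raw"): {AxisId("batch"): 1, AxisId("y"): 112, AxisId("x"): 112}}
tensor_sizes.outputs  # same structure; entries are ints or _DataDepSize
```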
    +
    + +
    + + def + get_axis_sizes( self, ns: Mapping[Tuple[TensorId, AxisId], int], batch_size: Optional[int] = None, *, max_input_shape: Optional[Mapping[Tuple[TensorId, AxisId], int]] = None) -> bioimageio.spec.model.v0_5._AxisSizes: + + + +
    + +
    2516    def get_axis_sizes(
    +2517        self,
    +2518        ns: Mapping[Tuple[TensorId, AxisId], ParameterizedSize_N],
    +2519        batch_size: Optional[int] = None,
    +2520        *,
    +2521        max_input_shape: Optional[Mapping[Tuple[TensorId, AxisId], int]] = None,
    +2522    ) -> _AxisSizes:
    +2523        """Determine input and output block shape for scale factors **ns**
    +2524        of parameterized input sizes.
    +2525
    +2526        Args:
    +2527            ns: Scale factor `n` for each axis (keyed by (tensor_id, axis_id))
    +2528                that is parameterized as `size = min + n * step`.
    +2529            batch_size: The desired size of the batch dimension.
    +2530                If given **batch_size** overwrites any batch size present in
    +2531                **max_input_shape**. Default 1.
    +2532            max_input_shape: Limits the derived block shapes.
    +2533                Each axis for which the input size, parameterized by `n`, is larger
    +2534                than **max_input_shape** is set to the minimal value `n_min` for which
    +2535                this is still true.
    +2536                Use this for small input samples or large values of **ns**.
    +2537                Or simply whenever you know the full input shape.
    +2538
    +2539        Returns:
    +2540            Resolved axis sizes for model inputs and outputs.
    +2541        """
    +2542        max_input_shape = max_input_shape or {}
    +2543        if batch_size is None:
    +2544            for (_t_id, a_id), s in max_input_shape.items():
    +2545                if a_id == BATCH_AXIS_ID:
    +2546                    batch_size = s
    +2547                    break
    +2548            else:
    +2549                batch_size = 1
    +2550
    +2551        all_axes = {
    +2552            t.id: {a.id: a for a in t.axes} for t in chain(self.inputs, self.outputs)
    +2553        }
    +2554
    +2555        inputs: Dict[Tuple[TensorId, AxisId], int] = {}
    +2556        outputs: Dict[Tuple[TensorId, AxisId], Union[int, _DataDepSize]] = {}
    +2557
    +2558        def get_axis_size(a: Union[InputAxis, OutputAxis]):
    +2559            if isinstance(a, BatchAxis):
    +2560                if (t_descr.id, a.id) in ns:
    +2561                    logger.warning(
    +2562                        "Ignoring unexpected size increment factor (n) for batch axis"
    +2563                        + " of tensor '{}'.",
    +2564                        t_descr.id,
    +2565                    )
    +2566                return batch_size
    +2567            elif isinstance(a.size, int):
    +2568                if (t_descr.id, a.id) in ns:
    +2569                    logger.warning(
    +2570                        "Ignoring unexpected size increment factor (n) for fixed size"
    +2571                        + " axis '{}' of tensor '{}'.",
    +2572                        a.id,
    +2573                        t_descr.id,
    +2574                    )
    +2575                return a.size
    +2576            elif isinstance(a.size, ParameterizedSize):
    +2577                if (t_descr.id, a.id) not in ns:
    +2578                    raise ValueError(
    +2579                        "Size increment factor (n) missing for parametrized axis"
    +2580                        + f" '{a.id}' of tensor '{t_descr.id}'."
    +2581                    )
    +2582                n = ns[(t_descr.id, a.id)]
    +2583                s_max = max_input_shape.get((t_descr.id, a.id))
    +2584                if s_max is not None:
    +2585                    n = min(n, a.size.get_n(s_max))
    +2586
    +2587                return a.size.get_size(n)
    +2588
    +2589            elif isinstance(a.size, SizeReference):
    +2590                if (t_descr.id, a.id) in ns:
    +2591                    logger.warning(
    +2592                        "Ignoring unexpected size increment factor (n) for axis '{}'"
    +2593                        + " of tensor '{}' with size reference.",
    +2594                        a.id,
    +2595                        t_descr.id,
    +2596                    )
    +2597                assert not isinstance(a, BatchAxis)
    +2598                ref_axis = all_axes[a.size.tensor_id][a.size.axis_id]
    +2599                assert not isinstance(ref_axis, BatchAxis)
    +2600                ref_key = (a.size.tensor_id, a.size.axis_id)
    +2601                ref_size = inputs.get(ref_key, outputs.get(ref_key))
    +2602                assert ref_size is not None, ref_key
    +2603                assert not isinstance(ref_size, _DataDepSize), ref_key
    +2604                return a.size.get_size(
    +2605                    axis=a,
    +2606                    ref_axis=ref_axis,
    +2607                    ref_size=ref_size,
    +2608                )
    +2609            elif isinstance(a.size, DataDependentSize):
    +2610                if (t_descr.id, a.id) in ns:
    +2611                    logger.warning(
    +2612                        "Ignoring unexpected increment factor (n) for data dependent"
    +2613                        + " size axis '{}' of tensor '{}'.",
    +2614                        a.id,
    +2615                        t_descr.id,
    +2616                    )
    +2617                return _DataDepSize(a.size.min, a.size.max)
    +2618            else:
    +2619                assert_never(a.size)
    +2620
+2621        # first resolve all input sizes except those given as a `SizeReference`
    +2622        for t_descr in self.inputs:
    +2623            for a in t_descr.axes:
    +2624                if not isinstance(a.size, SizeReference):
    +2625                    s = get_axis_size(a)
    +2626                    assert not isinstance(s, _DataDepSize)
    +2627                    inputs[t_descr.id, a.id] = s
    +2628
    +2629        # resolve all other input axis sizes
    +2630        for t_descr in self.inputs:
    +2631            for a in t_descr.axes:
    +2632                if isinstance(a.size, SizeReference):
    +2633                    s = get_axis_size(a)
    +2634                    assert not isinstance(s, _DataDepSize)
    +2635                    inputs[t_descr.id, a.id] = s
    +2636
    +2637        # resolve all output axis sizes
    +2638        for t_descr in self.outputs:
    +2639            for a in t_descr.axes:
    +2640                assert not isinstance(a.size, ParameterizedSize)
    +2641                s = get_axis_size(a)
    +2642                outputs[t_descr.id, a.id] = s
    +2643
    +2644        return _AxisSizes(inputs=inputs, outputs=outputs)
    +
    + + +

    Determine input and output block shape for scale factors ns +of parameterized input sizes.

    + +
    Arguments:
    + +
      +
    • ns: Scale factor n for each axis (keyed by (tensor_id, axis_id)) +that is parameterized as size = min + n * step.
    • +
    • batch_size: The desired size of the batch dimension. +If given batch_size overwrites any batch size present in +max_input_shape. Default 1.
    • +
    • max_input_shape: Limits the derived block shapes. +Each axis for which the input size, parameterized by n, is larger +than max_input_shape is set to the minimal value n_min for which +this is still true. +Use this for small input samples or large values of ns. +Or simply whenever you know the full input shape.
    • +
    + +
    Returns:
    + +
    +

    Resolved axis sizes for model inputs and outputs.

    +
    +
    + + +
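Continuing the sketch, the same resolution with a known (hypothetical) full input shape as an upper bound:

```python
axis_sizes = descr.get_axis_sizes(
    ns,
    batch_size=1,
    max_input_shape={
        (TensorId("raw"), AxisId("y")): 100,
        (TensorId("raw"), AxisId("x")): 100,
    },
)
axis_sizes.inputs   # {(tensor_id, axis_id): int, ...}
axis_sizes.outputs  # ints, or _DataDepSize for data dependent output axes
```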
    +
    +
    + implemented_format_version: ClassVar[str] = +'0.5.3' + + +
    + + + + +
    +
    +
    + implemented_format_version_tuple: ClassVar[Tuple[int, int, int]] = +(0, 5, 3) + + +
    + + + + +
    +
    + +
    + + def + model_post_init(self: pydantic.main.BaseModel, context: Any, /) -> None: + + + +
    + +
    124                    def wrapped_model_post_init(self: BaseModel, context: Any, /) -> None:
    +125                        """We need to both initialize private attributes and call the user-defined model_post_init
    +126                        method.
    +127                        """
    +128                        init_private_attributes(self, context)
    +129                        original_model_post_init(self, context)
    +
    + + +

    We need to both initialize private attributes and call the user-defined model_post_init +method.

    +
    + + +
    + +
    +
    + +
    + + def + generate_covers( inputs: Sequence[Tuple[InputTensorDescr, numpy.ndarray[Any, numpy.dtype[Any]]]], outputs: Sequence[Tuple[OutputTensorDescr, numpy.ndarray[Any, numpy.dtype[Any]]]]) -> List[pathlib.Path]: + + + +
    + +
    2894def generate_covers(
    +2895    inputs: Sequence[Tuple[InputTensorDescr, NDArray[Any]]],
    +2896    outputs: Sequence[Tuple[OutputTensorDescr, NDArray[Any]]],
    +2897) -> List[Path]:
    +2898    def squeeze(
    +2899        data: NDArray[Any], axes: Sequence[AnyAxis]
    +2900    ) -> Tuple[NDArray[Any], List[AnyAxis]]:
    +2901        """apply numpy.ndarray.squeeze while keeping track of the axis descriptions remaining"""
    +2902        if data.ndim != len(axes):
    +2903            raise ValueError(
    +2904                f"tensor shape {data.shape} does not match described axes"
    +2905                + f" {[a.id for a in axes]}"
    +2906            )
    +2907
    +2908        axes = [deepcopy(a) for a, s in zip(axes, data.shape) if s != 1]
    +2909        return data.squeeze(), axes
    +2910
    +2911    def normalize(
    +2912        data: NDArray[Any], axis: Optional[Tuple[int, ...]], eps: float = 1e-7
    +2913    ) -> NDArray[np.float32]:
    +2914        data = data.astype("float32")
    +2915        data -= data.min(axis=axis, keepdims=True)
    +2916        data /= data.max(axis=axis, keepdims=True) + eps
    +2917        return data
    +2918
    +2919    def to_2d_image(data: NDArray[Any], axes: Sequence[AnyAxis]):
    +2920        original_shape = data.shape
    +2921        data, axes = squeeze(data, axes)
    +2922
+2923        # take slice from any batch or index axis if needed
    +2924        # and convert the first channel axis and take a slice from any additional channel axes
    +2925        slices: Tuple[slice, ...] = ()
    +2926        ndim = data.ndim
    +2927        ndim_need = 3 if any(isinstance(a, ChannelAxis) for a in axes) else 2
    +2928        has_c_axis = False
    +2929        for i, a in enumerate(axes):
    +2930            s = data.shape[i]
    +2931            assert s > 1
    +2932            if (
    +2933                isinstance(a, (BatchAxis, IndexInputAxis, IndexOutputAxis))
    +2934                and ndim > ndim_need
    +2935            ):
    +2936                data = data[slices + (slice(s // 2 - 1, s // 2),)]
    +2937                ndim -= 1
    +2938            elif isinstance(a, ChannelAxis):
    +2939                if has_c_axis:
    +2940                    # second channel axis
    +2941                    data = data[slices + (slice(0, 1),)]
    +2942                    ndim -= 1
    +2943                else:
    +2944                    has_c_axis = True
    +2945                    if s == 2:
    +2946                        # visualize two channels with cyan and magenta
    +2947                        data = np.concatenate(
    +2948                            [
    +2949                                data[slices + (slice(1, 2),)],
    +2950                                data[slices + (slice(0, 1),)],
    +2951                                (
    +2952                                    data[slices + (slice(0, 1),)]
    +2953                                    + data[slices + (slice(1, 2),)]
    +2954                                )
    +2955                                / 2,  # TODO: take maximum instead?
    +2956                            ],
    +2957                            axis=i,
    +2958                        )
    +2959                    elif data.shape[i] == 3:
    +2960                        pass  # visualize 3 channels as RGB
    +2961                    else:
    +2962                        # visualize first 3 channels as RGB
    +2963                        data = data[slices + (slice(3),)]
    +2964
    +2965                    assert data.shape[i] == 3
    +2966
    +2967            slices += (slice(None),)
    +2968
    +2969        data, axes = squeeze(data, axes)
    +2970        assert len(axes) == ndim
    +2971        # take slice from z axis if needed
    +2972        slices = ()
    +2973        if ndim > ndim_need:
    +2974            for i, a in enumerate(axes):
    +2975                s = data.shape[i]
    +2976                if a.id == AxisId("z"):
    +2977                    data = data[slices + (slice(s // 2 - 1, s // 2),)]
    +2978                    data, axes = squeeze(data, axes)
    +2979                    ndim -= 1
    +2980                    break
    +2981
    +2982            slices += (slice(None),)
    +2983
    +2984        # take slice from any space or time axis
    +2985        slices = ()
    +2986
    +2987        for i, a in enumerate(axes):
    +2988            if ndim <= ndim_need:
    +2989                break
    +2990
    +2991            s = data.shape[i]
    +2992            assert s > 1
    +2993            if isinstance(
    +2994                a, (SpaceInputAxis, SpaceOutputAxis, TimeInputAxis, TimeOutputAxis)
    +2995            ):
    +2996                data = data[slices + (slice(s // 2 - 1, s // 2),)]
    +2997                ndim -= 1
    +2998
    +2999            slices += (slice(None),)
    +3000
    +3001        del slices
    +3002        data, axes = squeeze(data, axes)
    +3003        assert len(axes) == ndim
    +3004
+3005        if (has_c_axis and ndim != 3) or (not has_c_axis and ndim != 2):
    +3006            raise ValueError(
    +3007                f"Failed to construct cover image from shape {original_shape}"
    +3008            )
    +3009
    +3010        if not has_c_axis:
    +3011            assert ndim == 2
    +3012            data = np.repeat(data[:, :, None], 3, axis=2)
    +3013            axes.append(ChannelAxis(channel_names=list(map(Identifier, "RGB"))))
    +3014            ndim += 1
    +3015
    +3016        assert ndim == 3
    +3017
    +3018        # transpose axis order such that longest axis comes first...
    +3019        axis_order = list(np.argsort(list(data.shape)))
    +3020        axis_order.reverse()
    +3021        # ... and channel axis is last
    +3022        c = [i for i in range(3) if isinstance(axes[i], ChannelAxis)][0]
    +3023        axis_order.append(axis_order.pop(c))
    +3024        axes = [axes[ao] for ao in axis_order]
    +3025        data = data.transpose(axis_order)
    +3026
    +3027        # h, w = data.shape[:2]
    +3028        # if h / w  in (1.0 or 2.0):
    +3029        #     pass
    +3030        # elif h / w < 2:
    +3031        # TODO: enforce 2:1 or 1:1 aspect ratio for generated cover images
    +3032
    +3033        norm_along = (
    +3034            tuple(i for i, a in enumerate(axes) if a.type in ("space", "time")) or None
    +3035        )
    +3036        # normalize the data and map to 8 bit
    +3037        data = normalize(data, norm_along)
    +3038        data = (data * 255).astype("uint8")
    +3039
    +3040        return data
    +3041
    +3042    def create_diagonal_split_image(im0: NDArray[Any], im1: NDArray[Any]):
    +3043        assert im0.dtype == im1.dtype == np.uint8
    +3044        assert im0.shape == im1.shape
    +3045        assert im0.ndim == 3
    +3046        N, M, C = im0.shape
    +3047        assert C == 3
    +3048        out = np.ones((N, M, C), dtype="uint8")
    +3049        for c in range(C):
    +3050            outc = np.tril(im0[..., c])
    +3051            mask = outc == 0
    +3052            outc[mask] = np.triu(im1[..., c])[mask]
    +3053            out[..., c] = outc
    +3054
    +3055        return out
    +3056
    +3057    ipt_descr, ipt = inputs[0]
    +3058    out_descr, out = outputs[0]
    +3059
    +3060    ipt_img = to_2d_image(ipt, ipt_descr.axes)
    +3061    out_img = to_2d_image(out, out_descr.axes)
    +3062
    +3063    cover_folder = Path(mkdtemp())
    +3064    if ipt_img.shape == out_img.shape:
    +3065        covers = [cover_folder / "cover.png"]
    +3066        imwrite(covers[0], create_diagonal_split_image(ipt_img, out_img))
    +3067    else:
    +3068        covers = [cover_folder / "input.png", cover_folder / "output.png"]
    +3069        imwrite(covers[0], ipt_img)
    +3070        imwrite(covers[1], out_img)
    +3071
    +3072    return covers
    +
    + + + + +
    +
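For orientation, this mirrors how `ModelDescr._add_default_cover` uses the function above; an equivalent call via the public test-array helpers (with `descr` a loaded `ModelDescr`):

```python
covers = generate_covers(
    list(zip(descr.inputs, descr.get_input_test_arrays())),
    list(zip(descr.outputs, descr.get_output_test_arrays())),
)
# one diagonal split "cover.png" if input and output images share a shape,
# otherwise separate "input.png" and "output.png" (paths in a temporary folder)
```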
    +
    + + class + TensorDescrBase[Annotated[Union[BatchAxis, ChannelAxis, IndexInputAxis, TimeInputAxis, SpaceInputAxis], Discriminator]](bioimageio.spec._internal.node.Node, typing.Generic[~IO_AxisT]): + + +
    + + +

    Subpart of a resource description

    +
    + + + +
    +
    +
    + + class + TensorDescrBase[Annotated[Union[BatchAxis, ChannelAxis, IndexOutputAxis, Annotated[Union[Annotated[TimeOutputAxis, Tag], Annotated[TimeOutputAxisWithHalo, Tag]], Discriminator], Annotated[Union[Annotated[SpaceOutputAxis, Tag], Annotated[SpaceOutputAxisWithHalo, Tag]], Discriminator]], Discriminator]](bioimageio.spec._internal.node.Node, typing.Generic[~IO_AxisT]): + + +
    + + +

    Subpart of a resource description

    +
    + + + +
    +
    + + \ No newline at end of file diff --git a/bioimageio/spec/pretty_validation_errors.html b/bioimageio/spec/pretty_validation_errors.html new file mode 100644 index 00000000..f24aaa97 --- /dev/null +++ b/bioimageio/spec/pretty_validation_errors.html @@ -0,0 +1,441 @@ + + + + + + + bioimageio.spec.pretty_validation_errors API documentation + + + + + + + + + + +
    +
    +

    +bioimageio.spec.pretty_validation_errors

    + + + + + + +
     1from pprint import pformat
    + 2from types import TracebackType
    + 3from typing import Any, List, Type, Union
    + 4
    + 5from pydantic import ValidationError
    + 6
    + 7from .summary import format_loc
    + 8
    + 9try:
    +10    from IPython.core.getipython import get_ipython
    +11    from IPython.core.interactiveshell import InteractiveShell
    +12
    +13    class PrettyValidationError(ValueError):
    +14        """Wrap a pydantic.ValidationError to custumize formatting."""
    +15
    +16        def __init__(self, validation_error: ValidationError):
    +17            super().__init__()
    +18            self.error = validation_error
    +19
    +20        def __str__(self):
    +21            errors: List[str] = []
    +22            for e in self.error.errors(include_url=False):
    +23                ipt_lines = pformat(
    +24                    e["input"], sort_dicts=False, depth=1, compact=True, width=30
    +25                ).split("\n")
    +26                if len(ipt_lines) > 2:
    +27                    ipt_lines[1:-1] = ["..."]
    +28
    +29                ipt = " ".join([il.strip() for il in ipt_lines])
    +30
    +31                errors.append(
    +32                    f"\n{format_loc(e['loc'], enclose_in='')}\n  {e['msg']} [input={ipt}]"
    +33                )
    +34
    +35            return (
    +36                f"{self.error.error_count()} validation errors for"
    +37                f" {self.error.title}:{''.join(errors)}"
    +38            )
    +39
    +40    def _custom_exception_handler(
    +41        self: InteractiveShell,
    +42        etype: Type[ValidationError],
    +43        evalue: ValidationError,
    +44        tb: TracebackType,
    +45        tb_offset: Any = None,
    +46    ):
    +47        assert issubclass(etype, ValidationError), type(etype)
    +48        assert isinstance(evalue, ValidationError), type(etype)
    +49
    +50        stb: Union[Any, List[Union[str, Any]]]
    +51        stb = self.InteractiveTB.structured_traceback(  # pyright: ignore[reportUnknownVariableType]
    +52            etype, PrettyValidationError(evalue), tb, tb_offset=tb_offset
    +53        )
    +54
    +55        if isinstance(stb, list):
    +56            stb_clean = []
    +57            for line in stb:  # pyright: ignore[reportUnknownVariableType]
    +58                if (
    +59                    isinstance(line, str)
    +60                    and "pydantic" in line
    +61                    and "__tracebackhide__" in line
    +62                ):
    +63                    # ignore pydantic internal frame in traceback
    +64                    continue
    +65                stb_clean.append(line)
    +66
    +67            stb = stb_clean
    +68
    +69        self._showtraceback(etype, PrettyValidationError(evalue), stb)  # type: ignore
    +70
    +71    def enable_pretty_validation_errors_in_ipynb():
    +72        """A modestly hacky way to display prettified validaiton error messages and traceback
    +73        in interactive Python notebooks"""
    +74        ipy = get_ipython()
    +75        if ipy is not None:
    +76            ipy.set_custom_exc((ValidationError,), _custom_exception_handler)
    +77
    +78except ImportError:
    +79
    +80    def enable_pretty_validation_errors_in_ipynb():
    +81        return
    +
    + + +
    +
    + +
    + + class + PrettyValidationError(builtins.ValueError): + + + +
    + +
    14    class PrettyValidationError(ValueError):
    +15        """Wrap a pydantic.ValidationError to custumize formatting."""
    +16
    +17        def __init__(self, validation_error: ValidationError):
    +18            super().__init__()
    +19            self.error = validation_error
    +20
    +21        def __str__(self):
    +22            errors: List[str] = []
    +23            for e in self.error.errors(include_url=False):
    +24                ipt_lines = pformat(
    +25                    e["input"], sort_dicts=False, depth=1, compact=True, width=30
    +26                ).split("\n")
    +27                if len(ipt_lines) > 2:
    +28                    ipt_lines[1:-1] = ["..."]
    +29
    +30                ipt = " ".join([il.strip() for il in ipt_lines])
    +31
    +32                errors.append(
    +33                    f"\n{format_loc(e['loc'], enclose_in='')}\n  {e['msg']} [input={ipt}]"
    +34                )
    +35
    +36            return (
    +37                f"{self.error.error_count()} validation errors for"
    +38                f" {self.error.title}:{''.join(errors)}"
    +39            )
    +
    + + +

Wrap a pydantic.ValidationError to customize formatting.

    +
    + + +
    + +
    + + PrettyValidationError(validation_error: pydantic_core._pydantic_core.ValidationError) + + + +
    + +
    17        def __init__(self, validation_error: ValidationError):
    +18            super().__init__()
    +19            self.error = validation_error
    +
    + + + + +
    +
    +
    + error + + +
    + + + + +
    +
    +
    + +
    + + def + enable_pretty_validation_errors_in_ipynb(): + + + +
    + +
    72    def enable_pretty_validation_errors_in_ipynb():
    +73        """A modestly hacky way to display prettified validaiton error messages and traceback
    +74        in interactive Python notebooks"""
    +75        ipy = get_ipython()
    +76        if ipy is not None:
    +77            ipy.set_custom_exc((ValidationError,), _custom_exception_handler)
    +
    + + +

A modestly hacky way to display prettified validation error messages and traceback +in interactive Python notebooks

    +
    + + +
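Typical usage in a notebook; the call is a no-op outside IPython (and when IPython is not installed):

```python
from bioimageio.spec.pretty_validation_errors import (
    enable_pretty_validation_errors_in_ipynb,
)

enable_pretty_validation_errors_in_ipynb()
# uncaught pydantic.ValidationError tracebacks are now rendered via PrettyValidationError
```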
    +
    + + \ No newline at end of file diff --git a/bioimageio/spec/summary.html b/bioimageio/spec/summary.html new file mode 100644 index 00000000..9726b576 --- /dev/null +++ b/bioimageio/spec/summary.html @@ -0,0 +1,2255 @@ + + + + + + + bioimageio.spec.summary API documentation + + + + + + + + + + +
    +
    +

    +bioimageio.spec.summary

    + + + + + + +
      1import subprocess
    +  2from io import StringIO
    +  3from itertools import chain
    +  4from pathlib import Path
    +  5from tempfile import TemporaryDirectory
    +  6from types import MappingProxyType
    +  7from typing import (
    +  8    Any,
    +  9    Dict,
    + 10    Iterable,
    + 11    List,
    + 12    Literal,
    + 13    Mapping,
    + 14    NamedTuple,
    + 15    Optional,
    + 16    Sequence,
    + 17    Set,
    + 18    Tuple,
    + 19    Union,
    + 20    no_type_check,
    + 21)
    + 22
    + 23import rich.console
    + 24import rich.markdown
    + 25from pydantic import BaseModel, Field, field_validator, model_validator
    + 26from pydantic_core.core_schema import ErrorType
    + 27from typing_extensions import TypedDict, assert_never
    + 28
    + 29from ._internal.constants import VERSION
    + 30from ._internal.io import is_yaml_value
    + 31from ._internal.io_utils import write_yaml
    + 32from ._internal.type_guards import is_mapping
    + 33from ._internal.warning_levels import (
    + 34    ALERT,
    + 35    ALERT_NAME,
    + 36    ERROR,
    + 37    ERROR_NAME,
    + 38    INFO,
    + 39    INFO_NAME,
    + 40    WARNING,
    + 41    WARNING_NAME,
    + 42    WarningLevel,
    + 43    WarningSeverity,
    + 44)
    + 45from .conda_env import CondaEnv
    + 46
    + 47Loc = Tuple[Union[int, str], ...]
    + 48"""location of error/warning in a nested data structure"""
    + 49
    + 50WarningSeverityName = Literal["info", "warning", "alert"]
    + 51WarningLevelName = Literal[WarningSeverityName, "error"]
    + 52
    + 53WARNING_SEVERITY_TO_NAME: Mapping[WarningSeverity, WarningSeverityName] = (
    + 54    MappingProxyType({INFO: INFO_NAME, WARNING: WARNING_NAME, ALERT: ALERT_NAME})
    + 55)
    + 56WARNING_LEVEL_TO_NAME: Mapping[WarningLevel, WarningLevelName] = MappingProxyType(
    + 57    {INFO: INFO_NAME, WARNING: WARNING_NAME, ALERT: ALERT_NAME, ERROR: ERROR_NAME}
    + 58)
    + 59WARNING_NAME_TO_LEVEL: Mapping[WarningLevelName, WarningLevel] = MappingProxyType(
    + 60    {v: k for k, v in WARNING_LEVEL_TO_NAME.items()}
    + 61)
    + 62
    + 63
    + 64class ValidationEntry(BaseModel):
    + 65    """Base of `ErrorEntry` and `WarningEntry`"""
    + 66
    + 67    loc: Loc
    + 68    msg: str
    + 69    type: Union[ErrorType, str]
    + 70
    + 71
    + 72class ErrorEntry(ValidationEntry):
    + 73    """An error in a `ValidationDetail`"""
    + 74
    + 75    traceback: List[str] = Field(default_factory=list)
    + 76
    + 77
    + 78class WarningEntry(ValidationEntry):
    + 79    """A warning in a `ValidationDetail`"""
    + 80
    + 81    severity: WarningSeverity = WARNING
    + 82    severity_name: WarningSeverityName = WARNING_NAME
    + 83
    + 84    @model_validator(mode="before")
    + 85    @classmethod
    + 86    def sync_severity_with_severity_name(
    + 87        cls, data: Union[Mapping[Any, Any], Any]
    + 88    ) -> Any:
    + 89        if is_mapping(data):
    + 90            data = dict(data)
    + 91            if (
    + 92                "severity" in data
    + 93                and "severity_name" not in data
    + 94                and data["severity"] in WARNING_SEVERITY_TO_NAME
    + 95            ):
    + 96                data["severity_name"] = WARNING_SEVERITY_TO_NAME[data["severity"]]
    + 97
    + 98            if (
    + 99                "severity" in data
    +100                and "severity_name" not in data
    +101                and data["severity"] in WARNING_SEVERITY_TO_NAME
    +102            ):
    +103                data["severity"] = WARNING_NAME_TO_LEVEL[data["severity_name"]]
    +104
    +105        return data
    +106
    +107
    +108def format_loc(loc: Loc, enclose_in: str = "`") -> str:
    +109    """helper to format a location tuple `Loc` as Markdown string"""
    +110    if not loc:
    +111        loc = ("__root__",)
    +112
    +113    loc_str = ".".join(f"({x})" if x[0].isupper() else x for x in map(str, loc))
    +114
    +115    # additional field validation can make the location information quite convoluted, e.g.
    +116    # `weights.pytorch_state_dict.dependencies.source.function-after[validate_url_ok(), url['http','https']]` Input should be a valid URL, relative URL without a base
    +117    # therefore we remove the `.function-after[validate_url_ok(), url['http','https']]` here
    +118    brief_loc_str, *_ = loc_str.split(".function-after")
    +119    return f"{enclose_in}{brief_loc_str}{enclose_in}"
    +120
    +121
    +122class InstalledPackage(NamedTuple):
    +123    name: str
    +124    version: str
    +125    build: str = ""
    +126    channel: str = ""
    +127
    +128
    +129class ValidationContextSummary(TypedDict):
    +130    perform_io_checks: bool
    +131    known_files: Mapping[str, str]
    +132    root: str
    +133    warning_level: str
    +134
    +135
    +136class ValidationDetail(BaseModel, extra="allow"):
    +137    """a detail in a validation summary"""
    +138
    +139    name: str
    +140    status: Literal["passed", "failed"]
    +141    loc: Loc = ()
    +142    """location in the RDF that this detail applies to"""
    +143    errors: List[ErrorEntry] = Field(default_factory=list)
    +144    warnings: List[WarningEntry] = Field(default_factory=list)
    +145    context: Optional[ValidationContextSummary] = None
    +146
    +147    recommended_env: Optional[CondaEnv] = None
    +148    """recommended conda environemnt for this validation detail"""
    +149    conda_compare: Optional[str] = None
    +150    """output of `conda compare <recommended env>`"""
    +151
    +152    def model_post_init(self, __context: Any):
    +153        """create `conda_compare` default value if needed"""
    +154        super().model_post_init(__context)
    +155        if self.recommended_env is None or self.conda_compare is not None:
    +156            return
    +157
    +158        dumped_env = self.recommended_env.model_dump(mode="json")
    +159        if not is_yaml_value(dumped_env):
    +160            self.conda_compare = "Failed to dump recommended env to valid yaml"
    +161            return
    +162
    +163        with TemporaryDirectory() as d:
    +164            path = Path(d) / "env.yaml"
    +165            with path.open("w", encoding="utf-8") as f:
    +166                write_yaml(dumped_env, f)
    +167
    +168            compare_proc = subprocess.run(
    +169                ["conda", "compare", str(path)],
    +170                stdout=subprocess.PIPE,
    +171                stderr=subprocess.STDOUT,
    +172                shell=True,
    +173                text=True,
    +174            )
    +175            self.conda_compare = (
    +176                compare_proc.stdout
    +177                or f"conda compare exited with {compare_proc.returncode}"
    +178            )
    +179
    +180    def __str__(self):
    +181        return f"{self.__class__.__name__}:\n" + self.format()
    +182
    +183    @property
    +184    def status_icon(self):
    +185        if self.status == "passed":
    +186            return "✔️"
    +187        else:
    +188            return "❌"
    +189
    +190    def format(self, hide_tracebacks: bool = False, root_loc: Loc = ()) -> str:
    +191        """format as Markdown string"""
    +192        indent = "    " if root_loc else ""
    +193        errs_wrns = self._format_errors_and_warnings(
    +194            hide_tracebacks=hide_tracebacks, root_loc=root_loc
    +195        )
    +196        return f"{indent}{self.status_icon} {self.name.strip('.')}: {self.status}{errs_wrns}"
    +197
    +198    def _format_errors_and_warnings(self, hide_tracebacks: bool, root_loc: Loc):
    +199        indent = "    " if root_loc else ""
    +200        if hide_tracebacks:
    +201            tbs = [""] * len(self.errors)
    +202        else:
    +203            slim_tracebacks = [
    +204                [tt.replace("\n", "<br>") for t in e.traceback if (tt := t.strip())]
    +205                for e in self.errors
    +206            ]
    +207            tbs = [
    +208                ("<br>      Traceback:<br>      " if st else "") + "<br>      ".join(st)
    +209                for st in slim_tracebacks
    +210            ]
    +211
    +212        def join_parts(parts: Iterable[Tuple[str, str]]):
    +213            last_loc = None
    +214            lines: List[str] = []
    +215            for loc, msg in parts:
    +216                if loc == last_loc:
    +217                    lines.append(f"<br>  {loc} {msg}")
    +218                else:
    +219                    lines.append(f"<br>- {loc} {msg}")
    +220
    +221                last_loc = loc
    +222
    +223            return "".join(lines)
    +224
    +225        es = join_parts(
    +226            (format_loc(root_loc + e.loc), f"{e.msg}{tb}")
    +227            for e, tb in zip(self.errors, tbs)
    +228        )
    +229        ws = join_parts((format_loc(root_loc + w.loc), w.msg) for w in self.warnings)
    +230
    +231        return (
    +232            f"\n{indent}errors:\n{es}"
    +233            if es
    +234            else "" + f"\n{indent}warnings:\n{ws}" if ws else ""
    +235        )
    +236
    +237
    +238class ValidationSummary(BaseModel, extra="allow"):
    +239    """Summarizes output of all bioimageio validations and tests
    +240    for one specific `ResourceDescr` instance."""
    +241
    +242    name: str
    +243    source_name: str
    +244    type: str
    +245    format_version: str
    +246    status: Literal["passed", "failed"]
    +247    details: List[ValidationDetail]
    +248    env: Set[InstalledPackage] = Field(
    +249        default_factory=lambda: {
    +250            InstalledPackage(name="bioimageio.spec", version=VERSION)
    +251        }
    +252    )
    +253    """list of selected, relevant package versions"""
    +254
    +255    conda_list: Optional[Sequence[InstalledPackage]] = None
    +256    """parsed output of conda list"""
    +257
    +258    @property
    +259    def status_icon(self):
    +260        if self.status == "passed":
    +261            return "✔️"
    +262        else:
    +263            return "❌"
    +264
    +265    @property
    +266    def errors(self) -> List[ErrorEntry]:
    +267        return list(chain.from_iterable(d.errors for d in self.details))
    +268
    +269    @property
    +270    def warnings(self) -> List[WarningEntry]:
    +271        return list(chain.from_iterable(d.warnings for d in self.details))
    +272
    +273    def __str__(self):
    +274        return f"{self.__class__.__name__}:\n" + self.format()
    +275
    +276    @staticmethod
    +277    def _format_md_table(rows: List[List[str]]) -> str:
    +278        """format `rows` as markdown table"""
    +279        n_cols = len(rows[0])
    +280        assert all(len(row) == n_cols for row in rows)
    +281        col_widths = [max(max(len(row[i]) for row in rows), 3) for i in range(n_cols)]
    +282
    +283        # fix new lines in table cell
    +284        rows = [[line.replace("\n", "<br>") for line in r] for r in rows]
    +285
    +286        lines = [" | ".join(rows[0][i].center(col_widths[i]) for i in range(n_cols))]
    +287        lines.append(" | ".join("---".center(col_widths[i]) for i in range(n_cols)))
    +288        lines.extend(
    +289            [
    +290                " | ".join(row[i].ljust(col_widths[i]) for i in range(n_cols))
    +291                for row in rows[1:]
    +292            ]
    +293        )
    +294        return "\n| " + " |\n| ".join(lines) + " |\n"
    +295
    +296    def format(
    +297        self,
    +298        hide_tracebacks: bool = False,
    +299        hide_source: bool = False,
    +300        hide_env: bool = False,
    +301        root_loc: Loc = (),
    +302    ) -> str:
    +303        """Format summary as Markdown string
    +304
    +305        Suitable to embed in HTML using '<br>' instead of '\n'.
    +306        """
    +307        info = self._format_md_table(
    +308            [[self.status_icon, f"{self.name.strip('.').strip()} {self.status}"]]
    +309            + ([] if hide_source else [["source", self.source_name]])
    +310            + [
    +311                ["format version", f"{self.type} {self.format_version}"],
    +312            ]
    +313            + ([] if hide_env else [[e.name, e.version] for e in self.env])
    +314        )
    +315
    +316        def format_loc(loc: Loc):
    +317            return "`" + (".".join(map(str, root_loc + loc)) or ".") + "`"
    +318
    +319        details = [["❓", "location", "detail"]]
    +320        for d in self.details:
    +321            details.append([d.status_icon, format_loc(d.loc), d.name])
    +322            if d.context is not None:
    +323                details.append(
    +324                    [
    +325                        "🔍",
    +326                        "context.perform_io_checks",
    +327                        str(d.context["perform_io_checks"]),
    +328                    ]
    +329                )
    +330                if d.context["perform_io_checks"]:
    +331                    details.append(["🔍", "context.root", d.context["root"]])
    +332                    for kfn, sha in d.context["known_files"].items():
    +333                        details.append(["🔍", f"context.known_files.{kfn}", sha])
    +334
    +335                details.append(
    +336                    ["🔍", "context.warning_level", d.context["warning_level"]]
    +337                )
    +338
    +339            if d.recommended_env is not None:
    +340                rec_env = StringIO()
    +341                json_env = d.recommended_env.model_dump(
    +342                    mode="json", exclude_defaults=True
    +343                )
    +344                assert is_yaml_value(json_env)
    +345                write_yaml(json_env, rec_env)
    +346                rec_env_code = rec_env.getvalue().replace("\n", "</code><br><code>")
    +347                details.append(
    +348                    [
    +349                        "🐍",
    +350                        format_loc(d.loc),
    +351                        f"recommended conda env ({d.name})<br>"
    +352                        + f"<pre><code>{rec_env_code}</code></pre>",
    +353                    ]
    +354                )
    +355
    +356            if d.conda_compare:
    +357                details.append(
    +358                    [
    +359                        "🐍",
    +360                        format_loc(d.loc),
    +361                        f"conda compare ({d.name}):<br>"
    +362                        + d.conda_compare.replace("\n", "<br>"),
    +363                    ]
    +364                )
    +365
    +366            for entry in d.errors:
    +367                details.append(
    +368                    [
    +369                        "❌",
    +370                        format_loc(entry.loc),
    +371                        entry.msg.replace("\n\n", "<br>").replace("\n", "<br>"),
    +372                    ]
    +373                )
    +374                if hide_tracebacks:
    +375                    continue
    +376
    +377                formatted_tb_lines: List[str] = []
    +378                for tb in entry.traceback:
    +379                    if not (tb_stripped := tb.strip()):
    +380                        continue
    +381
    +382                    first_tb_line, *tb_lines = tb_stripped.split("\n")
    +383                    if (
    +384                        first_tb_line.startswith('File "')
    +385                        and '", line' in first_tb_line
    +386                    ):
    +387                        path, where = first_tb_line[len('File "') :].split('", line')
    +388                        try:
    +389                            p = Path(path)
    +390                        except Exception:
    +391                            file_name = path
    +392                        else:
    +393                            path = p.as_posix()
    +394                            file_name = p.name
    +395
    +396                        where = ", line" + where
    +397                        first_tb_line = f'[{file_name}]({file_name} "{path}"){where}'
    +398
    +399                    if tb_lines:
    +400                        tb_rest = "<br>`" + "`<br>`".join(tb_lines) + "`"
    +401                    else:
    +402                        tb_rest = ""
    +403
    +404                    formatted_tb_lines.append(first_tb_line + tb_rest)
    +405
    +406                details.append(["", "", "<br>".join(formatted_tb_lines)])
    +407
    +408            for entry in d.warnings:
    +409                details.append(["⚠", format_loc(entry.loc), entry.msg])
    +410
    +411        return f"{info}{self._format_md_table(details)}"
    +412
    +413    # TODO: fix bug which causes extensive white space between the info table and details table
    +414    @no_type_check
    +415    def display(self) -> None:
    +416        formatted = self.format()
    +417        try:
    +418            from IPython.core.getipython import get_ipython
    +419            from IPython.display import Markdown, display
    +420        except ImportError:
    +421            pass
    +422        else:
    +423            if get_ipython() is not None:
    +424                _ = display(Markdown(formatted))
    +425                return
    +426
    +427        rich_markdown = rich.markdown.Markdown(formatted)
    +428        console = rich.console.Console()
    +429        console.print(rich_markdown)
    +430
    +431    def add_detail(self, detail: ValidationDetail):
    +432        if detail.status == "failed":
    +433            self.status = "failed"
    +434        elif detail.status != "passed":
    +435            assert_never(detail.status)
    +436
    +437        self.details.append(detail)
    +438
    +439    @field_validator("env", mode="before")
    +440    def _convert_dict(cls, value: List[Union[List[str], Dict[str, str]]]):
    +441        """convert old env value for backwards compatibility"""
    +442        if isinstance(value, list):
    +443            return [
    +444                (
    +445                    (v["name"], v["version"], v.get("build", ""), v.get("channel", ""))
    +446                    if isinstance(v, dict) and "name" in v and "version" in v
    +447                    else v
    +448                )
    +449                for v in value
    +450            ]
    +451        else:
    +452            return value
    +
    + + +
    +
    +
    + Loc = +typing.Tuple[typing.Union[int, str], ...] + + +
    + + +

    location of error/warning in a nested data structure

    +
    + + +
    +
    +
    + WarningSeverityName = +typing.Literal['info', 'warning', 'alert'] + + +
    + + + + +
    +
    +
    + WarningLevelName = +typing.Literal['info', 'warning', 'alert', 'error'] + + +
    + + + + +
    +
    +
    + WARNING_SEVERITY_TO_NAME: Mapping[Literal[20, 30, 35], Literal['info', 'warning', 'alert']] = +mappingproxy({20: 'info', 30: 'warning', 35: 'alert'}) + + +
    + + + + +
    +
    +
    + WARNING_LEVEL_TO_NAME: Mapping[Literal[20, 30, 35, 50], Literal['info', 'warning', 'alert', 'error']] = +mappingproxy({20: 'info', 30: 'warning', 35: 'alert', 50: 'error'}) + + +
    + + + + +
    +
    +
    + WARNING_NAME_TO_LEVEL: Mapping[Literal['info', 'warning', 'alert', 'error'], Literal[20, 30, 35, 50]] = +mappingproxy({'info': 20, 'warning': 30, 'alert': 35, 'error': 50}) + + +
    + + + + +
    +
    + +
    + + class + ValidationEntry(pydantic.main.BaseModel): + + + +
    + +
    65class ValidationEntry(BaseModel):
    +66    """Base of `ErrorEntry` and `WarningEntry`"""
    +67
    +68    loc: Loc
    +69    msg: str
    +70    type: Union[ErrorType, str]
    +
    + + +

    Base of ErrorEntry and WarningEntry

    +
    + + +
    +
    + loc: Tuple[Union[int, str], ...] + + +
    + + + + +
    +
    +
    + msg: str + + +
    + + + + +
    +
    +
    + type: Union[Literal['no_such_attribute', 'json_invalid', 'json_type', 'recursion_loop', 'missing', 'frozen_field', 'frozen_instance', 'extra_forbidden', 'invalid_key', 'get_attribute_error', 'model_type', 'model_attributes_type', 'dataclass_type', 'dataclass_exact_type', 'none_required', 'greater_than', 'greater_than_equal', 'less_than', 'less_than_equal', 'multiple_of', 'finite_number', 'too_short', 'too_long', 'iterable_type', 'iteration_error', 'string_type', 'string_sub_type', 'string_unicode', 'string_too_short', 'string_too_long', 'string_pattern_mismatch', 'enum', 'dict_type', 'mapping_type', 'list_type', 'tuple_type', 'set_type', 'bool_type', 'bool_parsing', 'int_type', 'int_parsing', 'int_parsing_size', 'int_from_float', 'float_type', 'float_parsing', 'bytes_type', 'bytes_too_short', 'bytes_too_long', 'bytes_invalid_encoding', 'value_error', 'assertion_error', 'literal_error', 'date_type', 'date_parsing', 'date_from_datetime_parsing', 'date_from_datetime_inexact', 'date_past', 'date_future', 'time_type', 'time_parsing', 'datetime_type', 'datetime_parsing', 'datetime_object_invalid', 'datetime_from_date_parsing', 'datetime_past', 'datetime_future', 'timezone_naive', 'timezone_aware', 'timezone_offset', 'time_delta_type', 'time_delta_parsing', 'frozen_set_type', 'is_instance_of', 'is_subclass_of', 'callable_type', 'union_tag_invalid', 'union_tag_not_found', 'arguments_type', 'missing_argument', 'unexpected_keyword_argument', 'missing_keyword_only_argument', 'unexpected_positional_argument', 'missing_positional_only_argument', 'multiple_argument_values', 'url_type', 'url_parsing', 'url_syntax_violation', 'url_too_long', 'url_scheme', 'uuid_type', 'uuid_parsing', 'uuid_version', 'decimal_type', 'decimal_parsing', 'decimal_max_digits', 'decimal_max_places', 'decimal_whole_digits', 'complex_type', 'complex_str_parsing'], str] + + +
    + + + + +
    +
    +
    + +
    + + class + ErrorEntry(ValidationEntry): + + + +
    + +
    73class ErrorEntry(ValidationEntry):
    +74    """An error in a `ValidationDetail`"""
    +75
    +76    traceback: List[str] = Field(default_factory=list)
    +
    + + +

    An error in a ValidationDetail

    +
    + + +
    +
    + traceback: List[str] + + +
    + + + + +
    +
    +
    Inherited Members
    +
    + +
    +
    +
    +
    + +
    + + class + WarningEntry(ValidationEntry): + + + +
    + +
     79class WarningEntry(ValidationEntry):
    + 80    """A warning in a `ValidationDetail`"""
    + 81
    + 82    severity: WarningSeverity = WARNING
    + 83    severity_name: WarningSeverityName = WARNING_NAME
    + 84
    + 85    @model_validator(mode="before")
    + 86    @classmethod
    + 87    def sync_severity_with_severity_name(
    + 88        cls, data: Union[Mapping[Any, Any], Any]
    + 89    ) -> Any:
    + 90        if is_mapping(data):
    + 91            data = dict(data)
    + 92            if (
    + 93                "severity" in data
    + 94                and "severity_name" not in data
    + 95                and data["severity"] in WARNING_SEVERITY_TO_NAME
    + 96            ):
    + 97                data["severity_name"] = WARNING_SEVERITY_TO_NAME[data["severity"]]
    + 98
    + 99            if (
    +100                "severity" in data
    +101                and "severity_name" not in data
    +102                and data["severity"] in WARNING_SEVERITY_TO_NAME
    +103            ):
    +104                data["severity"] = WARNING_NAME_TO_LEVEL[data["severity_name"]]
    +105
    +106        return data
    +
    + + +

    A warning in a ValidationDetail

    +
    + + +
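A minimal usage sketch for `WarningEntry` (locations and messages below are illustrative); `severity_name` is derived from `severity` by the `sync_severity_with_severity_name` validator shown above:

```python
from bioimageio.spec.summary import WarningEntry

# defaults: severity=30 ("warning")
w = WarningEntry(loc=("inputs", 0), msg="axis order looks unusual", type="value_error")
print(w.severity, w.severity_name)  # 30 warning

# passing only `severity` fills in the matching `severity_name`
alert = WarningEntry(loc=(), msg="field is deprecated", type="deprecated", severity=35)
print(alert.severity_name)  # alert
```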
    +
    + severity: Literal[20, 30, 35] + + +
    + + + + +
    +
    +
    + severity_name: Literal['info', 'warning', 'alert'] + + +
    + + + + +
    +
    + +
    +
    @model_validator(mode='before')
    +
    @classmethod
    + + def + sync_severity_with_severity_name(cls, data: Union[Mapping[Any, Any], Any]) -> Any: + + + +
    + +
     85    @model_validator(mode="before")
    + 86    @classmethod
    + 87    def sync_severity_with_severity_name(
    + 88        cls, data: Union[Mapping[Any, Any], Any]
    + 89    ) -> Any:
    + 90        if is_mapping(data):
    + 91            data = dict(data)
    + 92            if (
    + 93                "severity" in data
    + 94                and "severity_name" not in data
    + 95                and data["severity"] in WARNING_SEVERITY_TO_NAME
    + 96            ):
    + 97                data["severity_name"] = WARNING_SEVERITY_TO_NAME[data["severity"]]
    + 98
    + 99            if (
    +100                "severity" in data
    +101                and "severity_name" not in data
    +102                and data["severity"] in WARNING_SEVERITY_TO_NAME
    +103            ):
    +104                data["severity"] = WARNING_NAME_TO_LEVEL[data["severity_name"]]
    +105
    +106        return data
    +
    + + + + +
    +
    +
    Inherited Members
    +
    + +
    +
    +
    +
    + +
    + + def + format_loc(loc: Tuple[Union[int, str], ...], enclose_in: str = '`') -> str: + + + +
    + +
    109def format_loc(loc: Loc, enclose_in: str = "`") -> str:
    +110    """helper to format a location tuple `Loc` as Markdown string"""
    +111    if not loc:
    +112        loc = ("__root__",)
    +113
    +114    loc_str = ".".join(f"({x})" if x[0].isupper() else x for x in map(str, loc))
    +115
    +116    # additional field validation can make the location information quite convoluted, e.g.
    +117    # `weights.pytorch_state_dict.dependencies.source.function-after[validate_url_ok(), url['http','https']]` Input should be a valid URL, relative URL without a base
    +118    # therefore we remove the `.function-after[validate_url_ok(), url['http','https']]` here
    +119    brief_loc_str, *_ = loc_str.split(".function-after")
    +120    return f"{enclose_in}{brief_loc_str}{enclose_in}"
    +
    + + +

    helper to format a location tuple Loc as Markdown string

    +
    + + +
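For illustration, a small sketch of what `format_loc` returns for a location tuple (the tuples are made up):

```python
from bioimageio.spec.summary import format_loc

print(format_loc(("weights", "pytorch_state_dict", "source")))  # `weights.pytorch_state_dict.source`
print(format_loc(()))                                           # empty locations fall back to `__root__`
print(format_loc(("inputs", 0, "axes"), enclose_in="**"))       # **inputs.0.axes**
```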
    +
    + +
    + + class + InstalledPackage(typing.NamedTuple): + + + +
    + +
    123class InstalledPackage(NamedTuple):
    +124    name: str
    +125    version: str
    +126    build: str = ""
    +127    channel: str = ""
    +
    + + +

    InstalledPackage(name, version, build, channel)

    +
    + + +
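A tiny sketch of constructing an `InstalledPackage` entry (the version string is illustrative); `build` and `channel` default to empty strings:

```python
from bioimageio.spec.summary import InstalledPackage

pkg = InstalledPackage(name="bioimageio.spec", version="0.5.3.5")
print(pkg)  # InstalledPackage(name='bioimageio.spec', version='0.5.3.5', build='', channel='')
print(pkg.build == pkg.channel == "")  # True
```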
    +
    + + InstalledPackage(name: str, version: str, build: str = '', channel: str = '') + + +
    + + +

    Create new instance of InstalledPackage(name, version, build, channel)

    +
    + + +
    +
    +
    + name: str + + +
    + + +

    Alias for field number 0

    +
    + + +
    +
    +
    + version: str + + +
    + + +

    Alias for field number 1

    +
    + + +
    +
    +
    + build: str + + +
    + + +

    Alias for field number 2

    +
    + + +
    +
    +
    + channel: str + + +
    + + +

    Alias for field number 3

    +
    + + +
    +
    +
    + +
    + + class + ValidationContextSummary(typing_extensions.TypedDict): + + + +
    + +
    130class ValidationContextSummary(TypedDict):
    +131    perform_io_checks: bool
    +132    known_files: Mapping[str, str]
    +133    root: str
    +134    warning_level: str
    +
    + + + + +
    +
    + perform_io_checks: bool + + +
    + + + + +
    +
    +
    + known_files: Mapping[str, str] + + +
    + + + + +
    +
    +
    + root: str + + +
    + + + + +
    +
    +
    + warning_level: str + + +
    + + + + +
    +
    +
    + +
    + + class + ValidationDetail(pydantic.main.BaseModel): + + + +
    + +
    137class ValidationDetail(BaseModel, extra="allow"):
    +138    """a detail in a validation summary"""
    +139
    +140    name: str
    +141    status: Literal["passed", "failed"]
    +142    loc: Loc = ()
    +143    """location in the RDF that this detail applies to"""
    +144    errors: List[ErrorEntry] = Field(default_factory=list)
    +145    warnings: List[WarningEntry] = Field(default_factory=list)
    +146    context: Optional[ValidationContextSummary] = None
    +147
    +148    recommended_env: Optional[CondaEnv] = None
    +149    """recommended conda environemnt for this validation detail"""
    +150    conda_compare: Optional[str] = None
    +151    """output of `conda compare <recommended env>`"""
    +152
    +153    def model_post_init(self, __context: Any):
    +154        """create `conda_compare` default value if needed"""
    +155        super().model_post_init(__context)
    +156        if self.recommended_env is None or self.conda_compare is not None:
    +157            return
    +158
    +159        dumped_env = self.recommended_env.model_dump(mode="json")
    +160        if not is_yaml_value(dumped_env):
    +161            self.conda_compare = "Failed to dump recommended env to valid yaml"
    +162            return
    +163
    +164        with TemporaryDirectory() as d:
    +165            path = Path(d) / "env.yaml"
    +166            with path.open("w", encoding="utf-8") as f:
    +167                write_yaml(dumped_env, f)
    +168
    +169            compare_proc = subprocess.run(
    +170                ["conda", "compare", str(path)],
    +171                stdout=subprocess.PIPE,
    +172                stderr=subprocess.STDOUT,
    +173                shell=True,
    +174                text=True,
    +175            )
    +176            self.conda_compare = (
    +177                compare_proc.stdout
    +178                or f"conda compare exited with {compare_proc.returncode}"
    +179            )
    +180
    +181    def __str__(self):
    +182        return f"{self.__class__.__name__}:\n" + self.format()
    +183
    +184    @property
    +185    def status_icon(self):
    +186        if self.status == "passed":
    +187            return "✔️"
    +188        else:
    +189            return "❌"
    +190
    +191    def format(self, hide_tracebacks: bool = False, root_loc: Loc = ()) -> str:
    +192        """format as Markdown string"""
    +193        indent = "    " if root_loc else ""
    +194        errs_wrns = self._format_errors_and_warnings(
    +195            hide_tracebacks=hide_tracebacks, root_loc=root_loc
    +196        )
    +197        return f"{indent}{self.status_icon} {self.name.strip('.')}: {self.status}{errs_wrns}"
    +198
    +199    def _format_errors_and_warnings(self, hide_tracebacks: bool, root_loc: Loc):
    +200        indent = "    " if root_loc else ""
    +201        if hide_tracebacks:
    +202            tbs = [""] * len(self.errors)
    +203        else:
    +204            slim_tracebacks = [
    +205                [tt.replace("\n", "<br>") for t in e.traceback if (tt := t.strip())]
    +206                for e in self.errors
    +207            ]
    +208            tbs = [
    +209                ("<br>      Traceback:<br>      " if st else "") + "<br>      ".join(st)
    +210                for st in slim_tracebacks
    +211            ]
    +212
    +213        def join_parts(parts: Iterable[Tuple[str, str]]):
    +214            last_loc = None
    +215            lines: List[str] = []
    +216            for loc, msg in parts:
    +217                if loc == last_loc:
    +218                    lines.append(f"<br>  {loc} {msg}")
    +219                else:
    +220                    lines.append(f"<br>- {loc} {msg}")
    +221
    +222                last_loc = loc
    +223
    +224            return "".join(lines)
    +225
    +226        es = join_parts(
    +227            (format_loc(root_loc + e.loc), f"{e.msg}{tb}")
    +228            for e, tb in zip(self.errors, tbs)
    +229        )
    +230        ws = join_parts((format_loc(root_loc + w.loc), w.msg) for w in self.warnings)
    +231
    +232        return (
    +233            f"\n{indent}errors:\n{es}"
    +234            if es
    +235            else "" + f"\n{indent}warnings:\n{ws}" if ws else ""
    +236        )
    +
    + + +

    a detail in a validation summary

    +
    + + +
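A minimal sketch of building a `ValidationDetail` by hand (names and messages are illustrative); leaving `recommended_env` unset means `model_post_init` skips the `conda compare` call:

```python
from bioimageio.spec.summary import ErrorEntry, ValidationDetail

detail = ValidationDetail(
    name="bioimageio.spec format validation",
    status="failed",
    errors=[
        ErrorEntry(
            loc=("inputs", 0, "axes"),
            msg="Input should be a valid list",
            type="list_type",
        )
    ],
)
print(detail.status_icon)                  # ❌
print(detail.format(hide_tracebacks=True)) # Markdown-formatted detail with its errors
```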
    +
    + name: str + + +
    + + + + +
    +
    +
    + status: Literal['passed', 'failed'] + + +
    + + + + +
    +
    +
    + loc: Tuple[Union[int, str], ...] + + +
    + + +

    location in the RDF that this detail applies to

    +
    + + +
    +
    +
    + errors: List[ErrorEntry] + + +
    + + + + +
    +
    +
    + warnings: List[WarningEntry] + + +
    + + + + +
    +
    +
    + context: Optional[ValidationContextSummary] + + +
    + + + + +
    +
    +
    + recommended_env: Optional[bioimageio.spec.conda_env.CondaEnv] + + +
    + + +

    recommended conda environment for this validation detail

    +
    + + +
    +
    +
    + conda_compare: Optional[str] + + +
    + + +

    output of conda compare <recommended env>

    +
    + + +
    +
    + +
    + + def + model_post_init(self, _ValidationDetail__context: Any): + + + +
    + +
    153    def model_post_init(self, __context: Any):
    +154        """create `conda_compare` default value if needed"""
    +155        super().model_post_init(__context)
    +156        if self.recommended_env is None or self.conda_compare is not None:
    +157            return
    +158
    +159        dumped_env = self.recommended_env.model_dump(mode="json")
    +160        if not is_yaml_value(dumped_env):
    +161            self.conda_compare = "Failed to dump recommended env to valid yaml"
    +162            return
    +163
    +164        with TemporaryDirectory() as d:
    +165            path = Path(d) / "env.yaml"
    +166            with path.open("w", encoding="utf-8") as f:
    +167                write_yaml(dumped_env, f)
    +168
    +169            compare_proc = subprocess.run(
    +170                ["conda", "compare", str(path)],
    +171                stdout=subprocess.PIPE,
    +172                stderr=subprocess.STDOUT,
    +173                shell=True,
    +174                text=True,
    +175            )
    +176            self.conda_compare = (
    +177                compare_proc.stdout
    +178                or f"conda compare exited with {compare_proc.returncode}"
    +179            )
    +
    + + +

    create conda_compare default value if needed

    +
    + + +
    +
    + +
    + status_icon + + + +
    + +
    184    @property
    +185    def status_icon(self):
    +186        if self.status == "passed":
    +187            return "✔️"
    +188        else:
    +189            return "❌"
    +
    + + + + +
    +
    + +
    + + def + format( self, hide_tracebacks: bool = False, root_loc: Tuple[Union[int, str], ...] = ()) -> str: + + + +
    + +
    191    def format(self, hide_tracebacks: bool = False, root_loc: Loc = ()) -> str:
    +192        """format as Markdown string"""
    +193        indent = "    " if root_loc else ""
    +194        errs_wrns = self._format_errors_and_warnings(
    +195            hide_tracebacks=hide_tracebacks, root_loc=root_loc
    +196        )
    +197        return f"{indent}{self.status_icon} {self.name.strip('.')}: {self.status}{errs_wrns}"
    +
    + + +

    format as Markdown string

    +
    + + +
    +
    +
    + +
    + + class + ValidationSummary(pydantic.main.BaseModel): + + + +
    + +
    239class ValidationSummary(BaseModel, extra="allow"):
    +240    """Summarizes output of all bioimageio validations and tests
    +241    for one specific `ResourceDescr` instance."""
    +242
    +243    name: str
    +244    source_name: str
    +245    type: str
    +246    format_version: str
    +247    status: Literal["passed", "failed"]
    +248    details: List[ValidationDetail]
    +249    env: Set[InstalledPackage] = Field(
    +250        default_factory=lambda: {
    +251            InstalledPackage(name="bioimageio.spec", version=VERSION)
    +252        }
    +253    )
    +254    """list of selected, relevant package versions"""
    +255
    +256    conda_list: Optional[Sequence[InstalledPackage]] = None
    +257    """parsed output of conda list"""
    +258
    +259    @property
    +260    def status_icon(self):
    +261        if self.status == "passed":
    +262            return "✔️"
    +263        else:
    +264            return "❌"
    +265
    +266    @property
    +267    def errors(self) -> List[ErrorEntry]:
    +268        return list(chain.from_iterable(d.errors for d in self.details))
    +269
    +270    @property
    +271    def warnings(self) -> List[WarningEntry]:
    +272        return list(chain.from_iterable(d.warnings for d in self.details))
    +273
    +274    def __str__(self):
    +275        return f"{self.__class__.__name__}:\n" + self.format()
    +276
    +277    @staticmethod
    +278    def _format_md_table(rows: List[List[str]]) -> str:
    +279        """format `rows` as markdown table"""
    +280        n_cols = len(rows[0])
    +281        assert all(len(row) == n_cols for row in rows)
    +282        col_widths = [max(max(len(row[i]) for row in rows), 3) for i in range(n_cols)]
    +283
    +284        # fix new lines in table cell
    +285        rows = [[line.replace("\n", "<br>") for line in r] for r in rows]
    +286
    +287        lines = [" | ".join(rows[0][i].center(col_widths[i]) for i in range(n_cols))]
    +288        lines.append(" | ".join("---".center(col_widths[i]) for i in range(n_cols)))
    +289        lines.extend(
    +290            [
    +291                " | ".join(row[i].ljust(col_widths[i]) for i in range(n_cols))
    +292                for row in rows[1:]
    +293            ]
    +294        )
    +295        return "\n| " + " |\n| ".join(lines) + " |\n"
    +296
    +297    def format(
    +298        self,
    +299        hide_tracebacks: bool = False,
    +300        hide_source: bool = False,
    +301        hide_env: bool = False,
    +302        root_loc: Loc = (),
    +303    ) -> str:
    +304        """Format summary as Markdown string
    +305
    +306        Suitable to embed in HTML using '<br>' instead of '\n'.
    +307        """
    +308        info = self._format_md_table(
    +309            [[self.status_icon, f"{self.name.strip('.').strip()} {self.status}"]]
    +310            + ([] if hide_source else [["source", self.source_name]])
    +311            + [
    +312                ["format version", f"{self.type} {self.format_version}"],
    +313            ]
    +314            + ([] if hide_env else [[e.name, e.version] for e in self.env])
    +315        )
    +316
    +317        def format_loc(loc: Loc):
    +318            return "`" + (".".join(map(str, root_loc + loc)) or ".") + "`"
    +319
    +320        details = [["❓", "location", "detail"]]
    +321        for d in self.details:
    +322            details.append([d.status_icon, format_loc(d.loc), d.name])
    +323            if d.context is not None:
    +324                details.append(
    +325                    [
    +326                        "🔍",
    +327                        "context.perform_io_checks",
    +328                        str(d.context["perform_io_checks"]),
    +329                    ]
    +330                )
    +331                if d.context["perform_io_checks"]:
    +332                    details.append(["🔍", "context.root", d.context["root"]])
    +333                    for kfn, sha in d.context["known_files"].items():
    +334                        details.append(["🔍", f"context.known_files.{kfn}", sha])
    +335
    +336                details.append(
    +337                    ["🔍", "context.warning_level", d.context["warning_level"]]
    +338                )
    +339
    +340            if d.recommended_env is not None:
    +341                rec_env = StringIO()
    +342                json_env = d.recommended_env.model_dump(
    +343                    mode="json", exclude_defaults=True
    +344                )
    +345                assert is_yaml_value(json_env)
    +346                write_yaml(json_env, rec_env)
    +347                rec_env_code = rec_env.getvalue().replace("\n", "</code><br><code>")
    +348                details.append(
    +349                    [
    +350                        "🐍",
    +351                        format_loc(d.loc),
    +352                        f"recommended conda env ({d.name})<br>"
    +353                        + f"<pre><code>{rec_env_code}</code></pre>",
    +354                    ]
    +355                )
    +356
    +357            if d.conda_compare:
    +358                details.append(
    +359                    [
    +360                        "🐍",
    +361                        format_loc(d.loc),
    +362                        f"conda compare ({d.name}):<br>"
    +363                        + d.conda_compare.replace("\n", "<br>"),
    +364                    ]
    +365                )
    +366
    +367            for entry in d.errors:
    +368                details.append(
    +369                    [
    +370                        "❌",
    +371                        format_loc(entry.loc),
    +372                        entry.msg.replace("\n\n", "<br>").replace("\n", "<br>"),
    +373                    ]
    +374                )
    +375                if hide_tracebacks:
    +376                    continue
    +377
    +378                formatted_tb_lines: List[str] = []
    +379                for tb in entry.traceback:
    +380                    if not (tb_stripped := tb.strip()):
    +381                        continue
    +382
    +383                    first_tb_line, *tb_lines = tb_stripped.split("\n")
    +384                    if (
    +385                        first_tb_line.startswith('File "')
    +386                        and '", line' in first_tb_line
    +387                    ):
    +388                        path, where = first_tb_line[len('File "') :].split('", line')
    +389                        try:
    +390                            p = Path(path)
    +391                        except Exception:
    +392                            file_name = path
    +393                        else:
    +394                            path = p.as_posix()
    +395                            file_name = p.name
    +396
    +397                        where = ", line" + where
    +398                        first_tb_line = f'[{file_name}]({file_name} "{path}"){where}'
    +399
    +400                    if tb_lines:
    +401                        tb_rest = "<br>`" + "`<br>`".join(tb_lines) + "`"
    +402                    else:
    +403                        tb_rest = ""
    +404
    +405                    formatted_tb_lines.append(first_tb_line + tb_rest)
    +406
    +407                details.append(["", "", "<br>".join(formatted_tb_lines)])
    +408
    +409            for entry in d.warnings:
    +410                details.append(["⚠", format_loc(entry.loc), entry.msg])
    +411
    +412        return f"{info}{self._format_md_table(details)}"
    +413
    +414    # TODO: fix bug which causes extensive white space between the info table and details table
    +415    @no_type_check
    +416    def display(self) -> None:
    +417        formatted = self.format()
    +418        try:
    +419            from IPython.core.getipython import get_ipython
    +420            from IPython.display import Markdown, display
    +421        except ImportError:
    +422            pass
    +423        else:
    +424            if get_ipython() is not None:
    +425                _ = display(Markdown(formatted))
    +426                return
    +427
    +428        rich_markdown = rich.markdown.Markdown(formatted)
    +429        console = rich.console.Console()
    +430        console.print(rich_markdown)
    +431
    +432    def add_detail(self, detail: ValidationDetail):
    +433        if detail.status == "failed":
    +434            self.status = "failed"
    +435        elif detail.status != "passed":
    +436            assert_never(detail.status)
    +437
    +438        self.details.append(detail)
    +439
    +440    @field_validator("env", mode="before")
    +441    def _convert_dict(cls, value: List[Union[List[str], Dict[str, str]]]):
    +442        """convert old env value for backwards compatibility"""
    +443        if isinstance(value, list):
    +444            return [
    +445                (
    +446                    (v["name"], v["version"], v.get("build", ""), v.get("channel", ""))
    +447                    if isinstance(v, dict) and "name" in v and "version" in v
    +448                    else v
    +449                )
    +450                for v in value
    +451            ]
    +452        else:
    +453            return value
    +
    + + +

    Summarizes output of all bioimageio validations and tests for one specific ResourceDescr instance.

    +
    + + +
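A minimal sketch of assembling a `ValidationSummary` programmatically (all field values are illustrative); note how `add_detail` downgrades the overall status when a detail failed:

```python
from bioimageio.spec.summary import ValidationDetail, ValidationSummary

summary = ValidationSummary(
    name="bioimageio validation",
    source_name="https://example.com/model/rdf.yaml",  # illustrative source
    type="model",
    format_version="0.5.3",
    status="passed",
    details=[ValidationDetail(name="initialized ModelDescr", status="passed")],
)
summary.add_detail(ValidationDetail(name="additional check", status="failed"))

print(summary.status)                                   # failed
print(summary.format(hide_tracebacks=True, hide_env=True))
summary.display()  # Markdown via IPython if available, otherwise rich console output
```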
    +
    + name: str + + +
    + + + + +
    +
    +
    + source_name: str + + +
    + + + + +
    +
    +
    + type: str + + +
    + + + + +
    +
    +
    + format_version: str + + +
    + + + + +
    +
    +
    + status: Literal['passed', 'failed'] + + +
    + + + + +
    +
    +
    + details: List[ValidationDetail] + + +
    + + + + +
    +
    +
    + env: Set[InstalledPackage] + + +
    + + +

    list of selected, relevant package versions

    +
    + + +
    +
    +
    + conda_list: Optional[Sequence[InstalledPackage]] + + +
    + + +

    parsed output of conda list

    +
    + + +
    +
    + +
    + status_icon + + + +
    + +
    259    @property
    +260    def status_icon(self):
    +261        if self.status == "passed":
    +262            return "✔️"
    +263        else:
    +264            return "❌"
    +
    + + + + +
    +
    + +
    + errors: List[ErrorEntry] + + + +
    + +
    266    @property
    +267    def errors(self) -> List[ErrorEntry]:
    +268        return list(chain.from_iterable(d.errors for d in self.details))
    +
    + + + + +
    +
    + +
    + warnings: List[WarningEntry] + + + +
    + +
    270    @property
    +271    def warnings(self) -> List[WarningEntry]:
    +272        return list(chain.from_iterable(d.warnings for d in self.details))
    +
    + + + + +
    +
    + +
    + + def + format( self, hide_tracebacks: bool = False, hide_source: bool = False, hide_env: bool = False, root_loc: Tuple[Union[int, str], ...] = ()) -> str: + + + +
    + +
    297    def format(
    +298        self,
    +299        hide_tracebacks: bool = False,
    +300        hide_source: bool = False,
    +301        hide_env: bool = False,
    +302        root_loc: Loc = (),
    +303    ) -> str:
    +304        """Format summary as Markdown string
    +305
    +306        Suitable to embed in HTML using '<br>' instead of '\n'.
    +307        """
    +308        info = self._format_md_table(
    +309            [[self.status_icon, f"{self.name.strip('.').strip()} {self.status}"]]
    +310            + ([] if hide_source else [["source", self.source_name]])
    +311            + [
    +312                ["format version", f"{self.type} {self.format_version}"],
    +313            ]
    +314            + ([] if hide_env else [[e.name, e.version] for e in self.env])
    +315        )
    +316
    +317        def format_loc(loc: Loc):
    +318            return "`" + (".".join(map(str, root_loc + loc)) or ".") + "`"
    +319
    +320        details = [["❓", "location", "detail"]]
    +321        for d in self.details:
    +322            details.append([d.status_icon, format_loc(d.loc), d.name])
    +323            if d.context is not None:
    +324                details.append(
    +325                    [
    +326                        "🔍",
    +327                        "context.perform_io_checks",
    +328                        str(d.context["perform_io_checks"]),
    +329                    ]
    +330                )
    +331                if d.context["perform_io_checks"]:
    +332                    details.append(["🔍", "context.root", d.context["root"]])
    +333                    for kfn, sha in d.context["known_files"].items():
    +334                        details.append(["🔍", f"context.known_files.{kfn}", sha])
    +335
    +336                details.append(
    +337                    ["🔍", "context.warning_level", d.context["warning_level"]]
    +338                )
    +339
    +340            if d.recommended_env is not None:
    +341                rec_env = StringIO()
    +342                json_env = d.recommended_env.model_dump(
    +343                    mode="json", exclude_defaults=True
    +344                )
    +345                assert is_yaml_value(json_env)
    +346                write_yaml(json_env, rec_env)
    +347                rec_env_code = rec_env.getvalue().replace("\n", "</code><br><code>")
    +348                details.append(
    +349                    [
    +350                        "🐍",
    +351                        format_loc(d.loc),
    +352                        f"recommended conda env ({d.name})<br>"
    +353                        + f"<pre><code>{rec_env_code}</code></pre>",
    +354                    ]
    +355                )
    +356
    +357            if d.conda_compare:
    +358                details.append(
    +359                    [
    +360                        "🐍",
    +361                        format_loc(d.loc),
    +362                        f"conda compare ({d.name}):<br>"
    +363                        + d.conda_compare.replace("\n", "<br>"),
    +364                    ]
    +365                )
    +366
    +367            for entry in d.errors:
    +368                details.append(
    +369                    [
    +370                        "❌",
    +371                        format_loc(entry.loc),
    +372                        entry.msg.replace("\n\n", "<br>").replace("\n", "<br>"),
    +373                    ]
    +374                )
    +375                if hide_tracebacks:
    +376                    continue
    +377
    +378                formatted_tb_lines: List[str] = []
    +379                for tb in entry.traceback:
    +380                    if not (tb_stripped := tb.strip()):
    +381                        continue
    +382
    +383                    first_tb_line, *tb_lines = tb_stripped.split("\n")
    +384                    if (
    +385                        first_tb_line.startswith('File "')
    +386                        and '", line' in first_tb_line
    +387                    ):
    +388                        path, where = first_tb_line[len('File "') :].split('", line')
    +389                        try:
    +390                            p = Path(path)
    +391                        except Exception:
    +392                            file_name = path
    +393                        else:
    +394                            path = p.as_posix()
    +395                            file_name = p.name
    +396
    +397                        where = ", line" + where
    +398                        first_tb_line = f'[{file_name}]({file_name} "{path}"){where}'
    +399
    +400                    if tb_lines:
    +401                        tb_rest = "<br>`" + "`<br>`".join(tb_lines) + "`"
    +402                    else:
    +403                        tb_rest = ""
    +404
    +405                    formatted_tb_lines.append(first_tb_line + tb_rest)
    +406
    +407                details.append(["", "", "<br>".join(formatted_tb_lines)])
    +408
    +409            for entry in d.warnings:
    +410                details.append(["⚠", format_loc(entry.loc), entry.msg])
    +411
    +412        return f"{info}{self._format_md_table(details)}"
    +
    + + +

    Format summary as Markdown string

    + +
    Suitable to embed in HTML using '<br>' instead of '\n'.

    +
    + + +
    +
    + +
    +
    @no_type_check
    + + def + display(self) -> None: + + + +
    + +
    415    @no_type_check
    +416    def display(self) -> None:
    +417        formatted = self.format()
    +418        try:
    +419            from IPython.core.getipython import get_ipython
    +420            from IPython.display import Markdown, display
    +421        except ImportError:
    +422            pass
    +423        else:
    +424            if get_ipython() is not None:
    +425                _ = display(Markdown(formatted))
    +426                return
    +427
    +428        rich_markdown = rich.markdown.Markdown(formatted)
    +429        console = rich.console.Console()
    +430        console.print(rich_markdown)
    +
    + + + + +
    +
    + +
    + + def + add_detail(self, detail: ValidationDetail): + + + +
    + +
    432    def add_detail(self, detail: ValidationDetail):
    +433        if detail.status == "failed":
    +434            self.status = "failed"
    +435        elif detail.status != "passed":
    +436            assert_never(detail.status)
    +437
    +438        self.details.append(detail)
    +
    + + + + +
    +
    +
    + + \ No newline at end of file diff --git a/bioimageio/spec/utils.html b/bioimageio/spec/utils.html new file mode 100644 index 00000000..0d93d57f --- /dev/null +++ b/bioimageio/spec/utils.html @@ -0,0 +1,892 @@ + + + + + + + bioimageio.spec.utils API documentation + + + + + + + + + + +
    +
    +

    +bioimageio.spec.utils

    + + + + + + +
     1import json
    + 2from typing import List, TypedDict
    + 3
    + 4from ._description import ensure_description_is_dataset, ensure_description_is_model
    + 5from ._internal.io import (
    + 6    download,
    + 7    extract_file_name,
    + 8    get_sha256,
    + 9    identify_bioimageio_yaml_file_name,
    +10    is_valid_bioimageio_yaml_name,
    +11)
    +12from ._internal.io_utils import load_array, save_array
    +13from ._internal.utils import files
    +14
    +15__all__ = [
    +16    "download",
    +17    "ensure_description_is_dataset",
    +18    "ensure_description_is_model",
    +19    "extract_file_name",
    +20    "get_sha256",
    +21    "get_spdx_licenses",
    +22    "identify_bioimageio_yaml_file_name",
    +23    "is_valid_bioimageio_yaml_name",
    +24    "load_array",
    +25    "save_array",
    +26    "SpdxLicenseEntry",
    +27    "SpdxLicenses",
    +28]
    +29
    +30
    +31class SpdxLicenseEntry(TypedDict):
    +32    isDeprecatedLicenseId: bool
    +33    isKnownByZenodo: bool
    +34    isOsiApproved: bool
    +35    licenseId: str
    +36    name: str
    +37    reference: str
    +38
    +39
    +40class SpdxLicenses(TypedDict):
    +41    licenseListVersion: str
    +42    licenses: List[SpdxLicenseEntry]
    +43    releaseDate: str
    +44
    +45
    +46def get_spdx_licenses() -> SpdxLicenses:
    +47    """get details of the SPDX licenses known to bioimageio.spec"""
    +48    with files("bioimageio.spec").joinpath("static/spdx_licenses.json").open(
    +49        "r", encoding="utf-8"
    +50    ) as f:
    +51        return json.load(f)
    +
    + + +
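A short sketch of using `get_spdx_licenses` from the listing above; the returned dict is indexed as defined by the `SpdxLicenses` and `SpdxLicenseEntry` TypedDicts:

```python
from bioimageio.spec.utils import get_spdx_licenses

licenses = get_spdx_licenses()
print(licenses["licenseListVersion"], licenses["releaseDate"])

osi_approved = [e["licenseId"] for e in licenses["licenses"] if e["isOsiApproved"]]
print(f"{len(osi_approved)} OSI-approved license ids known to bioimageio.spec")
```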
    +
    + +
    + + def + download( source: Union[Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')])], str, Annotated[pydantic_core._pydantic_core.Url, UrlConstraints(max_length=2083, allowed_schemes=['http', 'https'], host_required=None, default_host=None, default_port=None, default_path=None)], bioimageio.spec._internal.io.FileDescr, zipp.Path], /, progressbar: Union[bioimageio.spec._internal.io.Progressbar, bool, NoneType] = None, **kwargs: Unpack[bioimageio.spec._internal.io.HashKwargs]) -> Union[bioimageio.spec._internal.io.LocalFile, bioimageio.spec._internal.io.FileInZip]: + + + +
    + +
    658def resolve(
    +659    source: Union[PermissiveFileSource, FileDescr, ZipPath],
    +660    /,
    +661    progressbar: Union[Progressbar, bool, None] = None,
    +662    **kwargs: Unpack[HashKwargs],
    +663) -> Union[LocalFile, FileInZip]:
    +664    """Resolve file `source` (download if needed)"""
    +665    if isinstance(source, FileDescr):
    +666        return source.download()
    +667    elif isinstance(source, ZipPath):
    +668        zip_root = source.root
    +669        assert isinstance(zip_root, ZipFile)
    +670        return FileInZip(
    +671            source,
    +672            zip_root,
    +673            extract_file_name(source),
    +674        )
    +675
    +676    strict_source = interprete_file_source(source)
    +677    if isinstance(strict_source, RelativeFilePath):
    +678        strict_source = strict_source.absolute()
    +679        if isinstance(strict_source, ZipPath):
    +680            return FileInZip(
    +681                strict_source, strict_source.root, extract_file_name(strict_source)
    +682            )
    +683
    +684    if isinstance(strict_source, PurePath):
    +685        if not strict_source.exists():
    +686            raise FileNotFoundError(strict_source)
    +687        local_source = strict_source
    +688        root: Union[RootHttpUrl, DirectoryPath] = strict_source.parent
    +689    else:
    +690        if strict_source.scheme not in ("http", "https"):
    +691            raise NotImplementedError(strict_source.scheme)
    +692
    +693        if settings.CI:
    +694            headers = {"User-Agent": "ci"}
    +695            if progressbar is None:
    +696                progressbar = False
    +697        else:
    +698            headers = {}
    +699            if progressbar is None:
    +700                progressbar = True
    +701
    +702        if settings.user_agent is not None:
    +703            headers["User-Agent"] = settings.user_agent
    +704
    +705        downloader = pooch.HTTPDownloader(
    +706            headers=headers,
    +707            progressbar=progressbar,  # pyright: ignore[reportArgumentType]
    +708        )
    +709        fname = _get_unique_file_name(strict_source)
    +710        _ls: Any = pooch.retrieve(
    +711            url=str(strict_source),
    +712            known_hash=_get_known_hash(kwargs),
    +713            downloader=downloader,
    +714            fname=fname,
    +715            path=settings.cache_path,
    +716        )
    +717        local_source = Path(_ls).absolute()
    +718        root = strict_source.parent
    +719
    +720    return LocalFile(
    +721        local_source,
    +722        root,
    +723        extract_file_name(strict_source),
    +724    )
    +
    + + +

    Resolve file source (download if needed)

    +
    + + +
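A hedged usage sketch for `download` as exported from `bioimageio.spec.utils` (the URL is purely illustrative). The returned object bundles the cached local path, its root, and the original file name, as seen in the `LocalFile(...)` construction above:

```python
from bioimageio.spec.utils import download

# resolve a file source, downloading it to the local cache if it is a URL
local_file = download("https://example.com/model/rdf.yaml", progressbar=False)
print(local_file)  # LocalFile(...) pointing at the cached copy
```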
    +
    + +
    + + def + ensure_description_is_dataset( rd: Union[bioimageio.spec.InvalidDescr, Annotated[Union[Annotated[Union[bioimageio.spec.application.v0_2.ApplicationDescr, bioimageio.spec.ApplicationDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.dataset.v0_2.DatasetDescr, bioimageio.spec.DatasetDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.model.v0_4.ModelDescr, bioimageio.spec.ModelDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.NotebookDescr, bioimageio.spec.NotebookDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)]], Discriminator(discriminator='type', custom_error_type=None, custom_error_message=None, custom_error_context=None)], Annotated[Union[bioimageio.spec.generic.v0_2.GenericDescr, bioimageio.spec.GenericDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)]]) -> Annotated[Union[bioimageio.spec.dataset.v0_2.DatasetDescr, bioimageio.spec.DatasetDescr], Discriminator(discriminator='format_version', custom_error_type=None, custom_error_message=None, custom_error_context=None)]: + + + +
    + +
    214def ensure_description_is_dataset(
    +215    rd: Union[InvalidDescr, ResourceDescr],
    +216) -> AnyDatasetDescr:
    +217    if isinstance(rd, InvalidDescr):
    +218        rd.validation_summary.display()
    +219        raise ValueError("resource description is invalid")
    +220
    +221    if rd.type != "dataset":
    +222        rd.validation_summary.display()
    +223        raise ValueError(
    +224            f"expected a dataset resource, but got resource type '{rd.type}'"
    +225        )
    +226
    +227    assert not isinstance(
    +228        rd,
    +229        (
    +230            GenericDescr_v0_2,
    +231            GenericDescr_v0_3,
    +232        ),
    +233    )
    +234
    +235    return rd
    +
    + + + + +
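A short usage sketch for the narrowing helper above (the import paths are assumptions and may differ between versions):

```python
# Sketch: load any resource description and narrow it to a dataset description.
from bioimageio.spec import load_description
from bioimageio.core.digest_spec import ensure_description_is_dataset  # assumed path

rd = load_description("path/to/dataset/rdf.yaml")  # placeholder source
dataset = ensure_description_is_dataset(rd)  # raises ValueError for anything else
print(dataset.name)
```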
    +
    + +
+ def ensure_description_is_model(rd: Union[InvalidDescr, ResourceDescr]) -> AnyModelDescr:
    + +
    186def ensure_description_is_model(
    +187    rd: Union[InvalidDescr, ResourceDescr],
    +188) -> AnyModelDescr:
    +189    """
    +190    Raises:
    +191        ValueError: for invalid or non-model resources
    +192    """
    +193    if isinstance(rd, InvalidDescr):
    +194        rd.validation_summary.display()
    +195        raise ValueError("resource description is invalid")
    +196
    +197    if rd.type != "model":
    +198        rd.validation_summary.display()
    +199        raise ValueError(
    +200            f"expected a model resource, but got resource type '{rd.type}'"
    +201        )
    +202
    +203    assert not isinstance(
    +204        rd,
    +205        (
    +206            GenericDescr_v0_2,
    +207            GenericDescr_v0_3,
    +208        ),
    +209    )
    +210
    +211    return rd
    +
    + + +
    Raises:
    + +
      +
    • ValueError: for invalid or non-model resources
    • +
    +
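The model variant behaves the same way: it displays the validation summary and raises `ValueError` for invalid resources or for resources of another type. A sketch with explicit error handling (import paths assumed):

```python
# Sketch: narrow a loaded description to a model, handling the failure modes.
from bioimageio.spec import load_description
from bioimageio.core.digest_spec import ensure_description_is_model  # assumed path

rd = load_description("path/to/some/rdf.yaml")  # placeholder source
try:
    model = ensure_description_is_model(rd)
except ValueError as e:
    print(f"not a usable model description: {e}")
else:
    print(f"loaded model '{model.name}'")
```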
    + + +
    +
    + +
+ def extract_file_name(src: Union[pydantic.HttpUrl, HttpUrl, PurePath, RelativeFilePath, ZipPath]) -> FileName:
    + +
    803def extract_file_name(
    +804    src: Union[pydantic.HttpUrl, HttpUrl, PurePath, RelativeFilePath, ZipPath],
    +805) -> FileName:
    +806    if isinstance(src, ZipPath):
    +807        return src.name or src.root.filename or "bioimageio.zip"
    +808    elif isinstance(src, RelativeFilePath):
    +809        return src.path.name
    +810    elif isinstance(src, PurePath):
    +811        return src.name
    +812    else:
    +813        url = urlparse(str(src))
    +814        if (
    +815            url.scheme == "https"
    +816            and url.hostname == "zenodo.org"
    +817            and url.path.startswith("/api/records/")
    +818            and url.path.endswith("/content")
    +819        ):
    +820            return url.path.split("/")[-2]
    +821        else:
    +822            return url.path.split("/")[-1]
    +
    + + + + +
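The helper above returns the final path component for local and zip sources and, for Zenodo API download URLs of the form `.../api/records/<id>/files/<name>/content`, the file-name segment rather than the trailing `content`. A sketch (import path assumed):

```python
from pathlib import PurePath

from bioimageio.spec._internal.io import extract_file_name  # assumed import path

print(extract_file_name(PurePath("models/unet2d/weights.pt")))  # -> "weights.pt"

# Zenodo API content URL (hypothetical record id); a plain str reaches the
# URL branch at runtime and is parsed with urlparse:
print(extract_file_name(
    "https://zenodo.org/api/records/1234567/files/rdf.yaml/content"
))  # -> "rdf.yaml"
```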
    +
    + +
    +
    @lru_cache
    + + def + get_sha256( path: Union[pathlib.Path, zipp.Path]) -> bioimageio.spec._internal.io_basics.Sha256: + + + +
    + +
    825@lru_cache
    +826def get_sha256(path: Union[Path, ZipPath]) -> Sha256:
    +827    """from https://stackoverflow.com/a/44873382"""
    +828    desc = f"computing SHA256 of {path.name}"
    +829    if isinstance(path, ZipPath):
    +830        # no buffered reading available
    +831        zf = path.root
    +832        assert isinstance(zf, ZipFile)
    +833        file_size = zf.NameToInfo[path.at].file_size
    +834        pbar = tqdm(desc=desc, total=file_size)
    +835        data = path.read_bytes()
    +836        assert isinstance(data, bytes)
    +837        h = hashlib.sha256(data)
    +838    else:
    +839        file_size = path.stat().st_size
    +840        pbar = tqdm(desc=desc, total=file_size)
    +841        h = hashlib.sha256()
    +842        chunksize = 128 * 1024
    +843        b = bytearray(chunksize)
    +844        mv = memoryview(b)
    +845        with open(path, "rb", buffering=0) as f:
    +846            for n in iter(lambda: f.readinto(mv), 0):
    +847                h.update(mv[:n])
    +848                _ = pbar.update(n)
    +849
    +850    sha = h.hexdigest()
    +851    pbar.set_description(desc=desc + f" (result: {sha})")
    +852    pbar.close()
    +853    assert len(sha) == 64
    +854    return Sha256(sha)
    +
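`get_sha256` streams local files in 128 KiB chunks (zip members are read in one go) behind a `tqdm` progress bar and is wrapped in `lru_cache`, so hashing the same path twice is free. A sketch (import path assumed):

```python
from pathlib import Path

from bioimageio.spec._internal.io import get_sha256  # assumed import path

path = Path("weights.pt")  # placeholder file
sha = get_sha256(path)
print(sha)  # 64-character hex digest
assert sha == get_sha256(path)  # second call is served from the lru_cache
```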
    + + + + + +
    +
    + +
    + + def + get_spdx_licenses() -> SpdxLicenses: + + + +
    + +
    47def get_spdx_licenses() -> SpdxLicenses:
    +48    """get details of the SPDX licenses known to bioimageio.spec"""
    +49    with files("bioimageio.spec").joinpath("static/spdx_licenses.json").open(
    +50        "r", encoding="utf-8"
    +51    ) as f:
    +52        return json.load(f)
    +
    + + +

    get details of the SPDX licenses known to bioimageio.spec
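The returned mapping mirrors the bundled `static/spdx_licenses.json`, e.g. for checking whether a license id is OSI approved (sketch; import path assumed):

```python
from bioimageio.spec.utils import get_spdx_licenses  # assumed import path

licenses = get_spdx_licenses()
print(licenses["licenseListVersion"])

osi_approved = {
    entry["licenseId"] for entry in licenses["licenses"] if entry["isOsiApproved"]
}
print("MIT" in osi_approved)  # True
```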

    +
    + + +
    +
    + +
    + + def + identify_bioimageio_yaml_file_name(file_names: Iterable[str]) -> str: + + + +
    + +
    429def identify_bioimageio_yaml_file_name(file_names: Iterable[FileName]) -> FileName:
    +430    file_names = sorted(file_names)
    +431    for bioimageio_name in ALL_BIOIMAGEIO_YAML_NAMES:
    +432        for file_name in file_names:
    +433            if file_name == bioimageio_name or file_name.endswith(
    +434                "." + bioimageio_name
    +435            ):
    +436                return file_name
    +437
    +438    raise ValueError(
    +439        f"No {BIOIMAGEIO_YAML} found in {file_names}. (Looking for '{BIOIMAGEIO_YAML}'"
+440        + " or any of the alternative file names:"
    +441        + f" {ALTERNATIVE_BIOIMAGEIO_YAML_NAMES}, or any file with an extension of"
+442        + f" those, e.g. 'anything.{BIOIMAGEIO_YAML}')."
    +443    )
    +
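Typical use is picking the resource description file out of an archive listing (sketch; the archive name is a placeholder and the import path an assumption):

```python
from zipfile import ZipFile

from bioimageio.spec._internal.io_utils import (  # assumed import path
    identify_bioimageio_yaml_file_name,
)

with ZipFile("some_model.zip") as zf:  # placeholder archive
    rdf_name = identify_bioimageio_yaml_file_name(zf.namelist())
print(rdf_name)  # e.g. "bioimageio.yaml" or "something.bioimageio.yaml"
```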
    + + + + +
    +
    + +
    + + def + is_valid_bioimageio_yaml_name(file_name: str) -> bool: + + + +
    + +
    421def is_valid_bioimageio_yaml_name(file_name: FileName) -> bool:
    +422    for bioimageio_name in ALL_BIOIMAGEIO_YAML_NAMES:
    +423        if file_name == bioimageio_name or file_name.endswith("." + bioimageio_name):
    +424            return True
    +425
    +426    return False
    +
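A name is accepted if it equals one of the known bioimageio YAML names or ends with `.<known name>` (sketch; import path assumed):

```python
from bioimageio.spec._internal.io_utils import (  # assumed import path
    is_valid_bioimageio_yaml_name,
)

assert is_valid_bioimageio_yaml_name("bioimageio.yaml")
assert is_valid_bioimageio_yaml_name("my_model.bioimageio.yaml")  # suffix match
assert not is_valid_bioimageio_yaml_name("notes.txt")
```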
    + + + + +
    +
    + +
+ def load_array(source: Union[FileSource, FileDescr, ZipPath]) -> NDArray[Any]:
    + +
    235def load_array(source: Union[FileSource, FileDescr, ZipPath]) -> NDArray[Any]:
    +236    path = download(source).path
    +237    with path.open(mode="rb") as f:
    +238        assert not isinstance(f, io.TextIOWrapper)
    +239        return numpy.load(f, allow_pickle=False)
    +
    + + + + +
    +
    + +
    + + def + save_array( path: Union[pathlib.Path, zipp.Path], array: numpy.ndarray[typing.Any, numpy.dtype[typing.Any]]) -> None: + + + +
    + +
    242def save_array(path: Union[Path, ZipPath], array: NDArray[Any]) -> None:
    +243    with path.open(mode="wb") as f:
    +244        assert not isinstance(f, io.TextIOWrapper)
    +245        return numpy.save(f, array, allow_pickle=False)
    +
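`save_array` and `load_array` above are thin `.npy` wrappers with `allow_pickle=False` on both ends; `load_array` additionally accepts remote sources and downloads them first. A local round-trip sketch (import path assumed):

```python
import numpy as np
from pathlib import Path

from bioimageio.core.io import load_array, save_array  # assumed import path

arr = np.arange(12, dtype="float32").reshape(3, 4)
save_array(Path("example_tensor.npy"), arr)  # placeholder file name
restored = load_array(Path("example_tensor.npy"))
assert np.array_equal(arr, restored)
```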
    + + + + +
    +
    + +
    + + class + SpdxLicenseEntry(typing.TypedDict): + + + +
    + +
    32class SpdxLicenseEntry(TypedDict):
    +33    isDeprecatedLicenseId: bool
    +34    isKnownByZenodo: bool
    +35    isOsiApproved: bool
    +36    licenseId: str
    +37    name: str
    +38    reference: str
    +
    + + + + +
    +
    + isDeprecatedLicenseId: bool + + +
    + + + + +
    +
    +
    + isKnownByZenodo: bool + + +
    + + + + +
    +
    +
    + isOsiApproved: bool + + +
    + + + + +
    +
    +
    + licenseId: str + + +
    + + + + +
    +
    +
    + name: str + + +
    + + + + +
    +
    +
    + reference: str + + +
    + + + + +
    +
    +
    + +
    + + class + SpdxLicenses(typing.TypedDict): + + + +
    + +
    41class SpdxLicenses(TypedDict):
    +42    licenseListVersion: str
    +43    licenses: List[SpdxLicenseEntry]
    +44    releaseDate: str
    +
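These `TypedDict`s only describe the shape of the bundled SPDX data; for instance, a sketch that keeps the non-deprecated license ids Zenodo also recognizes (import assumed, building on `get_spdx_licenses` above):

```python
from bioimageio.spec.utils import get_spdx_licenses  # assumed import path

licenses = get_spdx_licenses()
usable = [
    entry["licenseId"]
    for entry in licenses["licenses"]
    if entry["isKnownByZenodo"] and not entry["isDeprecatedLicenseId"]
]
print(f"{len(usable)} license ids are known to Zenodo and not deprecated")
```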
    + + + + +
    +
    + licenseListVersion: str + + +
    + + + + +
    +
    +
    + licenses: List[SpdxLicenseEntry] + + +
    + + + + +
    +
    +
    + releaseDate: str + + +
    + + + + +
    +
    +
    + + \ No newline at end of file diff --git a/coverage/class_index.html b/coverage/class_index.html index f52fbac6..23ddeb22 100644 --- a/coverage/class_index.html +++ b/coverage/class_index.html @@ -11,7 +11,7 @@

    Coverage report: - 63% + 73%

    @@ -75,10 +75,10 @@

    bioimageio/core/__init__.py (no class) - 19 + 22 0 0 - 100% + 100% bioimageio/core/__main__.py @@ -131,26 +131,26 @@

    bioimageio/core/_prediction_pipeline.py PredictionPipeline - 69 - 33 + 68 + 8 0 - 52% + 88% bioimageio/core/_prediction_pipeline.py (no class) 46 4 - 0 + 1 91% bioimageio/core/_resource_tests.py (no class) - 131 - 28 + 169 + 43 0 - 79% + 75% bioimageio/core/_settings.py @@ -163,26 +163,26 @@

    bioimageio/core/_settings.py (no class) - 9 + 10 0 0 - 100% + 100% bioimageio/core/axis.py Axis - 9 - 3 - 0 - 67% + 8 + 2 + 1 + 75% bioimageio/core/axis.py AxisInfo - 20 - 7 - 0 - 65% + 19 + 6 + 1 + 68% bioimageio/core/axis.py @@ -196,9 +196,9 @@

    bioimageio/core/block.py Block 10 - 10 + 1 0 - 0% + 90% bioimageio/core/block.py @@ -212,69 +212,69 @@

    bioimageio/core/block_meta.py LinearAxisTransform 1 - 1 0 - 0% + 0 + 100% bioimageio/core/block_meta.py BlockMeta 19 - 19 + 5 0 - 0% + 74% bioimageio/core/block_meta.py (no class) - 103 - 53 + 106 + 15 0 - 49% + 86% - bioimageio/core/cli.py - CmdBase + bioimageio/core/cli.py + CmdBase 0 0 0 100% - bioimageio/core/cli.py - ArgMixin + bioimageio/core/cli.py + ArgMixin 0 0 0 100% - bioimageio/core/cli.py - WithSource + bioimageio/core/cli.py + WithSource 4 - 3 + 1 0 - 25% + 75% - bioimageio/core/cli.py - ValidateFormatCmd + bioimageio/core/cli.py + ValidateFormatCmd 1 1 0 0% - bioimageio/core/cli.py - TestCmd + bioimageio/core/cli.py + TestCmd 1 0 0 100% - bioimageio/core/cli.py - PackageCmd + bioimageio/core/cli.py + PackageCmd 4 2 0 @@ -283,14 +283,14 @@

    bioimageio/core/cli.py PredictCmd - 94 - 94 - 0 - 0% + 97 + 22 + 1 + 77% - bioimageio/core/cli.py - Bioimageio + bioimageio/core/cli.py + Bioimageio 10 0 0 @@ -299,26 +299,26 @@

    bioimageio/core/cli.py (no class) - 151 - 54 + 155 + 29 0 - 64% + 81% bioimageio/core/commands.py (no class) 29 - 7 + 5 0 - 76% + 83% bioimageio/core/common.py _LeftRight - 7 - 2 + 6 0 - 71% + 1 + 100% bioimageio/core/common.py @@ -377,8 +377,8 @@

    100% - bioimageio/core/digest_spec.py - IO_SampleBlockMeta + bioimageio/core/digest_spec.py + IO_SampleBlockMeta 0 0 0 @@ -387,14 +387,14 @@

    bioimageio/core/digest_spec.py (no class) - 171 - 57 - 0 - 67% + 162 + 17 + 4 + 90% - bioimageio/core/io.py - _SerializedDatasetStatsEntry + bioimageio/core/io.py + _SerializedDatasetStatsEntry 0 0 0 @@ -403,10 +403,10 @@

    bioimageio/core/io.py (no class) - 50 - 25 + 86 + 11 0 - 50% + 87% bioimageio/core/model_adapters/__init__.py @@ -468,9 +468,9 @@

    bioimageio/core/model_adapters/_pytorch_model_adapter.py PytorchModelAdapter 44 - 9 + 8 0 - 80% + 82% bioimageio/core/model_adapters/_pytorch_model_adapter.py @@ -532,17 +532,17 @@

    bioimageio/core/prediction.py (no class) 63 - 44 + 26 0 - 30% + 59% bioimageio/core/proc_ops.py _SimpleOperator - 15 - 5 - 0 - 67% + 14 + 4 + 1 + 71% bioimageio/core/proc_ops.py @@ -563,10 +563,10 @@

    bioimageio/core/proc_ops.py Binarize - 7 6 - 0 - 14% + 5 + 1 + 17% bioimageio/core/proc_ops.py @@ -587,10 +587,10 @@

    bioimageio/core/proc_ops.py ScaleLinear - 16 - 5 - 0 - 69% + 15 + 4 + 1 + 73% bioimageio/core/proc_ops.py @@ -627,54 +627,54 @@

    bioimageio/core/proc_ops.py FixedZeroMeanUnitVariance - 16 - 14 - 0 - 12% + 15 + 13 + 1 + 13% bioimageio/core/proc_ops.py (no class) - 187 - 14 - 0 - 93% + 184 + 11 + 3 + 94% - bioimageio/core/proc_setup.py - PreAndPostprocessing + bioimageio/core/proc_setup.py + PreAndPostprocessing 0 0 0 100% - bioimageio/core/proc_setup.py - _SetupProcessing + bioimageio/core/proc_setup.py + _SetupProcessing 0 0 0 100% - bioimageio/core/proc_setup.py - RequiredMeasures + bioimageio/core/proc_setup.py + RequiredMeasures 0 0 0 100% - bioimageio/core/proc_setup.py - RequiredDatasetMeasures + bioimageio/core/proc_setup.py + RequiredDatasetMeasures 0 0 0 100% - bioimageio/core/proc_setup.py - RequiredSampleMeasures + bioimageio/core/proc_setup.py + RequiredSampleMeasures 0 0 0 @@ -683,74 +683,74 @@

    bioimageio/core/proc_setup.py (no class) - 84 - 14 - 0 - 83% + 82 + 11 + 1 + 87% - bioimageio/core/sample.py - Sample + bioimageio/core/sample.py + Sample 22 - 21 + 4 0 - 5% + 82% - bioimageio/core/sample.py - SampleBlockBase + bioimageio/core/sample.py + SampleBlockBase 2 2 0 0% - bioimageio/core/sample.py - LinearSampleAxisTransform + bioimageio/core/sample.py + LinearSampleAxisTransform 0 0 0 100% - bioimageio/core/sample.py - SampleBlockMeta - 10 - 10 + bioimageio/core/sample.py + SampleBlockMeta + 11 + 1 0 - 0% + 91% - bioimageio/core/sample.py - SampleBlock - 2 + bioimageio/core/sample.py + SampleBlock 2 0 - 0% + 0 + 100% - bioimageio/core/sample.py - SampleBlockWithOrigin + bioimageio/core/sample.py + SampleBlockWithOrigin 0 0 0 100% - bioimageio/core/sample.py - _ConsolidatedMemberBlocks - 7 + bioimageio/core/sample.py + _ConsolidatedMemberBlocks 7 0 - 0% + 0 + 100% bioimageio/core/sample.py (no class) 71 - 6 0 - 92% + 0 + 100% bioimageio/core/stat_calculators.py @@ -811,10 +811,10 @@

    bioimageio/core/stat_calculators.py (no class) - 141 - 29 - 0 - 79% + 140 + 28 + 1 + 80% bioimageio/core/stat_measures.py @@ -955,18 +955,18 @@

    bioimageio/core/tensor.py Tensor - 136 - 50 - 0 - 63% + 134 + 45 + 2 + 66% bioimageio/core/tensor.py (no class) - 89 - 11 - 0 - 88% + 87 + 8 + 2 + 91% bioimageio/core/utils/__init__.py @@ -1035,10 +1035,10 @@

    bioimageio/core/weight_converter/torch/_torchscript.py (no class) - 74 - 62 - 0 - 16% + 73 + 61 + 1 + 16% bioimageio/core/weight_converter/torch/_utils.py @@ -1053,10 +1053,10 @@

    Total   - 3101 - 1153 - 0 - 63% + 3162 + 842 + 23 + 73% @@ -1067,8 +1067,8 @@

    diff --git a/coverage/z_1c9984a0493eef95_testing_py.html b/coverage/z_1c9984a0493eef95_testing_py.html index 544aebdb..ead31c87 100644 --- a/coverage/z_1c9984a0493eef95_testing_py.html +++ b/coverage/z_1c9984a0493eef95_testing_py.html @@ -64,8 +64,8 @@

    ^ index     » next       - coverage.py v7.6.4, - created at 2024-11-08 13:19 +0000 + coverage.py v7.6.7, + created at 2024-11-18 23:36 +0000

    diff --git a/coverage/z_26936cb883974160___init___py.html b/coverage/z_26936cb883974160___init___py.html index 9e13c41c..30ba1c59 100644 --- a/coverage/z_26936cb883974160___init___py.html +++ b/coverage/z_26936cb883974160___init___py.html @@ -54,8 +54,8 @@

    - 19 statements   - + 22 statements   +

    @@ -64,8 +64,8 @@

    ^ index     » next       - coverage.py v7.6.4, - created at 2024-11-08 13:19 +0000 + coverage.py v7.6.7, + created at 2024-11-18 23:36 +0000

    diff --git a/coverage/z_26936cb883974160__magic_tensor_ops_py.html b/coverage/z_26936cb883974160__magic_tensor_ops_py.html index b7d99fcd..94d6feec 100644 --- a/coverage/z_26936cb883974160__magic_tensor_ops_py.html +++ b/coverage/z_26936cb883974160__magic_tensor_ops_py.html @@ -64,8 +64,8 @@

    ^ index     » next       - coverage.py v7.6.4, - created at 2024-11-08 13:19 +0000 + coverage.py v7.6.7, + created at 2024-11-18 23:36 +0000

    diff --git a/coverage/z_26936cb883974160__op_base_py.html b/coverage/z_26936cb883974160__op_base_py.html index f1de15e9..a9e1b9bc 100644 --- a/coverage/z_26936cb883974160__op_base_py.html +++ b/coverage/z_26936cb883974160__op_base_py.html @@ -64,8 +64,8 @@

    ^ index     » next       - coverage.py v7.6.4, - created at 2024-11-08 13:19 +0000 + coverage.py v7.6.7, + created at 2024-11-18 23:36 +0000

    diff --git a/coverage/z_26936cb883974160__prediction_pipeline_py.html b/coverage/z_26936cb883974160__prediction_pipeline_py.html index be912fa7..cfe50f3f 100644 --- a/coverage/z_26936cb883974160__prediction_pipeline_py.html +++ b/coverage/z_26936cb883974160__prediction_pipeline_py.html @@ -2,7 +2,7 @@ - Coverage for bioimageio/core/_prediction_pipeline.py: 68% + Coverage for bioimageio/core/_prediction_pipeline.py: 89% @@ -12,7 +12,7 @@

    Coverage for bioimageio/core/_prediction_pipeline.py: - 68% + 89%

    - 115 statements   - - - + 114 statements   + + +

    « prev     ^ index     » next       - coverage.py v7.6.4, - created at 2024-11-08 13:19 +0000 + coverage.py v7.6.7, + created at 2024-11-18 23:36 +0000

    diff --git a/coverage/z_26936cb883974160__resource_tests_py.html b/coverage/z_26936cb883974160__resource_tests_py.html index 390413be..efbec15b 100644 --- a/coverage/z_26936cb883974160__resource_tests_py.html +++ b/coverage/z_26936cb883974160__resource_tests_py.html @@ -2,7 +2,7 @@ - Coverage for bioimageio/core/_resource_tests.py: 79% + Coverage for bioimageio/core/_resource_tests.py: 75% @@ -12,7 +12,7 @@

    Coverage for bioimageio/core/_resource_tests.py: - 79% + 75%

    - 131 statements   - - + 169 statements   + +

    @@ -64,8 +64,8 @@

    ^ index     » next       - coverage.py v7.6.4, - created at 2024-11-08 13:19 +0000 + coverage.py v7.6.7, + created at 2024-11-18 23:36 +0000

    - 9 statements   - + 10 statements   +

    @@ -64,8 +64,8 @@

    ^ index     » next       - coverage.py v7.6.4, - created at 2024-11-08 13:19 +0000 + coverage.py v7.6.7, + created at 2024-11-18 23:36 +0000

    - 64 statements   + 62 statements   - - + +

    « prev     ^ index     » next       - coverage.py v7.6.4, - created at 2024-11-08 13:19 +0000 + coverage.py v7.6.7, + created at 2024-11-18 23:36 +0000

    diff --git a/coverage/z_26936cb883974160_block_meta_py.html b/coverage/z_26936cb883974160_block_meta_py.html index d50f414b..c06bf10f 100644 --- a/coverage/z_26936cb883974160_block_meta_py.html +++ b/coverage/z_26936cb883974160_block_meta_py.html @@ -2,7 +2,7 @@ - Coverage for bioimageio/core/block_meta.py: 41% + Coverage for bioimageio/core/block_meta.py: 84% @@ -12,7 +12,7 @@

    Coverage for bioimageio/core/block_meta.py: - 41% + 84%

    - 123 statements   - - + 126 statements   + +

    @@ -64,8 +64,8 @@

    ^ index     » next       - coverage.py v7.6.4, - created at 2024-11-08 13:19 +0000 + coverage.py v7.6.7, + created at 2024-11-18 23:36 +0000

    - 265 statements   - - - + 272 statements   + + +

    « prev     ^ index     » next       - coverage.py v7.6.4, - created at 2024-11-08 13:19 +0000 + coverage.py v7.6.7, + created at 2024-11-18 23:36 +0000

    diff --git a/coverage/z_26936cb883974160_digest_spec_py.html b/coverage/z_26936cb883974160_digest_spec_py.html index e67015ef..d47156af 100644 --- a/coverage/z_26936cb883974160_digest_spec_py.html +++ b/coverage/z_26936cb883974160_digest_spec_py.html @@ -2,7 +2,7 @@ - Coverage for bioimageio/core/digest_spec.py: 67% + Coverage for bioimageio/core/digest_spec.py: 90% @@ -12,7 +12,7 @@

    Coverage for bioimageio/core/digest_spec.py: - 67% + 90%

    - 171 statements   - - - + 162 statements   + + +

    « prev     ^ index     » next       - coverage.py v7.6.4, - created at 2024-11-08 13:19 +0000 + coverage.py v7.6.7, + created at 2024-11-18 23:36 +0000

    - 50 statements   - - + 86 statements   + +

    @@ -64,8 +64,8 @@

    ^ index     » next       - coverage.py v7.6.4, - created at 2024-11-08 13:19 +0000 + coverage.py v7.6.7, + created at 2024-11-18 23:36 +0000

    - 328 statements   + 321 statements   - - + +

    « prev     ^ index     » next       - coverage.py v7.6.4, - created at 2024-11-08 13:19 +0000 + coverage.py v7.6.7, + created at 2024-11-18 23:36 +0000

    - 84 statements   - - - + 82 statements   + + +

    « prev     ^ index     » next       - coverage.py v7.6.4, - created at 2024-11-08 13:19 +0000 + coverage.py v7.6.7, + created at 2024-11-18 23:36 +0000

    - 114 statements   - - + 115 statements   + +

    @@ -64,8 +64,8 @@

    ^ index     » next       - coverage.py v7.6.4, - created at 2024-11-08 13:19 +0000 + coverage.py v7.6.7, + created at 2024-11-18 23:36 +0000

    - 322 statements   + 321 statements   - - + +

    « prev     ^ index     » next       - coverage.py v7.6.4, - created at 2024-11-08 13:19 +0000 + coverage.py v7.6.7, + created at 2024-11-18 23:36 +0000

    diff --git a/coverage/z_26936cb883974160_stat_measures_py.html b/coverage/z_26936cb883974160_stat_measures_py.html index 7a762dca..9c699187 100644 --- a/coverage/z_26936cb883974160_stat_measures_py.html +++ b/coverage/z_26936cb883974160_stat_measures_py.html @@ -64,8 +64,8 @@

    ^ index     » next       - coverage.py v7.6.4, - created at 2024-11-08 13:19 +0000 + coverage.py v7.6.7, + created at 2024-11-18 23:36 +0000

    diff --git a/coverage/z_26936cb883974160_tensor_py.html b/coverage/z_26936cb883974160_tensor_py.html index 90f63205..2849ce37 100644 --- a/coverage/z_26936cb883974160_tensor_py.html +++ b/coverage/z_26936cb883974160_tensor_py.html @@ -2,7 +2,7 @@ - Coverage for bioimageio/core/tensor.py: 73% + Coverage for bioimageio/core/tensor.py: 76% @@ -12,7 +12,7 @@

    Coverage for bioimageio/core/tensor.py: - 73% + 76%

    - 225 statements   - - - + 221 statements   + + +

    « prev     ^ index     » next       - coverage.py v7.6.4, - created at 2024-11-08 13:19 +0000 + coverage.py v7.6.7, + created at 2024-11-18 23:36 +0000

    diff --git a/coverage/z_8896ddf68e1c6b4b___init___py.html b/coverage/z_8896ddf68e1c6b4b___init___py.html index 084cb711..34bab872 100644 --- a/coverage/z_8896ddf68e1c6b4b___init___py.html +++ b/coverage/z_8896ddf68e1c6b4b___init___py.html @@ -64,8 +64,8 @@

    ^ index     » next       - coverage.py v7.6.4, - created at 2024-11-08 13:19 +0000 + coverage.py v7.6.7, + created at 2024-11-18 23:36 +0000

    diff --git a/coverage/z_9c23042445a2a752___init___py.html b/coverage/z_9c23042445a2a752___init___py.html index 50bc31b4..733f85fc 100644 --- a/coverage/z_9c23042445a2a752___init___py.html +++ b/coverage/z_9c23042445a2a752___init___py.html @@ -64,8 +64,8 @@

    ^ index     » next       - coverage.py v7.6.4, - created at 2024-11-08 13:19 +0000 + coverage.py v7.6.7, + created at 2024-11-18 23:36 +0000