diff --git a/bioimageio/spec.html b/bioimageio/spec.html
index 9abf9a140..2c3b225a7 100644
--- a/bioimageio/spec.html
+++ b/bioimageio/spec.html
@@ -623,6 +623,7 @@

bioimageio.spec 0.5.3
  • fix summary formatting
  • improve logged origin of logged messages
  • make the model.v0_5.ModelDescr.training_data field a left_to_right Union to avoid warnings
+ • the deprecated version_number is no longer appended to the id, but instead set as version if no version is specified.
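
For context on the left_to_right entry above: this is Pydantic v2's union mode, in which union members are tried strictly in declaration order and the first one that validates wins, instead of the default "smart" mode that may evaluate several members (presumably the source of the warnings the changelog entry refers to). A minimal, self-contained sketch, assuming Pydantic v2; the classes below are trimmed-down, hypothetical stand-ins, not the real spec classes:

from typing import Annotated, Union

from pydantic import BaseModel, Field


class LinkedDataset(BaseModel):
    id: str


class DatasetDescr(BaseModel):
    name: str


class ModelDescr(BaseModel):
    # trimmed-down analog of model.v0_5.ModelDescr.training_data:
    # members are tried left to right; the first one that validates is used
    training_data: Annotated[
        Union[None, LinkedDataset, DatasetDescr],
        Field(union_mode="left_to_right"),
    ] = None


print(ModelDescr(training_data={"id": "some-dataset"}).training_data)
# LinkedDataset(id='some-dataset'): LinkedDataset is tried before DatasetDescr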
bioimageio.spec 0.5.3.3

    @@ -1451,16 +1452,17 @@

    model 0.3.2

     35    type: Literal["application"] = "application"
     36
     37    id: Optional[ApplicationId] = None
    -38    """Model zoo (bioimage.io) wide, unique identifier (assigned by bioimage.io)"""
    -39
    -40    parent: Optional[ApplicationId] = None
    -41    """The description from which this one is derived"""
    -42
    -43    source: Annotated[
    -44        Optional[ImportantFileSource],
    -45        Field(description="URL or path to the source of the application"),
    -46    ] = None
    -47    """The primary source of the application"""
    +38    """bioimage.io-wide unique resource identifier
    +39    assigned by bioimage.io; version **un**specific."""
    +40
    +41    parent: Optional[ApplicationId] = None
    +42    """The description from which this one is derived"""
    +43
    +44    source: Annotated[
    +45        Optional[ImportantFileSource],
    +46        Field(description="URL or path to the source of the application"),
    +47    ] = None
    +48    """The primary source of the application"""

    @@ -1487,7 +1489,8 @@

    model 0.3.2

    - Model zoo (bioimage.io) wide, unique identifier (assigned by bioimage.io)

    + bioimage.io-wide unique resource identifier
    + assigned by bioimage.io; version unspecific.

    @@ -1507,7 +1510,7 @@

    model 0.3.2

    - source: Annotated[Optional[Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')]), AfterValidator(func=<function wo_special_file_name at 0x7f63806efce0>), PlainSerializer(func=<function _package at 0x7f63805244a0>, return_type=PydanticUndefined, when_used='unless-none')]], FieldInfo(annotation=NoneType, required=True, description='URL or path to the source of the application')]
    + source: Annotated[Optional[Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')]), AfterValidator(func=<function wo_special_file_name at 0x7f560edf7ce0>), PlainSerializer(func=<function _package at 0x7f560ee284a0>, return_type=PydanticUndefined, when_used='unless-none')]], FieldInfo(annotation=NoneType, required=True, description='URL or path to the source of the application')]
    @@ -1686,65 +1689,66 @@
    Returns:
      44    type: Literal["dataset"] = "dataset"
      45
      46    id: Optional[DatasetId] = None
    - 47    """Model zoo (bioimage.io) wide, unique identifier (assigned by bioimage.io)"""
    - 48
    - 49    parent: Optional[DatasetId] = None
    - 50    """The description from which this one is derived"""
    - 51
    - 52    source: Optional[HttpUrl] = None
    - 53    """"URL to the source of the dataset."""
    - 54
    - 55    @model_validator(mode="before")
    - 56    @classmethod
    - 57    def _convert(cls, data: Dict[str, Any], /) -> Dict[str, Any]:
    - 58        if (
    - 59            data.get("type") == "dataset"
    - 60            and isinstance(fv := data.get("format_version"), str)
    - 61            and fv.startswith("0.2.")
    - 62        ):
    - 63            old = DatasetDescr02.load(data)
    - 64            if isinstance(old, InvalidDescr):
    - 65                return data
    - 66
    - 67            return cast(
    - 68                Dict[str, Any],
    - 69                (cls if TYPE_CHECKING else dict)(
    - 70                    attachments=(
    - 71                        []
    - 72                        if old.attachments is None
    - 73                        else [FileDescr(source=f) for f in old.attachments.files]
    - 74                    ),
    - 75                    authors=[
    - 76                        _author_conv.convert_as_dict(a) for a in old.authors
    - 77                    ],  # pyright: ignore[reportArgumentType]
    - 78                    badges=old.badges,
    - 79                    cite=[
    - 80                        {"text": c.text, "doi": c.doi, "url": c.url} for c in old.cite
    - 81                    ],  # pyright: ignore[reportArgumentType]
    - 82                    config=old.config,
    - 83                    covers=old.covers,
    - 84                    description=old.description,
    - 85                    documentation=cast(DocumentationSource, old.documentation),
    - 86                    format_version="0.3.0",
    - 87                    git_repo=old.git_repo,  # pyright: ignore[reportArgumentType]
    - 88                    icon=old.icon,
    - 89                    id=None if old.id is None else DatasetId(old.id),
    - 90                    license=old.license,  # type: ignore
    - 91                    links=old.links,
    - 92                    maintainers=[
    - 93                        _maintainer_conv.convert_as_dict(m) for m in old.maintainers
    - 94                    ],  # pyright: ignore[reportArgumentType]
    - 95                    name=old.name,
    - 96                    source=old.source,
    - 97                    tags=old.tags,
    - 98                    type=old.type,
    - 99                    uploader=old.uploader,
    -100                    version=old.version,
    -101                    **(old.model_extra or {}),
    -102                ),
    -103            )
    -104
    -105        return data
    + 47    """bioimage.io-wide unique resource identifier
    + 48    assigned by bioimage.io; version **un**specific."""
    + 49
    + 50    parent: Optional[DatasetId] = None
    + 51    """The description from which this one is derived"""
    + 52
    + 53    source: Optional[HttpUrl] = None
    + 54    """"URL to the source of the dataset."""
    + 55
    + 56    @model_validator(mode="before")
    + 57    @classmethod
    + 58    def _convert(cls, data: Dict[str, Any], /) -> Dict[str, Any]:
    + 59        if (
    + 60            data.get("type") == "dataset"
    + 61            and isinstance(fv := data.get("format_version"), str)
    + 62            and fv.startswith("0.2.")
    + 63        ):
    + 64            old = DatasetDescr02.load(data)
    + 65            if isinstance(old, InvalidDescr):
    + 66                return data
    + 67
    + 68            return cast(
    + 69                Dict[str, Any],
    + 70                (cls if TYPE_CHECKING else dict)(
    + 71                    attachments=(
    + 72                        []
    + 73                        if old.attachments is None
    + 74                        else [FileDescr(source=f) for f in old.attachments.files]
    + 75                    ),
    + 76                    authors=[
    + 77                        _author_conv.convert_as_dict(a) for a in old.authors
    + 78                    ],  # pyright: ignore[reportArgumentType]
    + 79                    badges=old.badges,
    + 80                    cite=[
    + 81                        {"text": c.text, "doi": c.doi, "url": c.url} for c in old.cite
    + 82                    ],  # pyright: ignore[reportArgumentType]
    + 83                    config=old.config,
    + 84                    covers=old.covers,
    + 85                    description=old.description,
    + 86                    documentation=cast(DocumentationSource, old.documentation),
    + 87                    format_version="0.3.0",
    + 88                    git_repo=old.git_repo,  # pyright: ignore[reportArgumentType]
    + 89                    icon=old.icon,
    + 90                    id=None if old.id is None else DatasetId(old.id),
    + 91                    license=old.license,  # type: ignore
    + 92                    links=old.links,
    + 93                    maintainers=[
    + 94                        _maintainer_conv.convert_as_dict(m) for m in old.maintainers
    + 95                    ],  # pyright: ignore[reportArgumentType]
    + 96                    name=old.name,
    + 97                    source=old.source,
    + 98                    tags=old.tags,
    + 99                    type=old.type,
    +100                    uploader=old.uploader,
    +101                    version=old.version,
    +102                    **(old.model_extra or {}),
    +103                ),
    +104            )
    +105
    +106        return data
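
For orientation: the _convert hook above uses Pydantic v2's model_validator(mode="before"), which receives the raw input and may rewrite old-format data before any field validation runs. A minimal sketch of that pattern, assuming Pydantic v2 (Descr and its fields are illustrative stand-ins, not the real DatasetDescr):

from typing import Any

from pydantic import BaseModel, model_validator


class Descr(BaseModel):
    format_version: str
    source: str

    @model_validator(mode="before")
    @classmethod
    def _convert(cls, data: Any) -> Any:
        # rewrite raw 0.2.x data to the 0.3.0 layout before validation runs
        if (
            isinstance(data, dict)
            and isinstance(fv := data.get("format_version"), str)
            and fv.startswith("0.2.")
        ):
            data = {**data, "format_version": "0.3.0"}
        return data


print(Descr(format_version="0.2.4", source="https://example.com").format_version)
# 0.3.0: the raw dict was migrated before field validation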
    @@ -1772,7 +1776,8 @@
    Returns:
    - Model zoo (bioimage.io) wide, unique identifier (assigned by bioimage.io)

    + bioimage.io-wide unique resource identifier
    + assigned by bioimage.io; version unspecific.

    @@ -1961,24 +1966,25 @@
    Inherited Members
    421 """The resource type assigns a broad category to the resource.""" 422 423 id: Optional[ResourceId] = None -424 """Model zoo (bioimage.io) wide, unique identifier (assigned by bioimage.io)""" -425 -426 parent: Optional[ResourceId] = None -427 """The description from which this one is derived""" -428 -429 source: Optional[HttpUrl] = None -430 """The primary source of the resource""" -431 -432 @field_validator("type", mode="after") -433 @classmethod -434 def check_specific_types(cls, value: str) -> str: -435 if value in KNOWN_SPECIFIC_RESOURCE_TYPES: -436 raise ValueError( -437 f"Use the {value} description instead of this generic description for" -438 + f" your '{value}' resource." -439 ) -440 -441 return value +424 """bioimage.io-wide unique resource identifier +425 assigned by bioimage.io; version **un**specific.""" +426 +427 parent: Optional[ResourceId] = None +428 """The description from which this one is derived""" +429 +430 source: Optional[HttpUrl] = None +431 """The primary source of the resource""" +432 +433 @field_validator("type", mode="after") +434 @classmethod +435 def check_specific_types(cls, value: str) -> str: +436 if value in KNOWN_SPECIFIC_RESOURCE_TYPES: +437 raise ValueError( +438 f"Use the {value} description instead of this generic description for" +439 + f" your '{value}' resource." +440 ) +441 +442 return value
    @@ -2011,7 +2017,8 @@
    Inherited Members
    - Model zoo (bioimage.io) wide, unique identifier (assigned by bioimage.io)

    + bioimage.io-wide unique resource identifier
    + assigned by bioimage.io; version unspecific.

    @@ -2055,16 +2062,16 @@
    Inherited Members
    -432    @field_validator("type", mode="after")
    -433    @classmethod
    -434    def check_specific_types(cls, value: str) -> str:
    -435        if value in KNOWN_SPECIFIC_RESOURCE_TYPES:
    -436            raise ValueError(
    -437                f"Use the {value} description instead of this generic description for"
    -438                + f" your '{value}' resource."
    -439            )
    -440
    -441        return value
    +433    @field_validator("type", mode="after")
    +434    @classmethod
    +435    def check_specific_types(cls, value: str) -> str:
    +436        if value in KNOWN_SPECIFIC_RESOURCE_TYPES:
    +437            raise ValueError(
    +438                f"Use the {value} description instead of this generic description for"
    +439                + f" your '{value}' resource."
    +440            )
    +441
    +442        return value
     
    @@ -2586,552 +2593,614 @@
    Returns:
    -2021class ModelDescr(GenericModelDescrBase, title="bioimage.io model specification"):
    -2022    """Specification of the fields used in a bioimage.io-compliant RDF to describe AI models with pretrained weights.
    -2023    These fields are typically stored in a YAML file which we call a model resource description file (model RDF).
    -2024    """
    -2025
    -2026    format_version: Literal["0.5.3"] = "0.5.3"
    -2027    """Version of the bioimage.io model description specification used.
    -2028    When creating a new model always use the latest micro/patch version described here.
    -2029    The `format_version` is important for any consumer software to understand how to parse the fields.
    -2030    """
    -2031
    -2032    type: Literal["model"] = "model"
    -2033    """Specialized resource type 'model'"""
    -2034
    -2035    id: Optional[ModelId] = None
    -2036    """Model zoo (bioimage.io) wide, unique identifier (assigned by bioimage.io)"""
    -2037
    -2038    authors: NotEmpty[List[Author]]
    -2039    """The authors are the creators of the model RDF and the primary points of contact."""
    -2040
    -2041    documentation: Annotated[
    -2042        DocumentationSource,
    -2043        Field(
    -2044            examples=[
    -2045                "https://raw.githubusercontent.com/bioimage-io/spec-bioimage-io/main/example_descriptions/models/unet2d_nuclei_broad/README.md",
    -2046                "README.md",
    -2047            ],
    -2048        ),
    -2049    ]
    -2050    """∈📦 URL or relative path to a markdown file with additional documentation.
    -2051    The recommended documentation file name is `README.md`. An `.md` suffix is mandatory.
    -2052    The documentation should include a '#[#] Validation' (sub)section
    -2053    with details on how to quantitatively validate the model on unseen data."""
    -2054
    -2055    @field_validator("documentation", mode="after")
    -2056    @classmethod
    -2057    def _validate_documentation(cls, value: DocumentationSource) -> DocumentationSource:
    -2058        if not validation_context_var.get().perform_io_checks:
    -2059            return value
    -2060
    -2061        doc_path = download(value).path
    -2062        doc_content = doc_path.read_text(encoding="utf-8")
    -2063        assert isinstance(doc_content, str)
    -2064        if not re.match("#.*[vV]alidation", doc_content):
    -2065            issue_warning(
    -2066                "No '# Validation' (sub)section found in {value}.",
    -2067                value=value,
    -2068                field="documentation",
    -2069            )
    -2070
    -2071        return value
    +2032class ModelDescr(GenericModelDescrBase, title="bioimage.io model specification"):
    +2033    """Specification of the fields used in a bioimage.io-compliant RDF to describe AI models with pretrained weights.
    +2034    These fields are typically stored in a YAML file which we call a model resource description file (model RDF).
    +2035    """
    +2036
    +2037    format_version: Literal["0.5.3"] = "0.5.3"
    +2038    """Version of the bioimage.io model description specification used.
    +2039    When creating a new model always use the latest micro/patch version described here.
    +2040    The `format_version` is important for any consumer software to understand how to parse the fields.
    +2041    """
    +2042
    +2043    type: Literal["model"] = "model"
    +2044    """Specialized resource type 'model'"""
    +2045
    +2046    id: Optional[ModelId] = None
    +2047    """bioimage.io-wide unique resource identifier
    +2048    assigned by bioimage.io; version **un**specific."""
    +2049
    +2050    authors: NotEmpty[List[Author]]
    +2051    """The authors are the creators of the model RDF and the primary points of contact."""
    +2052
    +2053    documentation: Annotated[
    +2054        DocumentationSource,
    +2055        Field(
    +2056            examples=[
    +2057                "https://raw.githubusercontent.com/bioimage-io/spec-bioimage-io/main/example_descriptions/models/unet2d_nuclei_broad/README.md",
    +2058                "README.md",
    +2059            ],
    +2060        ),
    +2061    ]
    +2062    """∈📦 URL or relative path to a markdown file with additional documentation.
    +2063    The recommended documentation file name is `README.md`. An `.md` suffix is mandatory.
    +2064    The documentation should include a '#[#] Validation' (sub)section
    +2065    with details on how to quantitatively validate the model on unseen data."""
    +2066
    +2067    @field_validator("documentation", mode="after")
    +2068    @classmethod
    +2069    def _validate_documentation(cls, value: DocumentationSource) -> DocumentationSource:
    +2070        if not validation_context_var.get().perform_io_checks:
    +2071            return value
     2072
    -2073    inputs: NotEmpty[Sequence[InputTensorDescr]]
    -2074    """Describes the input tensors expected by this model."""
    -2075
    -2076    @field_validator("inputs", mode="after")
    -2077    @classmethod
    -2078    def _validate_input_axes(
    -2079        cls, inputs: Sequence[InputTensorDescr]
    -2080    ) -> Sequence[InputTensorDescr]:
    -2081        input_size_refs = cls._get_axes_with_independent_size(inputs)
    +2073        doc_path = download(value).path
    +2074        doc_content = doc_path.read_text(encoding="utf-8")
    +2075        assert isinstance(doc_content, str)
    +2076        if not re.match("#.*[vV]alidation", doc_content):
    +2077            issue_warning(
    +2078                "No '# Validation' (sub)section found in {value}.",
    +2079                value=value,
    +2080                field="documentation",
    +2081            )
     2082
    -2083        for i, ipt in enumerate(inputs):
    -2084            valid_independent_refs: Dict[
    -2085                Tuple[TensorId, AxisId],
    -2086                Tuple[TensorDescr, AnyAxis, Union[int, ParameterizedSize]],
    -2087            ] = {
    -2088                **{
    -2089                    (ipt.id, a.id): (ipt, a, a.size)
    -2090                    for a in ipt.axes
    -2091                    if not isinstance(a, BatchAxis)
    -2092                    and isinstance(a.size, (int, ParameterizedSize))
    -2093                },
    -2094                **input_size_refs,
    -2095            }
    -2096            for a, ax in enumerate(ipt.axes):
    -2097                cls._validate_axis(
    -2098                    "inputs",
    -2099                    i=i,
    -2100                    tensor_id=ipt.id,
    -2101                    a=a,
    -2102                    axis=ax,
    -2103                    valid_independent_refs=valid_independent_refs,
    -2104                )
    -2105        return inputs
    -2106
    -2107    @staticmethod
    -2108    def _validate_axis(
    -2109        field_name: str,
    -2110        i: int,
    -2111        tensor_id: TensorId,
    -2112        a: int,
    -2113        axis: AnyAxis,
    -2114        valid_independent_refs: Dict[
    -2115            Tuple[TensorId, AxisId],
    -2116            Tuple[TensorDescr, AnyAxis, Union[int, ParameterizedSize]],
    -2117        ],
    -2118    ):
    -2119        if isinstance(axis, BatchAxis) or isinstance(
    -2120            axis.size, (int, ParameterizedSize, DataDependentSize)
    -2121        ):
    -2122            return
    -2123        elif not isinstance(axis.size, SizeReference):
    -2124            assert_never(axis.size)
    -2125
    -2126        # validate axis.size SizeReference
    -2127        ref = (axis.size.tensor_id, axis.size.axis_id)
    -2128        if ref not in valid_independent_refs:
    -2129            raise ValueError(
    -2130                "Invalid tensor axis reference at"
    -2131                + f" {field_name}[{i}].axes[{a}].size: {axis.size}."
    -2132            )
    -2133        if ref == (tensor_id, axis.id):
    -2134            raise ValueError(
    -2135                "Self-referencing not allowed for"
    -2136                + f" {field_name}[{i}].axes[{a}].size: {axis.size}"
    -2137            )
    -2138        if axis.type == "channel":
    -2139            if valid_independent_refs[ref][1].type != "channel":
    -2140                raise ValueError(
    -2141                    "A channel axis' size may only reference another fixed size"
    -2142                    + " channel axis."
    -2143                )
    -2144            if isinstance(axis.channel_names, str) and "{i}" in axis.channel_names:
    -2145                ref_size = valid_independent_refs[ref][2]
    -2146                assert isinstance(ref_size, int), (
    -2147                    "channel axis ref (another channel axis) has to specify fixed"
    -2148                    + " size"
    -2149                )
    -2150                generated_channel_names = [
    -2151                    Identifier(axis.channel_names.format(i=i))
    -2152                    for i in range(1, ref_size + 1)
    -2153                ]
    -2154                axis.channel_names = generated_channel_names
    -2155
    -2156        if (ax_unit := getattr(axis, "unit", None)) != (
    -2157            ref_unit := getattr(valid_independent_refs[ref][1], "unit", None)
    -2158        ):
    -2159            raise ValueError(
    -2160                "The units of an axis and its reference axis need to match, but"
    -2161                + f" '{ax_unit}' != '{ref_unit}'."
    -2162            )
    -2163        ref_axis = valid_independent_refs[ref][1]
    -2164        if isinstance(ref_axis, BatchAxis):
    -2165            raise ValueError(
    -2166                f"Invalid reference axis '{ref_axis.id}' for {tensor_id}.{axis.id}"
    -2167                + " (a batch axis is not allowed as reference)."
    -2168            )
    -2169
    -2170        if isinstance(axis, WithHalo):
    -2171            min_size = axis.size.get_size(axis, ref_axis, n=0)
    -2172            if (min_size - 2 * axis.halo) < 1:
    -2173                raise ValueError(
    -2174                    f"axis {axis.id} with minimum size {min_size} is too small for halo"
    -2175                    + f" {axis.halo}."
    -2176                )
    -2177
    -2178            input_halo = axis.halo * axis.scale / ref_axis.scale
    -2179            if input_halo != int(input_halo) or input_halo % 2 == 1:
    -2180                raise ValueError(
    -2181                    f"input_halo {input_halo} (output_halo {axis.halo} *"
    -2182                    + f" output_scale {axis.scale} / input_scale {ref_axis.scale})"
    -2183                    + f" is not an even integer for {tensor_id}.{axis.id}."
    -2184                )
    -2185
    -2186    @model_validator(mode="after")
    -2187    def _validate_test_tensors(self) -> Self:
    -2188        if not validation_context_var.get().perform_io_checks:
    -2189            return self
    -2190
    -2191        test_arrays = [
    -2192            load_array(descr.test_tensor.download().path)
    -2193            for descr in chain(self.inputs, self.outputs)
    -2194        ]
    -2195        tensors = {
    -2196            descr.id: (descr, array)
    -2197            for descr, array in zip(chain(self.inputs, self.outputs), test_arrays)
    -2198        }
    -2199        validate_tensors(tensors, tensor_origin="test_tensor")
    -2200        return self
    -2201
    -2202    @model_validator(mode="after")
    -2203    def _validate_tensor_references_in_proc_kwargs(self, info: ValidationInfo) -> Self:
    -2204        ipt_refs = {t.id for t in self.inputs}
    -2205        out_refs = {t.id for t in self.outputs}
    -2206        for ipt in self.inputs:
    -2207            for p in ipt.preprocessing:
    -2208                ref = p.kwargs.get("reference_tensor")
    -2209                if ref is None:
    -2210                    continue
    -2211                if ref not in ipt_refs:
    -2212                    raise ValueError(
    -2213                        f"`reference_tensor` '{ref}' not found. Valid input tensor"
    -2214                        + f" references are: {ipt_refs}."
    -2215                    )
    -2216
    -2217        for out in self.outputs:
    -2218            for p in out.postprocessing:
    -2219                ref = p.kwargs.get("reference_tensor")
    -2220                if ref is None:
    -2221                    continue
    -2222
    -2223                if ref not in ipt_refs and ref not in out_refs:
    +2083        return value
    +2084
    +2085    inputs: NotEmpty[Sequence[InputTensorDescr]]
    +2086    """Describes the input tensors expected by this model."""
    +2087
    +2088    @field_validator("inputs", mode="after")
    +2089    @classmethod
    +2090    def _validate_input_axes(
    +2091        cls, inputs: Sequence[InputTensorDescr]
    +2092    ) -> Sequence[InputTensorDescr]:
    +2093        input_size_refs = cls._get_axes_with_independent_size(inputs)
    +2094
    +2095        for i, ipt in enumerate(inputs):
    +2096            valid_independent_refs: Dict[
    +2097                Tuple[TensorId, AxisId],
    +2098                Tuple[TensorDescr, AnyAxis, Union[int, ParameterizedSize]],
    +2099            ] = {
    +2100                **{
    +2101                    (ipt.id, a.id): (ipt, a, a.size)
    +2102                    for a in ipt.axes
    +2103                    if not isinstance(a, BatchAxis)
    +2104                    and isinstance(a.size, (int, ParameterizedSize))
    +2105                },
    +2106                **input_size_refs,
    +2107            }
    +2108            for a, ax in enumerate(ipt.axes):
    +2109                cls._validate_axis(
    +2110                    "inputs",
    +2111                    i=i,
    +2112                    tensor_id=ipt.id,
    +2113                    a=a,
    +2114                    axis=ax,
    +2115                    valid_independent_refs=valid_independent_refs,
    +2116                )
    +2117        return inputs
    +2118
    +2119    @staticmethod
    +2120    def _validate_axis(
    +2121        field_name: str,
    +2122        i: int,
    +2123        tensor_id: TensorId,
    +2124        a: int,
    +2125        axis: AnyAxis,
    +2126        valid_independent_refs: Dict[
    +2127            Tuple[TensorId, AxisId],
    +2128            Tuple[TensorDescr, AnyAxis, Union[int, ParameterizedSize]],
    +2129        ],
    +2130    ):
    +2131        if isinstance(axis, BatchAxis) or isinstance(
    +2132            axis.size, (int, ParameterizedSize, DataDependentSize)
    +2133        ):
    +2134            return
    +2135        elif not isinstance(axis.size, SizeReference):
    +2136            assert_never(axis.size)
    +2137
    +2138        # validate axis.size SizeReference
    +2139        ref = (axis.size.tensor_id, axis.size.axis_id)
    +2140        if ref not in valid_independent_refs:
    +2141            raise ValueError(
    +2142                "Invalid tensor axis reference at"
    +2143                + f" {field_name}[{i}].axes[{a}].size: {axis.size}."
    +2144            )
    +2145        if ref == (tensor_id, axis.id):
    +2146            raise ValueError(
    +2147                "Self-referencing not allowed for"
    +2148                + f" {field_name}[{i}].axes[{a}].size: {axis.size}"
    +2149            )
    +2150        if axis.type == "channel":
    +2151            if valid_independent_refs[ref][1].type != "channel":
    +2152                raise ValueError(
    +2153                    "A channel axis' size may only reference another fixed size"
    +2154                    + " channel axis."
    +2155                )
    +2156            if isinstance(axis.channel_names, str) and "{i}" in axis.channel_names:
    +2157                ref_size = valid_independent_refs[ref][2]
    +2158                assert isinstance(ref_size, int), (
    +2159                    "channel axis ref (another channel axis) has to specify fixed"
    +2160                    + " size"
    +2161                )
    +2162                generated_channel_names = [
    +2163                    Identifier(axis.channel_names.format(i=i))
    +2164                    for i in range(1, ref_size + 1)
    +2165                ]
    +2166                axis.channel_names = generated_channel_names
    +2167
    +2168        if (ax_unit := getattr(axis, "unit", None)) != (
    +2169            ref_unit := getattr(valid_independent_refs[ref][1], "unit", None)
    +2170        ):
    +2171            raise ValueError(
    +2172                "The units of an axis and its reference axis need to match, but"
    +2173                + f" '{ax_unit}' != '{ref_unit}'."
    +2174            )
    +2175        ref_axis = valid_independent_refs[ref][1]
    +2176        if isinstance(ref_axis, BatchAxis):
    +2177            raise ValueError(
    +2178                f"Invalid reference axis '{ref_axis.id}' for {tensor_id}.{axis.id}"
    +2179                + " (a batch axis is not allowed as reference)."
    +2180            )
    +2181
    +2182        if isinstance(axis, WithHalo):
    +2183            min_size = axis.size.get_size(axis, ref_axis, n=0)
    +2184            if (min_size - 2 * axis.halo) < 1:
    +2185                raise ValueError(
    +2186                    f"axis {axis.id} with minimum size {min_size} is too small for halo"
    +2187                    + f" {axis.halo}."
    +2188                )
    +2189
    +2190            input_halo = axis.halo * axis.scale / ref_axis.scale
    +2191            if input_halo != int(input_halo) or input_halo % 2 == 1:
    +2192                raise ValueError(
    +2193                    f"input_halo {input_halo} (output_halo {axis.halo} *"
    +2194                    + f" output_scale {axis.scale} / input_scale {ref_axis.scale})"
    +2195                    + f" is not an even integer for {tensor_id}.{axis.id}."
    +2196                )
    +2197
    +2198    @model_validator(mode="after")
    +2199    def _validate_test_tensors(self) -> Self:
    +2200        if not validation_context_var.get().perform_io_checks:
    +2201            return self
    +2202
    +2203        test_arrays = [
    +2204            load_array(descr.test_tensor.download().path)
    +2205            for descr in chain(self.inputs, self.outputs)
    +2206        ]
    +2207        tensors = {
    +2208            descr.id: (descr, array)
    +2209            for descr, array in zip(chain(self.inputs, self.outputs), test_arrays)
    +2210        }
    +2211        validate_tensors(tensors, tensor_origin="test_tensor")
    +2212        return self
    +2213
    +2214    @model_validator(mode="after")
    +2215    def _validate_tensor_references_in_proc_kwargs(self, info: ValidationInfo) -> Self:
    +2216        ipt_refs = {t.id for t in self.inputs}
    +2217        out_refs = {t.id for t in self.outputs}
    +2218        for ipt in self.inputs:
    +2219            for p in ipt.preprocessing:
    +2220                ref = p.kwargs.get("reference_tensor")
    +2221                if ref is None:
    +2222                    continue
    +2223                if ref not in ipt_refs:
     2224                    raise ValueError(
    -2225                        f"`reference_tensor` '{ref}' not found. Valid tensor references"
    -2226                        + f" are: {ipt_refs | out_refs}."
    +2225                        f"`reference_tensor` '{ref}' not found. Valid input tensor"
    +2226                        + f" references are: {ipt_refs}."
     2227                    )
     2228
    -2229        return self
    -2230
    -2231    # TODO: use validate funcs in validate_test_tensors
    -2232    # def validate_inputs(self, input_tensors: Mapping[TensorId, NDArray[Any]]) -> Mapping[TensorId, NDArray[Any]]:
    -2233
    -2234    name: Annotated[
    -2235        Annotated[
    -2236            str, RestrictCharacters(string.ascii_letters + string.digits + "_- ()")
    -2237        ],
    -2238        MinLen(5),
    -2239        MaxLen(128),
    -2240        warn(MaxLen(64), "Name longer than 64 characters.", INFO),
    -2241    ]
    -2242    """A human-readable name of this model.
    -2243    It should be no longer than 64 characters
    -2244    and may only contain letter, number, underscore, minus, parentheses and spaces.
    -2245    We recommend to chose a name that refers to the model's task and image modality.
    -2246    """
    -2247
    -2248    outputs: NotEmpty[Sequence[OutputTensorDescr]]
    -2249    """Describes the output tensors."""
    -2250
    -2251    @field_validator("outputs", mode="after")
    -2252    @classmethod
    -2253    def _validate_tensor_ids(
    -2254        cls, outputs: Sequence[OutputTensorDescr], info: ValidationInfo
    -2255    ) -> Sequence[OutputTensorDescr]:
    -2256        tensor_ids = [
    -2257            t.id for t in info.data.get("inputs", []) + info.data.get("outputs", [])
    -2258        ]
    -2259        duplicate_tensor_ids: List[str] = []
    -2260        seen: Set[str] = set()
    -2261        for t in tensor_ids:
    -2262            if t in seen:
    -2263                duplicate_tensor_ids.append(t)
    -2264
    -2265            seen.add(t)
    -2266
    -2267        if duplicate_tensor_ids:
    -2268            raise ValueError(f"Duplicate tensor ids: {duplicate_tensor_ids}")
    -2269
    -2270        return outputs
    -2271
    -2272    @staticmethod
    -2273    def _get_axes_with_parameterized_size(
    -2274        io: Union[Sequence[InputTensorDescr], Sequence[OutputTensorDescr]],
    -2275    ):
    -2276        return {
    -2277            f"{t.id}.{a.id}": (t, a, a.size)
    -2278            for t in io
    -2279            for a in t.axes
    -2280            if not isinstance(a, BatchAxis) and isinstance(a.size, ParameterizedSize)
    -2281        }
    -2282
    -2283    @staticmethod
    -2284    def _get_axes_with_independent_size(
    -2285        io: Union[Sequence[InputTensorDescr], Sequence[OutputTensorDescr]],
    -2286    ):
    -2287        return {
    -2288            (t.id, a.id): (t, a, a.size)
    -2289            for t in io
    -2290            for a in t.axes
    -2291            if not isinstance(a, BatchAxis)
    -2292            and isinstance(a.size, (int, ParameterizedSize))
    +2229        for out in self.outputs:
    +2230            for p in out.postprocessing:
    +2231                ref = p.kwargs.get("reference_tensor")
    +2232                if ref is None:
    +2233                    continue
    +2234
    +2235                if ref not in ipt_refs and ref not in out_refs:
    +2236                    raise ValueError(
    +2237                        f"`reference_tensor` '{ref}' not found. Valid tensor references"
    +2238                        + f" are: {ipt_refs | out_refs}."
    +2239                    )
    +2240
    +2241        return self
    +2242
    +2243    # TODO: use validate funcs in validate_test_tensors
    +2244    # def validate_inputs(self, input_tensors: Mapping[TensorId, NDArray[Any]]) -> Mapping[TensorId, NDArray[Any]]:
    +2245
    +2246    name: Annotated[
    +2247        Annotated[
    +2248            str, RestrictCharacters(string.ascii_letters + string.digits + "_- ()")
    +2249        ],
    +2250        MinLen(5),
    +2251        MaxLen(128),
    +2252        warn(MaxLen(64), "Name longer than 64 characters.", INFO),
    +2253    ]
    +2254    """A human-readable name of this model.
    +2255    It should be no longer than 64 characters
    +2256    and may only contain letter, number, underscore, minus, parentheses and spaces.
    +2257    We recommend to chose a name that refers to the model's task and image modality.
    +2258    """
    +2259
    +2260    outputs: NotEmpty[Sequence[OutputTensorDescr]]
    +2261    """Describes the output tensors."""
    +2262
    +2263    @field_validator("outputs", mode="after")
    +2264    @classmethod
    +2265    def _validate_tensor_ids(
    +2266        cls, outputs: Sequence[OutputTensorDescr], info: ValidationInfo
    +2267    ) -> Sequence[OutputTensorDescr]:
    +2268        tensor_ids = [
    +2269            t.id for t in info.data.get("inputs", []) + info.data.get("outputs", [])
    +2270        ]
    +2271        duplicate_tensor_ids: List[str] = []
    +2272        seen: Set[str] = set()
    +2273        for t in tensor_ids:
    +2274            if t in seen:
    +2275                duplicate_tensor_ids.append(t)
    +2276
    +2277            seen.add(t)
    +2278
    +2279        if duplicate_tensor_ids:
    +2280            raise ValueError(f"Duplicate tensor ids: {duplicate_tensor_ids}")
    +2281
    +2282        return outputs
    +2283
    +2284    @staticmethod
    +2285    def _get_axes_with_parameterized_size(
    +2286        io: Union[Sequence[InputTensorDescr], Sequence[OutputTensorDescr]],
    +2287    ):
    +2288        return {
    +2289            f"{t.id}.{a.id}": (t, a, a.size)
    +2290            for t in io
    +2291            for a in t.axes
    +2292            if not isinstance(a, BatchAxis) and isinstance(a.size, ParameterizedSize)
     2293        }
     2294
    -2295    @field_validator("outputs", mode="after")
    -2296    @classmethod
    -2297    def _validate_output_axes(
    -2298        cls, outputs: List[OutputTensorDescr], info: ValidationInfo
    -2299    ) -> List[OutputTensorDescr]:
    -2300        input_size_refs = cls._get_axes_with_independent_size(
    -2301            info.data.get("inputs", [])
    -2302        )
    -2303        output_size_refs = cls._get_axes_with_independent_size(outputs)
    -2304
    -2305        for i, out in enumerate(outputs):
    -2306            valid_independent_refs: Dict[
    -2307                Tuple[TensorId, AxisId],
    -2308                Tuple[TensorDescr, AnyAxis, Union[int, ParameterizedSize]],
    -2309            ] = {
    -2310                **{
    -2311                    (out.id, a.id): (out, a, a.size)
    -2312                    for a in out.axes
    -2313                    if not isinstance(a, BatchAxis)
    -2314                    and isinstance(a.size, (int, ParameterizedSize))
    -2315                },
    -2316                **input_size_refs,
    -2317                **output_size_refs,
    -2318            }
    -2319            for a, ax in enumerate(out.axes):
    -2320                cls._validate_axis(
    -2321                    "outputs",
    -2322                    i,
    -2323                    out.id,
    -2324                    a,
    -2325                    ax,
    -2326                    valid_independent_refs=valid_independent_refs,
    -2327                )
    -2328
    -2329        return outputs
    -2330
    -2331    packaged_by: List[Author] = Field(default_factory=list)
    -2332    """The persons that have packaged and uploaded this model.
    -2333    Only required if those persons differ from the `authors`."""
    -2334
    -2335    parent: Optional[LinkedModel] = None
    -2336    """The model from which this model is derived, e.g. by fine-tuning the weights."""
    -2337
    -2338    # todo: add parent self check once we have `id`
    -2339    # @model_validator(mode="after")
    -2340    # def validate_parent_is_not_self(self) -> Self:
    -2341    #     if self.parent is not None and self.parent == self.id:
    -2342    #         raise ValueError("The model may not reference itself as parent model")
    -2343
    -2344    #     return self
    -2345
    -2346    run_mode: Annotated[
    -2347        Optional[RunMode],
    -2348        warn(None, "Run mode '{value}' has limited support across consumer softwares."),
    -2349    ] = None
    -2350    """Custom run mode for this model: for more complex prediction procedures like test time
    -2351    data augmentation that currently cannot be expressed in the specification.
    -2352    No standard run modes are defined yet."""
    -2353
    -2354    timestamp: Datetime = Datetime(datetime.now())
    -2355    """Timestamp in [ISO 8601](#https://en.wikipedia.org/wiki/ISO_8601) format
    -2356    with a few restrictions listed [here](https://docs.python.org/3/library/datetime.html#datetime.datetime.fromisoformat).
    -2357    (In Python a datetime object is valid, too)."""
    -2358
    -2359    training_data: Annotated[
    -2360        Union[None, LinkedDataset, DatasetDescr, DatasetDescr02],
    -2361        Field(union_mode="left_to_right"),
    -2362    ] = None
    -2363    """The dataset used to train this model"""
    -2364
    -2365    weights: Annotated[WeightsDescr, WrapSerializer(package_weights)]
    -2366    """The weights for this model.
    -2367    Weights can be given for different formats, but should otherwise be equivalent.
    -2368    The available weight formats determine which consumers can use this model."""
    -2369
    -2370    @model_validator(mode="after")
    -2371    def _add_default_cover(self) -> Self:
    -2372        if not validation_context_var.get().perform_io_checks or self.covers:
    -2373            return self
    -2374
    -2375        try:
    -2376            generated_covers = generate_covers(
    -2377                [(t, load_array(t.test_tensor.download().path)) for t in self.inputs],
    -2378                [(t, load_array(t.test_tensor.download().path)) for t in self.outputs],
    -2379            )
    -2380        except Exception as e:
    -2381            issue_warning(
    -2382                "Failed to generate cover image(s): {e}",
    -2383                value=self.covers,
    -2384                msg_context=dict(e=e),
    -2385                field="covers",
    -2386            )
    -2387        else:
    -2388            self.covers.extend(generated_covers)
    -2389
    -2390        return self
    -2391
    -2392    def get_input_test_arrays(self) -> List[NDArray[Any]]:
    -2393        data = [load_array(ipt.test_tensor.download().path) for ipt in self.inputs]
    -2394        assert all(isinstance(d, np.ndarray) for d in data)
    -2395        return data
    -2396
    -2397    def get_output_test_arrays(self) -> List[NDArray[Any]]:
    -2398        data = [load_array(out.test_tensor.download().path) for out in self.outputs]
    -2399        assert all(isinstance(d, np.ndarray) for d in data)
    -2400        return data
    +2295    @staticmethod
    +2296    def _get_axes_with_independent_size(
    +2297        io: Union[Sequence[InputTensorDescr], Sequence[OutputTensorDescr]],
    +2298    ):
    +2299        return {
    +2300            (t.id, a.id): (t, a, a.size)
    +2301            for t in io
    +2302            for a in t.axes
    +2303            if not isinstance(a, BatchAxis)
    +2304            and isinstance(a.size, (int, ParameterizedSize))
    +2305        }
    +2306
    +2307    @field_validator("outputs", mode="after")
    +2308    @classmethod
    +2309    def _validate_output_axes(
    +2310        cls, outputs: List[OutputTensorDescr], info: ValidationInfo
    +2311    ) -> List[OutputTensorDescr]:
    +2312        input_size_refs = cls._get_axes_with_independent_size(
    +2313            info.data.get("inputs", [])
    +2314        )
    +2315        output_size_refs = cls._get_axes_with_independent_size(outputs)
    +2316
    +2317        for i, out in enumerate(outputs):
    +2318            valid_independent_refs: Dict[
    +2319                Tuple[TensorId, AxisId],
    +2320                Tuple[TensorDescr, AnyAxis, Union[int, ParameterizedSize]],
    +2321            ] = {
    +2322                **{
    +2323                    (out.id, a.id): (out, a, a.size)
    +2324                    for a in out.axes
    +2325                    if not isinstance(a, BatchAxis)
    +2326                    and isinstance(a.size, (int, ParameterizedSize))
    +2327                },
    +2328                **input_size_refs,
    +2329                **output_size_refs,
    +2330            }
    +2331            for a, ax in enumerate(out.axes):
    +2332                cls._validate_axis(
    +2333                    "outputs",
    +2334                    i,
    +2335                    out.id,
    +2336                    a,
    +2337                    ax,
    +2338                    valid_independent_refs=valid_independent_refs,
    +2339                )
    +2340
    +2341        return outputs
    +2342
    +2343    packaged_by: List[Author] = Field(default_factory=list)
    +2344    """The persons that have packaged and uploaded this model.
    +2345    Only required if those persons differ from the `authors`."""
    +2346
    +2347    parent: Optional[LinkedModel] = None
    +2348    """The model from which this model is derived, e.g. by fine-tuning the weights."""
    +2349
    +2350    # todo: add parent self check once we have `id`
    +2351    # @model_validator(mode="after")
    +2352    # def validate_parent_is_not_self(self) -> Self:
    +2353    #     if self.parent is not None and self.parent == self.id:
    +2354    #         raise ValueError("The model may not reference itself as parent model")
    +2355
    +2356    #     return self
    +2357
    +2358    run_mode: Annotated[
    +2359        Optional[RunMode],
    +2360        warn(None, "Run mode '{value}' has limited support across consumer softwares."),
    +2361    ] = None
    +2362    """Custom run mode for this model: for more complex prediction procedures like test time
    +2363    data augmentation that currently cannot be expressed in the specification.
    +2364    No standard run modes are defined yet."""
    +2365
    +2366    timestamp: Datetime = Datetime(datetime.now())
    +2367    """Timestamp in [ISO 8601](#https://en.wikipedia.org/wiki/ISO_8601) format
    +2368    with a few restrictions listed [here](https://docs.python.org/3/library/datetime.html#datetime.datetime.fromisoformat).
    +2369    (In Python a datetime object is valid, too)."""
    +2370
    +2371    training_data: Annotated[
    +2372        Union[None, LinkedDataset, DatasetDescr, DatasetDescr02],
    +2373        Field(union_mode="left_to_right"),
    +2374    ] = None
    +2375    """The dataset used to train this model"""
    +2376
    +2377    weights: Annotated[WeightsDescr, WrapSerializer(package_weights)]
    +2378    """The weights for this model.
    +2379    Weights can be given for different formats, but should otherwise be equivalent.
    +2380    The available weight formats determine which consumers can use this model."""
    +2381
    +2382    @model_validator(mode="after")
    +2383    def _add_default_cover(self) -> Self:
    +2384        if not validation_context_var.get().perform_io_checks or self.covers:
    +2385            return self
    +2386
    +2387        try:
    +2388            generated_covers = generate_covers(
    +2389                [(t, load_array(t.test_tensor.download().path)) for t in self.inputs],
    +2390                [(t, load_array(t.test_tensor.download().path)) for t in self.outputs],
    +2391            )
    +2392        except Exception as e:
    +2393            issue_warning(
    +2394                "Failed to generate cover image(s): {e}",
    +2395                value=self.covers,
    +2396                msg_context=dict(e=e),
    +2397                field="covers",
    +2398            )
    +2399        else:
    +2400            self.covers.extend(generated_covers)
     2401
    -2402    @staticmethod
    -2403    def get_batch_size(tensor_sizes: Mapping[TensorId, Mapping[AxisId, int]]) -> int:
    -2404        batch_size = 1
    -2405        tensor_with_batchsize: Optional[TensorId] = None
    -2406        for tid in tensor_sizes:
    -2407            for aid, s in tensor_sizes[tid].items():
    -2408                if aid != BATCH_AXIS_ID or s == 1 or s == batch_size:
    -2409                    continue
    -2410
    -2411                if batch_size != 1:
    -2412                    assert tensor_with_batchsize is not None
    -2413                    raise ValueError(
    -2414                        f"batch size mismatch for tensors '{tensor_with_batchsize}' ({batch_size}) and '{tid}' ({s})"
    -2415                    )
    -2416
    -2417                batch_size = s
    -2418                tensor_with_batchsize = tid
    -2419
    -2420        return batch_size
    -2421
    -2422    def get_output_tensor_sizes(
    -2423        self, input_sizes: Mapping[TensorId, Mapping[AxisId, int]]
    -2424    ) -> Dict[TensorId, Dict[AxisId, Union[int, _DataDepSize]]]:
    -2425        """Returns the tensor output sizes for given **input_sizes**.
    -2426        Only if **input_sizes** has a valid input shape, the tensor output size is exact.
    -2427        Otherwise it might be larger than the actual (valid) output"""
    -2428        batch_size = self.get_batch_size(input_sizes)
    -2429        ns = self.get_ns(input_sizes)
    -2430
    -2431        tensor_sizes = self.get_tensor_sizes(ns, batch_size=batch_size)
    -2432        return tensor_sizes.outputs
    +2402        return self
    +2403
    +2404    def get_input_test_arrays(self) -> List[NDArray[Any]]:
    +2405        data = [load_array(ipt.test_tensor.download().path) for ipt in self.inputs]
    +2406        assert all(isinstance(d, np.ndarray) for d in data)
    +2407        return data
    +2408
    +2409    def get_output_test_arrays(self) -> List[NDArray[Any]]:
    +2410        data = [load_array(out.test_tensor.download().path) for out in self.outputs]
    +2411        assert all(isinstance(d, np.ndarray) for d in data)
    +2412        return data
    +2413
    +2414    @staticmethod
    +2415    def get_batch_size(tensor_sizes: Mapping[TensorId, Mapping[AxisId, int]]) -> int:
    +2416        batch_size = 1
    +2417        tensor_with_batchsize: Optional[TensorId] = None
    +2418        for tid in tensor_sizes:
    +2419            for aid, s in tensor_sizes[tid].items():
    +2420                if aid != BATCH_AXIS_ID or s == 1 or s == batch_size:
    +2421                    continue
    +2422
    +2423                if batch_size != 1:
    +2424                    assert tensor_with_batchsize is not None
    +2425                    raise ValueError(
    +2426                        f"batch size mismatch for tensors '{tensor_with_batchsize}' ({batch_size}) and '{tid}' ({s})"
    +2427                    )
    +2428
    +2429                batch_size = s
    +2430                tensor_with_batchsize = tid
    +2431
    +2432        return batch_size
     2433
    -2434    def get_ns(self, input_sizes: Mapping[TensorId, Mapping[AxisId, int]]):
    -2435        """get parameter `n` for each parameterized axis
    -2436        such that the valid input size is >= the given input size"""
    -2437        ret: Dict[Tuple[TensorId, AxisId], ParameterizedSize_N] = {}
    -2438        axes = {t.id: {a.id: a for a in t.axes} for t in self.inputs}
    -2439        for tid in input_sizes:
    -2440            for aid, s in input_sizes[tid].items():
    -2441                size_descr = axes[tid][aid].size
    -2442                if isinstance(size_descr, ParameterizedSize):
    -2443                    ret[(tid, aid)] = size_descr.get_n(s)
    -2444                elif size_descr is None or isinstance(size_descr, (int, SizeReference)):
    -2445                    pass
    -2446                else:
    -2447                    assert_never(size_descr)
    -2448
    -2449        return ret
    -2450
    -2451    def get_tensor_sizes(
    -2452        self, ns: Mapping[Tuple[TensorId, AxisId], ParameterizedSize_N], batch_size: int
    -2453    ) -> _TensorSizes:
    -2454        axis_sizes = self.get_axis_sizes(ns, batch_size=batch_size)
    -2455        return _TensorSizes(
    -2456            {
    -2457                t: {
    -2458                    aa: axis_sizes.inputs[(tt, aa)]
    -2459                    for tt, aa in axis_sizes.inputs
    -2460                    if tt == t
    -2461                }
    -2462                for t in {tt for tt, _ in axis_sizes.inputs}
    -2463            },
    -2464            {
    -2465                t: {
    -2466                    aa: axis_sizes.outputs[(tt, aa)]
    -2467                    for tt, aa in axis_sizes.outputs
    -2468                    if tt == t
    -2469                }
    -2470                for t in {tt for tt, _ in axis_sizes.outputs}
    -2471            },
    -2472        )
    -2473
    -2474    def get_axis_sizes(
    -2475        self, ns: Mapping[Tuple[TensorId, AxisId], ParameterizedSize_N], batch_size: int
    -2476    ) -> _AxisSizes:
    -2477        all_axes = {
    -2478            t.id: {a.id: a for a in t.axes} for t in chain(self.inputs, self.outputs)
    -2479        }
    -2480
    -2481        inputs: Dict[Tuple[TensorId, AxisId], int] = {}
    -2482        outputs: Dict[Tuple[TensorId, AxisId], Union[int, _DataDepSize]] = {}
    -2483
    -2484        def get_axis_size(a: Union[InputAxis, OutputAxis]):
    -2485            if isinstance(a, BatchAxis):
    -2486                if (t_descr.id, a.id) in ns:
    -2487                    raise ValueError(
    -2488                        "No size increment factor (n) for batch axis of tensor"
    -2489                        + f" '{t_descr.id}' expected."
    -2490                    )
    -2491                return batch_size
    -2492            elif isinstance(a.size, int):
    -2493                if (t_descr.id, a.id) in ns:
    -2494                    raise ValueError(
    -2495                        "No size increment factor (n) for fixed size axis"
    -2496                        + f" '{a.id}' of tensor '{t_descr.id}' expected."
    -2497                    )
    -2498                return a.size
    -2499            elif isinstance(a.size, ParameterizedSize):
    -2500                if (t_descr.id, a.id) not in ns:
    -2501                    raise ValueError(
    -2502                        "Size increment factor (n) missing for parametrized axis"
    -2503                        + f" '{a.id}' of tensor '{t_descr.id}'."
    -2504                    )
    -2505                return a.size.get_size(ns[(t_descr.id, a.id)])
    -2506            elif isinstance(a.size, SizeReference):
    -2507                if (t_descr.id, a.id) in ns:
    -2508                    raise ValueError(
    -2509                        f"No size increment factor (n) for axis '{a.id}' of tensor"
    -2510                        + f" '{t_descr.id}' with size reference expected."
    -2511                    )
    -2512                assert not isinstance(a, BatchAxis)
    -2513                ref_axis = all_axes[a.size.tensor_id][a.size.axis_id]
    -2514                assert not isinstance(ref_axis, BatchAxis)
    -2515                return a.size.get_size(
    -2516                    axis=a,
    -2517                    ref_axis=ref_axis,
    -2518                    n=ns.get((a.size.tensor_id, a.size.axis_id), 0),
    -2519                )
    -2520            elif isinstance(a.size, DataDependentSize):
    -2521                if (t_descr.id, a.id) in ns:
    -2522                    raise ValueError(
    -2523                        "No size increment factor (n) for data dependent size axis"
    -2524                        + f" '{a.id}' of tensor '{t_descr.id}' expected."
    -2525                    )
    -2526                return _DataDepSize(a.size.min, a.size.max)
    -2527            else:
    -2528                assert_never(a.size)
    -2529
    -2530        for t_descr in self.inputs:
    -2531            for a in t_descr.axes:
    -2532                s = get_axis_size(a)
    -2533                assert not isinstance(s, _DataDepSize)
    -2534                inputs[t_descr.id, a.id] = s
    -2535
    -2536        for t_descr in chain(self.inputs, self.outputs):
    -2537            for a in t_descr.axes:
    -2538                s = get_axis_size(a)
    -2539                outputs[t_descr.id, a.id] = s
    -2540
    -2541        return _AxisSizes(inputs=inputs, outputs=outputs)
    -2542
    -2543    @model_validator(mode="before")
    -2544    @classmethod
    -2545    def _convert(cls, data: Dict[str, Any]) -> Dict[str, Any]:
    -2546        if (
    -2547            data.get("type") == "model"
    -2548            and isinstance(fv := data.get("format_version"), str)
    -2549            and fv.count(".") == 2
    -2550        ):
    -2551            fv_parts = fv.split(".")
    -2552            if any(not p.isdigit() for p in fv_parts):
    -2553                return data
    -2554
    -2555            fv_tuple = tuple(map(int, fv_parts))
    +2434    def get_output_tensor_sizes(
    +2435        self, input_sizes: Mapping[TensorId, Mapping[AxisId, int]]
    +2436    ) -> Dict[TensorId, Dict[AxisId, Union[int, _DataDepSize]]]:
    +2437        """Returns the tensor output sizes for given **input_sizes**.
    +2438        Only if **input_sizes** has a valid input shape, the tensor output size is exact.
    +2439        Otherwise it might be larger than the actual (valid) output"""
    +2440        batch_size = self.get_batch_size(input_sizes)
    +2441        ns = self.get_ns(input_sizes)
    +2442
    +2443        tensor_sizes = self.get_tensor_sizes(ns, batch_size=batch_size)
    +2444        return tensor_sizes.outputs
    +2445
    +2446    def get_ns(self, input_sizes: Mapping[TensorId, Mapping[AxisId, int]]):
    +2447        """get parameter `n` for each parameterized axis
    +2448        such that the valid input size is >= the given input size"""
    +2449        ret: Dict[Tuple[TensorId, AxisId], ParameterizedSize_N] = {}
    +2450        axes = {t.id: {a.id: a for a in t.axes} for t in self.inputs}
    +2451        for tid in input_sizes:
    +2452            for aid, s in input_sizes[tid].items():
    +2453                size_descr = axes[tid][aid].size
    +2454                if isinstance(size_descr, ParameterizedSize):
    +2455                    ret[(tid, aid)] = size_descr.get_n(s)
    +2456                elif size_descr is None or isinstance(size_descr, (int, SizeReference)):
    +2457                    pass
    +2458                else:
    +2459                    assert_never(size_descr)
    +2460
    +2461        return ret
    +2462
    +2463    def get_tensor_sizes(
    +2464        self, ns: Mapping[Tuple[TensorId, AxisId], ParameterizedSize_N], batch_size: int
    +2465    ) -> _TensorSizes:
    +2466        axis_sizes = self.get_axis_sizes(ns, batch_size=batch_size)
    +2467        return _TensorSizes(
    +2468            {
    +2469                t: {
    +2470                    aa: axis_sizes.inputs[(tt, aa)]
    +2471                    for tt, aa in axis_sizes.inputs
    +2472                    if tt == t
    +2473                }
    +2474                for t in {tt for tt, _ in axis_sizes.inputs}
    +2475            },
    +2476            {
    +2477                t: {
    +2478                    aa: axis_sizes.outputs[(tt, aa)]
    +2479                    for tt, aa in axis_sizes.outputs
    +2480                    if tt == t
    +2481                }
    +2482                for t in {tt for tt, _ in axis_sizes.outputs}
    +2483            },
    +2484        )
    +2485
    +2486    def get_axis_sizes(
    +2487        self,
    +2488        ns: Mapping[Tuple[TensorId, AxisId], ParameterizedSize_N],
    +2489        batch_size: Optional[int] = None,
    +2490        *,
    +2491        max_input_shape: Optional[Mapping[Tuple[TensorId, AxisId], int]] = None,
    +2492    ) -> _AxisSizes:
    +2493        """Determine input and output block shape for scale factors **ns**
    +2494        of parameterized input sizes.
    +2495
    +2496        Args:
    +2497            ns: Scale factor `n` for each axis (keyed by (tensor_id, axis_id))
    +2498                that is parameterized as `size = min + n * step`.
    +2499            batch_size: The desired size of the batch dimension.
    +2500                If given, **batch_size** overwrites any batch size present in
    +2501                **max_input_shape**. Defaults to 1.
    +2502            max_input_shape: Limits the derived block shapes.
    +2503                For each axis whose input size, parameterized by `n`, exceeds
    +2504                **max_input_shape**, `n` is reduced to the minimal value `n_min`
    +2505                for which the parameterized size still reaches **max_input_shape**.
    +2506                Use this for small input samples, for large values of **ns**,
    +2507                or simply whenever you know the full input shape.
    +2508
    +2509        Returns:
    +2510            Resolved axis sizes for model inputs and outputs.
    +2511        """
    +2512        max_input_shape = max_input_shape or {}
    +2513        if batch_size is None:
    +2514            for (_t_id, a_id), s in max_input_shape.items():
    +2515                if a_id == BATCH_AXIS_ID:
    +2516                    batch_size = s
    +2517                    break
    +2518            else:
    +2519                batch_size = 1
    +2520
    +2521        all_axes = {
    +2522            t.id: {a.id: a for a in t.axes} for t in chain(self.inputs, self.outputs)
    +2523        }
    +2524
    +2525        inputs: Dict[Tuple[TensorId, AxisId], int] = {}
    +2526        outputs: Dict[Tuple[TensorId, AxisId], Union[int, _DataDepSize]] = {}
    +2527
    +2528        def get_axis_size(a: Union[InputAxis, OutputAxis]):
    +2529            if isinstance(a, BatchAxis):
    +2530                if (t_descr.id, a.id) in ns:
    +2531                    logger.warning(
    +2532                        "Ignoring unexpected size increment factor (n) for batch axis"
    +2533                        + " of tensor '{}'.",
    +2534                        t_descr.id,
    +2535                    )
    +2536                return batch_size
    +2537            elif isinstance(a.size, int):
    +2538                if (t_descr.id, a.id) in ns:
    +2539                    logger.warning(
    +2540                        "Ignoring unexpected size increment factor (n) for fixed size"
    +2541                        + " axis '{}' of tensor '{}'.",
    +2542                        a.id,
    +2543                        t_descr.id,
    +2544                    )
    +2545                return a.size
    +2546            elif isinstance(a.size, ParameterizedSize):
    +2547                if (t_descr.id, a.id) not in ns:
    +2548                    raise ValueError(
    +2549                        "Size increment factor (n) missing for parametrized axis"
    +2550                        + f" '{a.id}' of tensor '{t_descr.id}'."
    +2551                    )
    +2552                n = ns[(t_descr.id, a.id)]
    +2553                s_max = max_input_shape.get((t_descr.id, a.id))
    +2554                if s_max is not None:
    +2555                    n = min(n, a.size.get_n(s_max))
     2556
    -2557            assert cls.implemented_format_version_tuple[0:2] == (0, 5)
    -2558            if fv_tuple[:2] in ((0, 3), (0, 4)):
    -2559                m04 = _ModelDescr_v0_4.load(data)
    -2560                if not isinstance(m04, InvalidDescr):
    -2561                    return _model_conv.convert_as_dict(m04)
    -2562            elif fv_tuple[:2] == (0, 5):
    -2563                # bump patch version
    -2564                data["format_version"] = cls.implemented_format_version
    -2565
    -2566        return data
    +2557                return a.size.get_size(n)
    +2558
    +2559            elif isinstance(a.size, SizeReference):
    +2560                if (t_descr.id, a.id) in ns:
    +2561                    logger.warning(
    +2562                        "Ignoring unexpected size increment factor (n) for axis '{}'"
    +2563                        + " of tensor '{}' with size reference.",
    +2564                        a.id,
    +2565                        t_descr.id,
    +2566                    )
    +2567                assert not isinstance(a, BatchAxis)
    +2568                ref_axis = all_axes[a.size.tensor_id][a.size.axis_id]
    +2569                assert not isinstance(ref_axis, BatchAxis)
    +2570                ref_key = (a.size.tensor_id, a.size.axis_id)
    +2571                ref_size = inputs.get(ref_key, outputs.get(ref_key))
    +2572                assert ref_size is not None, ref_key
    +2573                assert not isinstance(ref_size, _DataDepSize), ref_key
    +2574                return a.size.get_size(
    +2575                    axis=a,
    +2576                    ref_axis=ref_axis,
    +2577                    ref_size=ref_size,
    +2578                )
    +2579            elif isinstance(a.size, DataDependentSize):
    +2580                if (t_descr.id, a.id) in ns:
    +2581                    logger.warning(
    +2582                        "Ignoring unexpected increment factor (n) for data dependent"
    +2583                        + " size axis '{}' of tensor '{}'.",
    +2584                        a.id,
    +2585                        t_descr.id,
    +2586                    )
    +2587                return _DataDepSize(a.size.min, a.size.max)
    +2588            else:
    +2589                assert_never(a.size)
    +2590
    +2591        # first resolve all input sizes, except the `SizeReference` ones
    +2592        for t_descr in self.inputs:
    +2593            for a in t_descr.axes:
    +2594                if not isinstance(a.size, SizeReference):
    +2595                    s = get_axis_size(a)
    +2596                    assert not isinstance(s, _DataDepSize)
    +2597                    inputs[t_descr.id, a.id] = s
    +2598
    +2599        # resolve all other input axis sizes
    +2600        for t_descr in self.inputs:
    +2601            for a in t_descr.axes:
    +2602                if isinstance(a.size, SizeReference):
    +2603                    s = get_axis_size(a)
    +2604                    assert not isinstance(s, _DataDepSize)
    +2605                    inputs[t_descr.id, a.id] = s
    +2606
    +2607        # resolve all output axis sizes
    +2608        for t_descr in self.outputs:
    +2609            for a in t_descr.axes:
    +2610                assert not isinstance(a.size, ParameterizedSize)
    +2611                s = get_axis_size(a)
    +2612                outputs[t_descr.id, a.id] = s
    +2613
    +2614        return _AxisSizes(inputs=inputs, outputs=outputs)
    +2615
    +2616    @model_validator(mode="before")
    +2617    @classmethod
    +2618    def _convert(cls, data: Dict[str, Any]) -> Dict[str, Any]:
    +2619        if (
    +2620            data.get("type") == "model"
    +2621            and isinstance(fv := data.get("format_version"), str)
    +2622            and fv.count(".") == 2
    +2623        ):
    +2624            fv_parts = fv.split(".")
    +2625            if any(not p.isdigit() for p in fv_parts):
    +2626                return data
    +2627
    +2628            fv_tuple = tuple(map(int, fv_parts))
    +2629
    +2630            assert cls.implemented_format_version_tuple[0:2] == (0, 5)
    +2631            if fv_tuple[:2] in ((0, 3), (0, 4)):
    +2632                m04 = _ModelDescr_v0_4.load(data)
    +2633                if not isinstance(m04, InvalidDescr):
    +2634                    return _model_conv.convert_as_dict(m04)
    +2635            elif fv_tuple[:2] == (0, 5):
    +2636                # bump patch version
    +2637                data["format_version"] = cls.implemented_format_version
    +2638
    +2639        return data
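
    To make the triage above concrete, here is a minimal, self-contained sketch of the format_version check that decides between a 0.3/0.4 migration and a plain patch-version bump (the input dict is hypothetical; the prints stand in for the actual conversion calls):

        # hypothetical description data; only the keys checked by _convert are shown
        data = {"type": "model", "format_version": "0.4.10"}

        fv = data.get("format_version")
        if isinstance(fv, str) and fv.count(".") == 2 and all(p.isdigit() for p in fv.split(".")):
            fv_tuple = tuple(map(int, fv.split(".")))
            if fv_tuple[:2] in ((0, 3), (0, 4)):
                print("migrate via the model 0.4 description")  # _model_conv.convert_as_dict(...)
            elif fv_tuple[:2] == (0, 5):
                print("bump format_version to the implemented patch version")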
     
    @@ -3176,7 +3245,8 @@
    Returns:
    -Model zoo (bioimage.io) wide, unique identifier (assigned by bioimage.io)
    +bioimage.io-wide unique resource identifier
    +assigned by bioimage.io; version unspecific.

    @@ -3196,7 +3266,7 @@
    Returns:
    - documentation: Annotated[Union[Annotated[pathlib.Path, PathType(path_type='file'), Predicate(is_absolute)], bioimageio.spec._internal.io.RelativeFilePath, bioimageio.spec._internal.url.HttpUrl], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')]), AfterValidator(func=<function _validate_md_suffix at 0x7f637e3cf560>), PlainSerializer(func=<function _package at 0x7f63805244a0>, return_type=PydanticUndefined, when_used='unless-none'), FieldInfo(annotation=NoneType, required=True, examples=['https://raw.githubusercontent.com/bioimage-io/spec-bioimage-io/main/example_descriptions/models/unet2d_nuclei_broad/README.md', 'README.md'])] + documentation: Annotated[Union[Annotated[pathlib.Path, PathType(path_type='file'), Predicate(is_absolute)], bioimageio.spec._internal.io.RelativeFilePath, bioimageio.spec._internal.url.HttpUrl], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')]), AfterValidator(func=<function _validate_md_suffix at 0x7f560c7e3560>), PlainSerializer(func=<function _package at 0x7f560ee284a0>, return_type=PydanticUndefined, when_used='unless-none'), FieldInfo(annotation=NoneType, required=True, examples=['https://raw.githubusercontent.com/bioimage-io/spec-bioimage-io/main/example_descriptions/models/unet2d_nuclei_broad/README.md', 'README.md'])]
    @@ -3225,7 +3295,7 @@
    Returns:
    - name: Annotated[str, RestrictCharacters(alphabet='abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_- ()'), MinLen(min_length=5), MaxLen(max_length=128), AfterWarner(func=<function as_warning.<locals>.wrapper at 0x7f6373b9a840>, severity=20, msg='Name longer than 64 characters.', context={'typ': Annotated[Any, MaxLen(max_length=64)]})] + name: Annotated[str, RestrictCharacters(alphabet='abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_- ()'), MinLen(min_length=5), MaxLen(max_length=128), AfterWarner(func=<function as_warning.<locals>.wrapper at 0x7f5602452b60>, severity=20, msg='Name longer than 64 characters.', context={'typ': Annotated[Any, MaxLen(max_length=64)]})]
    @@ -3281,7 +3351,7 @@
    Returns:
    - run_mode: Annotated[Optional[bioimageio.spec.model.v0_4.RunMode], AfterWarner(func=<function as_warning.<locals>.wrapper at 0x7f6373b9aca0>, severity=30, msg="Run mode '{value}' has limited support across consumer softwares.", context={'typ': None})] + run_mode: Annotated[Optional[bioimageio.spec.model.v0_4.RunMode], AfterWarner(func=<function as_warning.<locals>.wrapper at 0x7f5602452fc0>, severity=30, msg="Run mode '{value}' has limited support across consumer softwares.", context={'typ': None})]
    @@ -3324,7 +3394,7 @@
    Returns:
    - weights: Annotated[bioimageio.spec.model.v0_5.WeightsDescr, WrapSerializer(func=<function package_weights at 0x7f63748a5e40>, return_type=PydanticUndefined, when_used='always')] + weights: Annotated[bioimageio.spec.model.v0_5.WeightsDescr, WrapSerializer(func=<function package_weights at 0x7f56031bdd00>, return_type=PydanticUndefined, when_used='always')]
    @@ -3348,10 +3418,10 @@
    Returns:
    -2392    def get_input_test_arrays(self) -> List[NDArray[Any]]:
    -2393        data = [load_array(ipt.test_tensor.download().path) for ipt in self.inputs]
    -2394        assert all(isinstance(d, np.ndarray) for d in data)
    -2395        return data
    +2404    def get_input_test_arrays(self) -> List[NDArray[Any]]:
    +2405        data = [load_array(ipt.test_tensor.download().path) for ipt in self.inputs]
    +2406        assert all(isinstance(d, np.ndarray) for d in data)
    +2407        return data
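
    A brief usage sketch, assuming `model_descr` is a loaded, valid `ModelDescr` (note that `download()` fetches remote test tensors on demand):

        arrays = model_descr.get_input_test_arrays()  # one numpy array per input tensor
        print([a.shape for a in arrays])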
     
    @@ -3369,10 +3439,10 @@
    Returns:
    -2397    def get_output_test_arrays(self) -> List[NDArray[Any]]:
    -2398        data = [load_array(out.test_tensor.download().path) for out in self.outputs]
    -2399        assert all(isinstance(d, np.ndarray) for d in data)
    -2400        return data
    +2409    def get_output_test_arrays(self) -> List[NDArray[Any]]:
    +2410        data = [load_array(out.test_tensor.download().path) for out in self.outputs]
    +2411        assert all(isinstance(d, np.ndarray) for d in data)
    +2412        return data
     
    @@ -3391,25 +3461,25 @@
    Returns:
    -2402    @staticmethod
    -2403    def get_batch_size(tensor_sizes: Mapping[TensorId, Mapping[AxisId, int]]) -> int:
    -2404        batch_size = 1
    -2405        tensor_with_batchsize: Optional[TensorId] = None
    -2406        for tid in tensor_sizes:
    -2407            for aid, s in tensor_sizes[tid].items():
    -2408                if aid != BATCH_AXIS_ID or s == 1 or s == batch_size:
    -2409                    continue
    -2410
    -2411                if batch_size != 1:
    -2412                    assert tensor_with_batchsize is not None
    -2413                    raise ValueError(
    -2414                        f"batch size mismatch for tensors '{tensor_with_batchsize}' ({batch_size}) and '{tid}' ({s})"
    -2415                    )
    -2416
    -2417                batch_size = s
    -2418                tensor_with_batchsize = tid
    -2419
    -2420        return batch_size
    +2414    @staticmethod
    +2415    def get_batch_size(tensor_sizes: Mapping[TensorId, Mapping[AxisId, int]]) -> int:
    +2416        batch_size = 1
    +2417        tensor_with_batchsize: Optional[TensorId] = None
    +2418        for tid in tensor_sizes:
    +2419            for aid, s in tensor_sizes[tid].items():
    +2420                if aid != BATCH_AXIS_ID or s == 1 or s == batch_size:
    +2421                    continue
    +2422
    +2423                if batch_size != 1:
    +2424                    assert tensor_with_batchsize is not None
    +2425                    raise ValueError(
    +2426                        f"batch size mismatch for tensors '{tensor_with_batchsize}' ({batch_size}) and '{tid}' ({s})"
    +2427                    )
    +2428
    +2429                batch_size = s
    +2430                tensor_with_batchsize = tid
    +2431
    +2432        return batch_size
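
    Since `get_batch_size` is a staticmethod, it can be exercised without a model instance; a sketch with hypothetical tensor and axis ids:

        from bioimageio.spec.model.v0_5 import AxisId, ModelDescr, TensorId

        sizes = {
            TensorId("raw"): {AxisId("batch"): 4, AxisId("x"): 512},
            TensorId("mask"): {AxisId("batch"): 4, AxisId("y"): 256},
        }
        assert ModelDescr.get_batch_size(sizes) == 4  # mismatching batch entries raise ValueError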
     
    @@ -3427,17 +3497,17 @@
    Returns:
    -2422    def get_output_tensor_sizes(
    -2423        self, input_sizes: Mapping[TensorId, Mapping[AxisId, int]]
    -2424    ) -> Dict[TensorId, Dict[AxisId, Union[int, _DataDepSize]]]:
    -2425        """Returns the tensor output sizes for given **input_sizes**.
    -2426        Only if **input_sizes** has a valid input shape, the tensor output size is exact.
    -2427        Otherwise it might be larger than the actual (valid) output"""
    -2428        batch_size = self.get_batch_size(input_sizes)
    -2429        ns = self.get_ns(input_sizes)
    -2430
    -2431        tensor_sizes = self.get_tensor_sizes(ns, batch_size=batch_size)
    -2432        return tensor_sizes.outputs
    +2434    def get_output_tensor_sizes(
    +2435        self, input_sizes: Mapping[TensorId, Mapping[AxisId, int]]
    +2436    ) -> Dict[TensorId, Dict[AxisId, Union[int, _DataDepSize]]]:
    +2437        """Returns the tensor output sizes for given **input_sizes**.
    +2438        The output sizes are exact only if **input_sizes** specifies a valid input shape;
    +2439        otherwise they may be larger than the actual (valid) output sizes."""
    +2440        batch_size = self.get_batch_size(input_sizes)
    +2441        ns = self.get_ns(input_sizes)
    +2442
    +2443        tensor_sizes = self.get_tensor_sizes(ns, batch_size=batch_size)
    +2444        return tensor_sizes.outputs
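
    For example (hypothetical tensor and axis ids; assumes `model_descr` is a loaded `ModelDescr` with one input tensor `raw`):

        out_sizes = model_descr.get_output_tensor_sizes(
            {TensorId("raw"): {AxisId("batch"): 1, AxisId("x"): 512, AxisId("y"): 512}}
        )
        # e.g. {TensorId("segmentation"): {AxisId("batch"): 1, AxisId("x"): 512, AxisId("y"): 512}}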
     
    @@ -3459,22 +3529,22 @@
    Returns:
    -2434    def get_ns(self, input_sizes: Mapping[TensorId, Mapping[AxisId, int]]):
    -2435        """get parameter `n` for each parameterized axis
    -2436        such that the valid input size is >= the given input size"""
    -2437        ret: Dict[Tuple[TensorId, AxisId], ParameterizedSize_N] = {}
    -2438        axes = {t.id: {a.id: a for a in t.axes} for t in self.inputs}
    -2439        for tid in input_sizes:
    -2440            for aid, s in input_sizes[tid].items():
    -2441                size_descr = axes[tid][aid].size
    -2442                if isinstance(size_descr, ParameterizedSize):
    -2443                    ret[(tid, aid)] = size_descr.get_n(s)
    -2444                elif size_descr is None or isinstance(size_descr, (int, SizeReference)):
    -2445                    pass
    -2446                else:
    -2447                    assert_never(size_descr)
    -2448
    -2449        return ret
    +2446    def get_ns(self, input_sizes: Mapping[TensorId, Mapping[AxisId, int]]):
    +2447        """Get the parameter `n` for each parameterized axis
    +2448        such that the valid input size is >= the given input size."""
    +2449        ret: Dict[Tuple[TensorId, AxisId], ParameterizedSize_N] = {}
    +2450        axes = {t.id: {a.id: a for a in t.axes} for t in self.inputs}
    +2451        for tid in input_sizes:
    +2452            for aid, s in input_sizes[tid].items():
    +2453                size_descr = axes[tid][aid].size
    +2454                if isinstance(size_descr, ParameterizedSize):
    +2455                    ret[(tid, aid)] = size_descr.get_n(s)
    +2456                elif size_descr is None or isinstance(size_descr, (int, SizeReference)):
    +2457                    pass
    +2458                else:
    +2459                    assert_never(size_descr)
    +2460
    +2461        return ret
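
    The relation behind `ParameterizedSize` is `size = min + n * step`; `get_n` is assumed to round up so the resulting valid size covers the requested one. A self-contained sketch of that arithmetic with hypothetical values:

        minimum, step = 16, 8                           # size = minimum + n * step
        requested = 100
        n = max(0, -(-(requested - minimum) // step))   # ceiling division
        assert n == 11
        assert minimum + n * step == 104                # smallest valid size >= 100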
     
    @@ -3495,28 +3565,28 @@
    Returns:
    -2451    def get_tensor_sizes(
    -2452        self, ns: Mapping[Tuple[TensorId, AxisId], ParameterizedSize_N], batch_size: int
    -2453    ) -> _TensorSizes:
    -2454        axis_sizes = self.get_axis_sizes(ns, batch_size=batch_size)
    -2455        return _TensorSizes(
    -2456            {
    -2457                t: {
    -2458                    aa: axis_sizes.inputs[(tt, aa)]
    -2459                    for tt, aa in axis_sizes.inputs
    -2460                    if tt == t
    -2461                }
    -2462                for t in {tt for tt, _ in axis_sizes.inputs}
    -2463            },
    -2464            {
    -2465                t: {
    -2466                    aa: axis_sizes.outputs[(tt, aa)]
    -2467                    for tt, aa in axis_sizes.outputs
    -2468                    if tt == t
    -2469                }
    -2470                for t in {tt for tt, _ in axis_sizes.outputs}
    -2471            },
    -2472        )
    +2463    def get_tensor_sizes(
    +2464        self, ns: Mapping[Tuple[TensorId, AxisId], ParameterizedSize_N], batch_size: int
    +2465    ) -> _TensorSizes:
    +2466        axis_sizes = self.get_axis_sizes(ns, batch_size=batch_size)
    +2467        return _TensorSizes(
    +2468            {
    +2469                t: {
    +2470                    aa: axis_sizes.inputs[(tt, aa)]
    +2471                    for tt, aa in axis_sizes.inputs
    +2472                    if tt == t
    +2473                }
    +2474                for t in {tt for tt, _ in axis_sizes.inputs}
    +2475            },
    +2476            {
    +2477                t: {
    +2478                    aa: axis_sizes.outputs[(tt, aa)]
    +2479                    for tt, aa in axis_sizes.outputs
    +2480                    if tt == t
    +2481                }
    +2482                for t in {tt for tt, _ in axis_sizes.outputs}
    +2483            },
    +2484        )
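
    `get_tensor_sizes` only regroups the flat `(tensor_id, axis_id)`-keyed result of `get_axis_sizes` into nested per-tensor dicts; schematically (plain strings stand in for the id types):

        flat = {("raw", "x"): 104, ("raw", "y"): 104, ("mask", "x"): 104}
        nested = {
            t: {a: s for (tt, a), s in flat.items() if tt == t}
            for t in {tt for tt, _ in flat}
        }
        assert nested == {"raw": {"x": 104, "y": 104}, "mask": {"x": 104}}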
     
    @@ -3528,84 +3598,170 @@
    Returns:
    -def get_axis_sizes( self, ns: Mapping[Tuple[bioimageio.spec.model.v0_5.TensorId, bioimageio.spec.model.v0_5.AxisId], int], batch_size: int) -> bioimageio.spec.model.v0_5._AxisSizes:
    +def get_axis_sizes( self, ns: Mapping[Tuple[bioimageio.spec.model.v0_5.TensorId, bioimageio.spec.model.v0_5.AxisId], int], batch_size: Optional[int] = None, *, max_input_shape: Optional[Mapping[Tuple[bioimageio.spec.model.v0_5.TensorId, bioimageio.spec.model.v0_5.AxisId], int]] = None) -> bioimageio.spec.model.v0_5._AxisSizes:
    -2474    def get_axis_sizes(
    -2475        self, ns: Mapping[Tuple[TensorId, AxisId], ParameterizedSize_N], batch_size: int
    -2476    ) -> _AxisSizes:
    -2477        all_axes = {
    -2478            t.id: {a.id: a for a in t.axes} for t in chain(self.inputs, self.outputs)
    -2479        }
    -2480
    -2481        inputs: Dict[Tuple[TensorId, AxisId], int] = {}
    -2482        outputs: Dict[Tuple[TensorId, AxisId], Union[int, _DataDepSize]] = {}
    -2483
    -2484        def get_axis_size(a: Union[InputAxis, OutputAxis]):
    -2485            if isinstance(a, BatchAxis):
    -2486                if (t_descr.id, a.id) in ns:
    -2487                    raise ValueError(
    -2488                        "No size increment factor (n) for batch axis of tensor"
    -2489                        + f" '{t_descr.id}' expected."
    -2490                    )
    -2491                return batch_size
    -2492            elif isinstance(a.size, int):
    -2493                if (t_descr.id, a.id) in ns:
    -2494                    raise ValueError(
    -2495                        "No size increment factor (n) for fixed size axis"
    -2496                        + f" '{a.id}' of tensor '{t_descr.id}' expected."
    -2497                    )
    -2498                return a.size
    -2499            elif isinstance(a.size, ParameterizedSize):
    -2500                if (t_descr.id, a.id) not in ns:
    -2501                    raise ValueError(
    -2502                        "Size increment factor (n) missing for parametrized axis"
    -2503                        + f" '{a.id}' of tensor '{t_descr.id}'."
    -2504                    )
    -2505                return a.size.get_size(ns[(t_descr.id, a.id)])
    -2506            elif isinstance(a.size, SizeReference):
    -2507                if (t_descr.id, a.id) in ns:
    -2508                    raise ValueError(
    -2509                        f"No size increment factor (n) for axis '{a.id}' of tensor"
    -2510                        + f" '{t_descr.id}' with size reference expected."
    -2511                    )
    -2512                assert not isinstance(a, BatchAxis)
    -2513                ref_axis = all_axes[a.size.tensor_id][a.size.axis_id]
    -2514                assert not isinstance(ref_axis, BatchAxis)
    -2515                return a.size.get_size(
    -2516                    axis=a,
    -2517                    ref_axis=ref_axis,
    -2518                    n=ns.get((a.size.tensor_id, a.size.axis_id), 0),
    -2519                )
    -2520            elif isinstance(a.size, DataDependentSize):
    -2521                if (t_descr.id, a.id) in ns:
    -2522                    raise ValueError(
    -2523                        "No size increment factor (n) for data dependent size axis"
    -2524                        + f" '{a.id}' of tensor '{t_descr.id}' expected."
    -2525                    )
    -2526                return _DataDepSize(a.size.min, a.size.max)
    -2527            else:
    -2528                assert_never(a.size)
    -2529
    -2530        for t_descr in self.inputs:
    -2531            for a in t_descr.axes:
    -2532                s = get_axis_size(a)
    -2533                assert not isinstance(s, _DataDepSize)
    -2534                inputs[t_descr.id, a.id] = s
    -2535
    -2536        for t_descr in chain(self.inputs, self.outputs):
    -2537            for a in t_descr.axes:
    -2538                s = get_axis_size(a)
    -2539                outputs[t_descr.id, a.id] = s
    -2540
    -2541        return _AxisSizes(inputs=inputs, outputs=outputs)
    +2486    def get_axis_sizes(
    +2487        self,
    +2488        ns: Mapping[Tuple[TensorId, AxisId], ParameterizedSize_N],
    +2489        batch_size: Optional[int] = None,
    +2490        *,
    +2491        max_input_shape: Optional[Mapping[Tuple[TensorId, AxisId], int]] = None,
    +2492    ) -> _AxisSizes:
    +2493        """Determine input and output block shape for scale factors **ns**
    +2494        of parameterized input sizes.
    +2495
    +2496        Args:
    +2497            ns: Scale factor `n` for each axis (keyed by (tensor_id, axis_id))
    +2498                that is parameterized as `size = min + n * step`.
    +2499            batch_size: The desired size of the batch dimension.
    +2500                If given, **batch_size** overwrites any batch size present in
    +2501                **max_input_shape**. Defaults to 1.
    +2502            max_input_shape: Limits the derived block shapes.
    +2503                For each axis whose input size, parameterized by `n`, exceeds
    +2504                **max_input_shape**, `n` is reduced to the minimal value `n_min`
    +2505                for which the parameterized size still reaches **max_input_shape**.
    +2506                Use this for small input samples, for large values of **ns**,
    +2507                or simply whenever you know the full input shape.
    +2508
    +2509        Returns:
    +2510            Resolved axis sizes for model inputs and outputs.
    +2511        """
    +2512        max_input_shape = max_input_shape or {}
    +2513        if batch_size is None:
    +2514            for (_t_id, a_id), s in max_input_shape.items():
    +2515                if a_id == BATCH_AXIS_ID:
    +2516                    batch_size = s
    +2517                    break
    +2518            else:
    +2519                batch_size = 1
    +2520
    +2521        all_axes = {
    +2522            t.id: {a.id: a for a in t.axes} for t in chain(self.inputs, self.outputs)
    +2523        }
    +2524
    +2525        inputs: Dict[Tuple[TensorId, AxisId], int] = {}
    +2526        outputs: Dict[Tuple[TensorId, AxisId], Union[int, _DataDepSize]] = {}
    +2527
    +2528        def get_axis_size(a: Union[InputAxis, OutputAxis]):
    +2529            if isinstance(a, BatchAxis):
    +2530                if (t_descr.id, a.id) in ns:
    +2531                    logger.warning(
    +2532                        "Ignoring unexpected size increment factor (n) for batch axis"
    +2533                        + " of tensor '{}'.",
    +2534                        t_descr.id,
    +2535                    )
    +2536                return batch_size
    +2537            elif isinstance(a.size, int):
    +2538                if (t_descr.id, a.id) in ns:
    +2539                    logger.warning(
    +2540                        "Ignoring unexpected size increment factor (n) for fixed size"
    +2541                        + " axis '{}' of tensor '{}'.",
    +2542                        a.id,
    +2543                        t_descr.id,
    +2544                    )
    +2545                return a.size
    +2546            elif isinstance(a.size, ParameterizedSize):
    +2547                if (t_descr.id, a.id) not in ns:
    +2548                    raise ValueError(
    +2549                        "Size increment factor (n) missing for parametrized axis"
    +2550                        + f" '{a.id}' of tensor '{t_descr.id}'."
    +2551                    )
    +2552                n = ns[(t_descr.id, a.id)]
    +2553                s_max = max_input_shape.get((t_descr.id, a.id))
    +2554                if s_max is not None:
    +2555                    n = min(n, a.size.get_n(s_max))
    +2556
    +2557                return a.size.get_size(n)
    +2558
    +2559            elif isinstance(a.size, SizeReference):
    +2560                if (t_descr.id, a.id) in ns:
    +2561                    logger.warning(
    +2562                        "Ignoring unexpected size increment factor (n) for axis '{}'"
    +2563                        + " of tensor '{}' with size reference.",
    +2564                        a.id,
    +2565                        t_descr.id,
    +2566                    )
    +2567                assert not isinstance(a, BatchAxis)
    +2568                ref_axis = all_axes[a.size.tensor_id][a.size.axis_id]
    +2569                assert not isinstance(ref_axis, BatchAxis)
    +2570                ref_key = (a.size.tensor_id, a.size.axis_id)
    +2571                ref_size = inputs.get(ref_key, outputs.get(ref_key))
    +2572                assert ref_size is not None, ref_key
    +2573                assert not isinstance(ref_size, _DataDepSize), ref_key
    +2574                return a.size.get_size(
    +2575                    axis=a,
    +2576                    ref_axis=ref_axis,
    +2577                    ref_size=ref_size,
    +2578                )
    +2579            elif isinstance(a.size, DataDependentSize):
    +2580                if (t_descr.id, a.id) in ns:
    +2581                    logger.warning(
    +2582                        "Ignoring unexpected increment factor (n) for data dependent"
    +2583                        + " size axis '{}' of tensor '{}'.",
    +2584                        a.id,
    +2585                        t_descr.id,
    +2586                    )
    +2587                return _DataDepSize(a.size.min, a.size.max)
    +2588            else:
    +2589                assert_never(a.size)
    +2590
    +2591        # first resolve all input sizes, except the `SizeReference` ones
    +2592        for t_descr in self.inputs:
    +2593            for a in t_descr.axes:
    +2594                if not isinstance(a.size, SizeReference):
    +2595                    s = get_axis_size(a)
    +2596                    assert not isinstance(s, _DataDepSize)
    +2597                    inputs[t_descr.id, a.id] = s
    +2598
    +2599        # resolve all other input axis sizes
    +2600        for t_descr in self.inputs:
    +2601            for a in t_descr.axes:
    +2602                if isinstance(a.size, SizeReference):
    +2603                    s = get_axis_size(a)
    +2604                    assert not isinstance(s, _DataDepSize)
    +2605                    inputs[t_descr.id, a.id] = s
    +2606
    +2607        # resolve all output axis sizes
    +2608        for t_descr in self.outputs:
    +2609            for a in t_descr.axes:
    +2610                assert not isinstance(a.size, ParameterizedSize)
    +2611                s = get_axis_size(a)
    +2612                outputs[t_descr.id, a.id] = s
    +2613
    +2614        return _AxisSizes(inputs=inputs, outputs=outputs)
     
    +Determine input and output block shape for scale factors ns
    +of parameterized input sizes.
    +
    +Arguments:
    +
    +• ns: Scale factor n for each axis (keyed by (tensor_id, axis_id))
    +  that is parameterized as size = min + n * step.
    +• batch_size: The desired size of the batch dimension.
    +  If given, batch_size overwrites any batch size present in
    +  max_input_shape. Defaults to 1.
    +• max_input_shape: Limits the derived block shapes.
    +  For each axis whose input size, parameterized by n, exceeds
    +  max_input_shape, n is reduced to the minimal value n_min
    +  for which the parameterized size still reaches max_input_shape.
    +  Use this for small input samples, for large values of ns,
    +  or simply whenever you know the full input shape.
    +
    +Returns:
    +
    +Resolved axis sizes for model inputs and outputs.
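
    Tying the pieces together, a hypothetical call that requests a large `n` but caps the block via max_input_shape (assumes `model_descr` is a loaded `ModelDescr` with a parameterized `x` axis on input `raw`):

        axis_sizes = model_descr.get_axis_sizes(
            ns={(TensorId("raw"), AxisId("x")): 1000},              # deliberately too large
            max_input_shape={(TensorId("raw"), AxisId("x")): 512},  # caps n accordingly
        )
        print(axis_sizes.inputs, axis_sizes.outputs)  # omitted batch_size defaults to 1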
    @@ -3700,13 +3856,14 @@
    Inherited Members
     33 type: Literal["notebook"] = "notebook"
     34
     35 id: Optional[NotebookId] = None
    -36 """Model zoo (bioimage.io) wide, unique identifier (assigned by bioimage.io)"""
    -37
    -38 parent: Optional[NotebookId] = None
    -39 """The description from which this one is derived"""
    -40
    -41 source: NotebookSource
    -42 """The Jupyter notebook"""
    +36 """bioimage.io-wide unique resource identifier
    +37 assigned by bioimage.io; version **un**specific."""
    +38
    +39 parent: Optional[NotebookId] = None
    +40 """The description from which this one is derived"""
    +41
    +42 source: NotebookSource
    +43 """The Jupyter notebook"""
    @@ -3733,7 +3890,8 @@
    Inherited Members
    -Model zoo (bioimage.io) wide, unique identifier (assigned by bioimage.io)
    +bioimage.io-wide unique resource identifier
    +assigned by bioimage.io; version unspecific.

    diff --git a/bioimageio/spec/application.html b/bioimageio/spec/application.html index 77e5c5cc1..892d2d717 100644 --- a/bioimageio/spec/application.html +++ b/bioimageio/spec/application.html @@ -146,16 +146,17 @@

     35 type: Literal["application"] = "application"
     36
     37 id: Optional[ApplicationId] = None
    -38 """Model zoo (bioimage.io) wide, unique identifier (assigned by bioimage.io)"""
    -39
    -40 parent: Optional[ApplicationId] = None
    -41 """The description from which this one is derived"""
    -42
    -43 source: Annotated[
    -44     Optional[ImportantFileSource],
    -45     Field(description="URL or path to the source of the application"),
    -46 ] = None
    -47 """The primary source of the application"""
    +38 """bioimage.io-wide unique resource identifier
    +39 assigned by bioimage.io; version **un**specific."""
    +40
    +41 parent: Optional[ApplicationId] = None
    +42 """The description from which this one is derived"""
    +43
    +44 source: Annotated[
    +45     Optional[ImportantFileSource],
    +46     Field(description="URL or path to the source of the application"),
    +47 ] = None
    +48 """The primary source of the application"""

    @@ -182,7 +183,8 @@

    -Model zoo (bioimage.io) wide, unique identifier (assigned by bioimage.io)
    +bioimage.io-wide unique resource identifier
    +assigned by bioimage.io; version unspecific.

    @@ -202,7 +204,7 @@

    - source: Annotated[Optional[Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')]), AfterValidator(func=<function wo_special_file_name at 0x7f63806efce0>), PlainSerializer(func=<function _package at 0x7f63805244a0>, return_type=PydanticUndefined, when_used='unless-none')]], FieldInfo(annotation=NoneType, required=True, description='URL or path to the source of the application')] + source: Annotated[Optional[Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')]), AfterValidator(func=<function wo_special_file_name at 0x7f560edf7ce0>), PlainSerializer(func=<function _package at 0x7f560ee284a0>, return_type=PydanticUndefined, when_used='unless-none')]], FieldInfo(annotation=NoneType, required=True, description='URL or path to the source of the application')]
    diff --git a/bioimageio/spec/application/v0_2.html b/bioimageio/spec/application/v0_2.html index b442cde11..d7893944a 100644 --- a/bioimageio/spec/application/v0_2.html +++ b/bioimageio/spec/application/v0_2.html @@ -131,23 +131,24 @@

     33 type: Literal["application"] = "application"
     34
     35 id: Optional[ApplicationId] = None
    -36 """Model zoo (bioimage.io) wide, unique identifier (assigned by bioimage.io)"""
    -37
    -38 source: Annotated[
    -39     Optional[ImportantFileSource],
    -40     Field(description="URL or path to the source of the application"),
    -41 ] = None
    -42 """The primary source of the application"""
    -43
    +36 """bioimage.io-wide unique resource identifier
    +37 assigned by bioimage.io; version **un**specific."""
    +38
    +39 source: Annotated[
    +40     Optional[ImportantFileSource],
    +41     Field(description="URL or path to the source of the application"),
    +42 ] = None
    +43 """The primary source of the application"""
     44
    -45class LinkedApplication(Node):
    -46    """Reference to a bioimage.io application."""
    -47
    -48    id: ApplicationId
    -49    """A valid application `id` from the bioimage.io collection."""
    -50
    -51    version_number: Optional[int] = None
    -52    """version number (n-th published version, not the semantic version) of linked application"""
    +45
    +46class LinkedApplication(Node):
    +47    """Reference to a bioimage.io application."""
    +48
    +49    id: ApplicationId
    +50    """A valid application `id` from the bioimage.io collection."""
    +51
    +52    version_number: Optional[int] = None
    +53    """version number (n-th published version, not the semantic version) of linked application"""

    @@ -208,13 +209,14 @@

    Inherited Members
     34 type: Literal["application"] = "application"
     35
     36 id: Optional[ApplicationId] = None
    -37 """Model zoo (bioimage.io) wide, unique identifier (assigned by bioimage.io)"""
    -38
    -39 source: Annotated[
    -40     Optional[ImportantFileSource],
    -41     Field(description="URL or path to the source of the application"),
    -42 ] = None
    -43 """The primary source of the application"""
    +37 """bioimage.io-wide unique resource identifier
    +38 assigned by bioimage.io; version **un**specific."""
    +39
    +40 source: Annotated[
    +41     Optional[ImportantFileSource],
    +42     Field(description="URL or path to the source of the application"),
    +43 ] = None
    +44 """The primary source of the application"""
    @@ -241,14 +243,15 @@
    Inherited Members
    -Model zoo (bioimage.io) wide, unique identifier (assigned by bioimage.io)
    +bioimage.io-wide unique resource identifier
    +assigned by bioimage.io; version unspecific.

    - source: Annotated[Optional[Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')]), AfterValidator(func=<function wo_special_file_name at 0x7f63806efce0>), PlainSerializer(func=<function _package at 0x7f63805244a0>, return_type=PydanticUndefined, when_used='unless-none')]], FieldInfo(annotation=NoneType, required=True, description='URL or path to the source of the application')] + source: Annotated[Optional[Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')]), AfterValidator(func=<function wo_special_file_name at 0x7f560edf7ce0>), PlainSerializer(func=<function _package at 0x7f560ee284a0>, return_type=PydanticUndefined, when_used='unless-none')]], FieldInfo(annotation=NoneType, required=True, description='URL or path to the source of the application')]
    @@ -357,14 +360,14 @@
    Inherited Members
    -46class LinkedApplication(Node):
    -47    """Reference to a bioimage.io application."""
    -48
    -49    id: ApplicationId
    -50    """A valid application `id` from the bioimage.io collection."""
    -51
    -52    version_number: Optional[int] = None
    -53    """version number (n-th published version, not the semantic version) of linked application"""
    +47class LinkedApplication(Node):
    +48    """Reference to a bioimage.io application."""
    +49
    +50    id: ApplicationId
    +51    """A valid application `id` from the bioimage.io collection."""
    +52
    +53    version_number: Optional[int] = None
    +54    """version number (n-th published version, not the semantic version) of linked application"""
     
    diff --git a/bioimageio/spec/application/v0_3.html b/bioimageio/spec/application/v0_3.html index 066027d6c..bd940ea8a 100644 --- a/bioimageio/spec/application/v0_3.html +++ b/bioimageio/spec/application/v0_3.html @@ -132,23 +132,24 @@

     34 type: Literal["application"] = "application"
     35
     36 id: Optional[ApplicationId] = None
    -37 """Model zoo (bioimage.io) wide, unique identifier (assigned by bioimage.io)"""
    -38
    -39 parent: Optional[ApplicationId] = None
    -40 """The description from which this one is derived"""
    -41
    -42 source: Annotated[
    -43     Optional[ImportantFileSource],
    -44     Field(description="URL or path to the source of the application"),
    -45 ] = None
    -46 """The primary source of the application"""
    -47
    +37 """bioimage.io-wide unique resource identifier
    +38 assigned by bioimage.io; version **un**specific."""
    +39
    +40 parent: Optional[ApplicationId] = None
    +41 """The description from which this one is derived"""
    +42
    +43 source: Annotated[
    +44     Optional[ImportantFileSource],
    +45     Field(description="URL or path to the source of the application"),
    +46 ] = None
    +47 """The primary source of the application"""
     48
    -49class LinkedApplication(LinkedResourceNode):
    -50    """Reference to a bioimage.io application."""
    -51
    -52    id: ApplicationId
    -53    """A valid application `id` from the bioimage.io collection."""
    +49
    +50class LinkedApplication(LinkedResourceNode):
    +51    """Reference to a bioimage.io application."""
    +52
    +53    id: ApplicationId
    +54    """A valid application `id` from the bioimage.io collection."""

    @@ -209,16 +210,17 @@
    Inherited Members
     35 type: Literal["application"] = "application"
     36
     37 id: Optional[ApplicationId] = None
    -38 """Model zoo (bioimage.io) wide, unique identifier (assigned by bioimage.io)"""
    -39
    -40 parent: Optional[ApplicationId] = None
    -41 """The description from which this one is derived"""
    -42
    -43 source: Annotated[
    -44     Optional[ImportantFileSource],
    -45     Field(description="URL or path to the source of the application"),
    -46 ] = None
    -47 """The primary source of the application"""
    +38 """bioimage.io-wide unique resource identifier
    +39 assigned by bioimage.io; version **un**specific."""
    +40
    +41 parent: Optional[ApplicationId] = None
    +42 """The description from which this one is derived"""
    +43
    +44 source: Annotated[
    +45     Optional[ImportantFileSource],
    +46     Field(description="URL or path to the source of the application"),
    +47 ] = None
    +48 """The primary source of the application"""
    @@ -245,7 +247,8 @@
    Inherited Members
    -Model zoo (bioimage.io) wide, unique identifier (assigned by bioimage.io)
    +bioimage.io-wide unique resource identifier
    +assigned by bioimage.io; version unspecific.

    @@ -265,7 +268,7 @@
    Inherited Members
    - source: Annotated[Optional[Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')]), AfterValidator(func=<function wo_special_file_name at 0x7f63806efce0>), PlainSerializer(func=<function _package at 0x7f63805244a0>, return_type=PydanticUndefined, when_used='unless-none')]], FieldInfo(annotation=NoneType, required=True, description='URL or path to the source of the application')] + source: Annotated[Optional[Annotated[Union[bioimageio.spec._internal.url.HttpUrl, bioimageio.spec._internal.io.RelativeFilePath, Annotated[pathlib.Path, PathType(path_type='file')]], FieldInfo(annotation=NoneType, required=True, metadata=[_PydanticGeneralMetadata(union_mode='left_to_right')]), AfterValidator(func=<function wo_special_file_name at 0x7f560edf7ce0>), PlainSerializer(func=<function _package at 0x7f560ee284a0>, return_type=PydanticUndefined, when_used='unless-none')]], FieldInfo(annotation=NoneType, required=True, description='URL or path to the source of the application')]
    @@ -370,11 +373,11 @@
    Inherited Members
    -50class LinkedApplication(LinkedResourceNode):
    -51    """Reference to a bioimage.io application."""
    -52
    -53    id: ApplicationId
    -54    """A valid application `id` from the bioimage.io collection."""
    +51class LinkedApplication(LinkedResourceNode):
    +52    """Reference to a bioimage.io application."""
    +53
    +54    id: ApplicationId
    +55    """A valid application `id` from the bioimage.io collection."""
     
    @@ -395,6 +398,15 @@
    Inherited Members
    +Inherited Members
    +bioimageio.spec.generic.v0_3.LinkedResourceNode
    +version
    @@ -12,7 +12,7 @@

    Coverage for bioimageio/spec/generic/_v0_2_converter.py:
    -90%
    +77%

    73 statements


    diff --git a/coverage/z_7c461fd3866d8843__v0_3_converter_py.html b/coverage/z_7c461fd3866d8843__v0_3_converter_py.html index 83eb9d042..5a614eb6d 100644 --- a/coverage/z_7c461fd3866d8843__v0_3_converter_py.html +++ b/coverage/z_7c461fd3866d8843__v0_3_converter_py.html @@ -2,7 +2,7 @@ - Coverage for bioimageio/spec/generic/_v0_3_converter.py: 79% + Coverage for bioimageio/spec/generic/_v0_3_converter.py: 81% @@ -12,7 +12,7 @@

    Coverage for bioimageio/spec/generic/_v0_3_converter.py:
    -79%
    +81%

    42 statements


    diff --git a/coverage/z_7c461fd3866d8843_v0_2_py.html b/coverage/z_7c461fd3866d8843_v0_2_py.html index 4122c8257..327d0d1c4 100644 --- a/coverage/z_7c461fd3866d8843_v0_2_py.html +++ b/coverage/z_7c461fd3866d8843_v0_2_py.html @@ -65,7 +65,7 @@


    -177 statements
    +179 statements
