From ab600f9625b4c926ad4bfb4eb4905f8dd0c5f36b Mon Sep 17 00:00:00 2001 From: Mehdi Seifi Date: Fri, 15 Mar 2024 10:18:46 +0100 Subject: [PATCH 1/3] first try to update prediction module --- bioimageio/core/_prediction_pipeline.py | 9 +- bioimageio/core/prediction.py | 211 +++++++++++++----------- 2 files changed, 123 insertions(+), 97 deletions(-) diff --git a/bioimageio/core/_prediction_pipeline.py b/bioimageio/core/_prediction_pipeline.py index a596c790..07a91446 100644 --- a/bioimageio/core/_prediction_pipeline.py +++ b/bioimageio/core/_prediction_pipeline.py @@ -32,10 +32,12 @@ def __init__( super().__init__() if bioimageio_model.run_mode: warnings.warn( - f"Not yet implemented inference for run mode '{bioimageio_model.run_mode.name}'" + "Not yet implemented inference for run mode " + + f"'{bioimageio_model.run_mode.name}'" ) self.name = name + self.input_specs = bioimageio_model.inputs self._preprocessing = preprocessing self._postprocessing = postprocessing if isinstance(bioimageio_model, v0_4.ModelDescr): @@ -65,7 +67,7 @@ def predict( ) -> List[xr.DataArray]: """Predict input_tensor with the model without applying pre/postprocessing.""" named_tensors = [ - named_input_tensors[str(k)] for k in self.input_ids[len(input_tensors) :] + named_input_tensors[str(k)] for k in self.input_ids[len(input_tensors):] ] return self._adapter.forward(*input_tensors, *named_tensors) @@ -113,7 +115,8 @@ def forward( def load(self): """ - optional step: load model onto devices before calling forward if not using it as context manager + optional step: load model onto devices before calling forward + if not using it as context manager """ self._adapter.load() diff --git a/bioimageio/core/prediction.py b/bioimageio/core/prediction.py index e9ec7256..13148335 100644 --- a/bioimageio/core/prediction.py +++ b/bioimageio/core/prediction.py @@ -1,23 +1,31 @@ -"""coming soon""" - # TODO: update -# import collections -# import os -# from fractions import Fraction -# from itertools import 
product -# from pathlib import Path -# from typing import Any, Dict, Hashable, Iterator, List, NamedTuple, Optional, OrderedDict, Sequence, Tuple, Union - -# import numpy as np -# import xarray as xr -# from bioimageio.spec import ResourceDescr -# from bioimageio.spec.model.v0_5 import AxisType -# from numpy.typing import NDArray +import collections +import os +from fractions import Fraction +from itertools import product +from pathlib import Path +from typing import ( + Any, Dict, Hashable, Iterator, List, NamedTuple, + Optional, OrderedDict, Sequence, Tuple, Union +) + +import numpy as np +import xarray as xr +# from .._internal.types import NotEmpty as NotEmpty +from bioimageio.spec.common import PermissiveFileSource +from bioimageio.spec.model.v0_5 import AxisType, ModelDescr, WeightsFormat +from bioimageio.spec.model.v0_5 import InputAxis +from bioimageio.spec.model.v0_5 import InputTensorDescr +from bioimageio.spec.model.v0_4 import AxesStr, ImplicitOutputShape +from bioimageio.spec.model.v0_4 import InputTensorDescr as _InputTensorDescr +from numpy.typing import NDArray # from pydantic import HttpUrl -# from tqdm import tqdm +from tqdm import tqdm -# from bioimageio.core import image_helper, load_description -# from bioimageio.core.prediction_pipeline import PredictionPipeline, create_prediction_pipeline +from bioimageio.core import load_description +from bioimageio.core.common import Sample, Axis, TensorId +from bioimageio.core.utils import image_helper +from bioimageio.core import PredictionPipeline, create_prediction_pipeline # from bioimageio.core.resource_io.nodes import ImplicitOutputShape, Model, ResourceDescr # Axis = Hashable @@ -29,6 +37,23 @@ # local: Dict[Axis, slice] +def get_samples( + inputs: Union[Tuple[Path, ...], List[Path]], + input_ids: List[TensorId], +): + input_tensors = [ + image_helper.load_tensor(input_path) + for input_path in inputs + ] + sample = Sample( + data={ + **dict(zip(input_ids, input_tensors)) + } + ) + + return sample + 
+ # def get_tiling( # shape: Sequence[int], # tile_shape: Dict[Axis, int], @@ -138,27 +163,18 @@ # output[inner_tile] = out[local_tile] -# def predict( -# prediction_pipeline: PredictionPipeline, -# inputs: Union[ -# xr.DataArray, List[xr.DataArray], Tuple[xr.DataArray], NDArray[Any], List[NDArray[Any]], Tuple[NDArray[Any]] -# ], -# ) -> List[xr.DataArray]: -# """Run prediction for a single set of input(s) with a bioimage.io model - -# Args: -# prediction_pipeline: the prediction pipeline for the input model. -# inputs: the input(s) for this model represented as xarray data or numpy nd array. -# """ -# if not isinstance(inputs, (tuple, list)): -# inputs = [inputs] +def predict( + prediction_pipeline: PredictionPipeline, + input_sample: Sample, +) -> Sample: + """Run prediction for a single set of input(s) with a bioimage.io model -# assert len(inputs) == len(prediction_pipeline.input_specs) -# tagged_data = [ -# ipt if isinstance(ipt, xr.DataArray) else xr.DataArray(ipt, dims=ipt_spec.axes) -# for ipt, ipt_spec in zip(inputs, prediction_pipeline.input_specs) -# ] -# return prediction_pipeline.forward(*tagged_data) + Args: + prediction_pipeline: the prediction pipeline for the input model. + inputs: the input(s) for this model represented as xarray data or numpy nd array. 
+ """ + assert len(input_sample.data) == len(prediction_pipeline.input_specs) + return prediction_pipeline.forward_sample(input_sample) # def _parse_padding(padding, input_specs): @@ -411,63 +427,70 @@ # return outputs -# def _predict_sample(prediction_pipeline, inputs, outputs, padding, tiling): -# if padding and tiling: -# raise ValueError("Only one of padding or tiling is supported") - -# input_data = image_helper.load_tensors(inputs, prediction_pipeline.input_specs) -# if padding is not None: -# result = predict_with_padding(prediction_pipeline, input_data, padding) -# elif tiling is not None: -# result = predict_with_tiling(prediction_pipeline, input_data, tiling) -# else: -# result = predict(prediction_pipeline, input_data) - -# assert isinstance(result, list) -# assert len(result) == len(outputs) -# for res, out in zip(result, outputs): -# image_helper.save_image(out, res) - - -# def predict_image( -# model_rdf: DescriptionSource, -# inputs: Union[Tuple[Path, ...], List[Path], Path], -# outputs: Union[Tuple[Path, ...], List[Path], Path], -# padding: Optional[Union[bool, Dict[str, int]]] = None, -# tiling: Optional[Union[bool, Dict[str, Dict[str, int]]]] = None, -# weight_format: Optional[str] = None, -# devices: Optional[List[str]] = None, -# verbose: bool = False, -# ): -# """Run prediction for a single set of input image(s) with a bioimage.io model. - -# Args: -# model_rdf: the bioimageio model. -# inputs: the filepaths for the input images. -# outputs: the filepaths for saving the input images. -# padding: the padding settings for prediction. By default no padding is used. -# tiling: the tiling settings for prediction. By default no tiling is used. -# weight_format: the weight format to use for predictions. -# devices: the devices to use for prediction. -# verbose: run prediction in verbose mode. 
-# """ -# if not isinstance(inputs, (tuple, list)): -# inputs = [inputs] - -# if not isinstance(outputs, (tuple, list)): -# outputs = [outputs] - -# model = load_description(model_rdf) -# assert isinstance(model, Model) -# if len(model.inputs) != len(inputs): -# raise ValueError -# if len(model.outputs) != len(outputs): -# raise ValueError - -# with create_prediction_pipeline( -# bioimageio_model=model, weight_format=weight_format, devices=devices -# ) as prediction_pipeline: -# _predict_sample(prediction_pipeline, inputs, outputs, padding, tiling) +def _predict_sample( + prediction_pipeline: PredictionPipeline, + sample: Sample, + outputs: Union[Tuple[Path, ...], List[Path]], + padding: Optional[Union[bool, Dict[str, int]]], + tiling: Optional[Union[bool, Dict[str, Dict[str, int]]]] +): + if padding and tiling: + raise ValueError("Only one of padding or tiling is supported") + + if padding is not None: + # result = predict_with_padding(prediction_pipeline, input_data, padding) + result = None + elif tiling is not None: + # result = predict_with_tiling(prediction_pipeline, input_data, tiling) + result = None + else: + result = predict(prediction_pipeline, sample) + + assert isinstance(result, list) + assert len(result.data) == len(outputs) + # for res, out in zip(result, outputs): + # image_helper.save_image(out, res) + + +def predict_image( + model_rdf: PermissiveFileSource, + inputs: Union[Tuple[Path, ...], List[Path], Path], + outputs: Union[Tuple[Path, ...], List[Path], Path], + padding: Optional[Union[bool, Dict[str, int]]] = None, + tiling: Optional[Union[bool, Dict[str, Dict[str, int]]]] = None, + weight_format: Optional[WeightsFormat] = None, + devices: Optional[List[str]] = None, + verbose: bool = False, +): + """Run prediction for a single set of input image(s) with a bioimage.io model. + + Args: + model_rdf: the bioimageio model. + inputs: the filepaths for the input images. + outputs: the filepaths for saving the input images. 
+ padding: the padding settings for prediction. By default no padding is used. + tiling: the tiling settings for prediction. By default no tiling is used. + weight_format: the weight format to use for predictions. + devices: the devices to use for prediction. + verbose: run prediction in verbose mode. + """ + if not isinstance(inputs, (tuple, list)): + inputs = [inputs] + + if not isinstance(outputs, (tuple, list)): + outputs = [outputs] + + model = load_description(model_rdf) + assert isinstance(model, ModelDescr) + if len(model.inputs) != len(inputs): + raise ValueError + if len(model.outputs) != len(outputs): + raise ValueError + + with create_prediction_pipeline( + bioimageio_model=model, weight_format=weight_format, devices=devices + ) as prediction_pipeline: + _predict_sample(prediction_pipeline, inputs, outputs, padding, tiling) # def predict_images( From 36df627e7d0e400f72ce84c0ee4a0d28526d53b4 Mon Sep 17 00:00:00 2001 From: Mehdi Seifi Date: Fri, 15 Mar 2024 10:34:22 +0100 Subject: [PATCH 2/3] updated predict_image --- bioimageio/core/prediction.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/bioimageio/core/prediction.py b/bioimageio/core/prediction.py index 13148335..2ad76061 100644 --- a/bioimageio/core/prediction.py +++ b/bioimageio/core/prediction.py @@ -37,7 +37,7 @@ # local: Dict[Axis, slice] -def get_samples( +def get_sample( inputs: Union[Tuple[Path, ...], List[Path]], input_ids: List[TensorId], ): @@ -490,7 +490,8 @@ def predict_image( with create_prediction_pipeline( bioimageio_model=model, weight_format=weight_format, devices=devices ) as prediction_pipeline: - _predict_sample(prediction_pipeline, inputs, outputs, padding, tiling) + sample = get_sample(inputs, prediction_pipeline.input_ids) + _predict_sample(prediction_pipeline, sample, outputs, padding, tiling) # def predict_images( From ed7ae6fa8ff92ce92e4e74a0fc5ad19cf705d3e9 Mon Sep 17 00:00:00 2001 From: Mehdi Seifi Date: Sun, 24 Mar 2024 17:35:43 +0100 Subject: 
[PATCH 3/3] fix test_predict_image --- bioimageio/core/prediction.py | 1 + tests/test_prediction.py | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/bioimageio/core/prediction.py b/bioimageio/core/prediction.py index 2ad76061..80eadd87 100644 --- a/bioimageio/core/prediction.py +++ b/bioimageio/core/prediction.py @@ -18,6 +18,7 @@ from bioimageio.spec.model.v0_5 import InputTensorDescr from bioimageio.spec.model.v0_4 import AxesStr, ImplicitOutputShape from bioimageio.spec.model.v0_4 import InputTensorDescr as _InputTensorDescr +from bioimageio.spec.model.v0_4 import ModelDescr as ModelDescr_v0_4 from numpy.typing import NDArray # from pydantic import HttpUrl from tqdm import tqdm diff --git a/tests/test_prediction.py b/tests/test_prediction.py index 2a6c4487..40496b2a 100644 --- a/tests/test_prediction.py +++ b/tests/test_prediction.py @@ -16,9 +16,10 @@ def test_predict_image(any_model: Path, tmpdir: Path): spec = load_description(any_model) assert isinstance(spec, ModelDescr) - inputs = spec.test_inputs + inputs = [Path(str(test_input)) for test_input in spec.test_inputs] outputs = [Path(tmpdir) / f"out{i}.npy" for i in range(len(spec.test_outputs))] + predict_image(any_model, inputs, outputs) for out_path in outputs: assert out_path.exists()