|
23 | 23 | "# we use napari for visualising images, you can install it via `pip install napari` or`conda install napari`\n",
|
24 | 24 | "import napari\n",
|
25 | 25 | "import numpy as np\n",
|
26 |
| - "import xarray as xr\n", |
27 |
| - "\n", |
28 |
| - "from bioimageio.core.prediction_pipeline import create_prediction_pipeline" |
| 26 | + "import xarray as xr" |
29 | 27 | ]
|
30 | 28 | },
|
31 | 29 | {
|
|
170 | 168 | "source": [
|
171 | 169 | "## Prediction with the model\n",
|
172 | 170 | "\n",
|
173 |
| - "`bioimageio.core` implements functionality to run predictions with a model in bioimage.io format.\n", |
174 |
| - "This includes functions to run prediction with numpy arrays (more precisely xarray DataArrays) and convenience functions to run predictions for inputs stored on disc." |
| 171 | + "`bioimageio.core` implements functionality to run prediction with models in the `bioimage.io` format.\n", |
| 172 | + "This includes functions to run prediction with `xarray.DataArrays` as input and convenience functions to run predictions for images stored on disc." |
175 | 173 | ]
|
176 | 174 | },
|
177 | 175 | {
|
|
181 | 179 | "metadata": {},
|
182 | 180 | "outputs": [],
|
183 | 181 | "source": [
|
184 |
| - "# load the example image for this model, which is stored in numpy file format\n", |
| 182 | + "# Load the example image for this model, which is stored in numpy file format.\n", |
185 | 183 | "input_image = np.load(model_resource.test_inputs[0])"
|
186 | 184 | ]
|
187 | 185 | },
|
| 186 | + { |
| 187 | + "cell_type": "code", |
| 188 | + "execution_count": null, |
| 189 | + "id": "07ff1e0c", |
| 190 | + "metadata": {}, |
| 191 | + "outputs": [], |
| 192 | + "source": [ |
| 193 | + "# Create an xarray.DataArray from the input image.\n", |
| 194 | + "# DataArrays are like numpy arrays, but they have annotated axes.\n", |
| 195 | + "# The axes are used to validate that the axes of the input image match the axes expected by a model.\n", |
| 196 | + "input_array = xr.DataArray(input_image, dims=tuple(model_resource.inputs[0].axes))\n", |
| 197 | + "# print the axis annotations ('dims') and the shape of the input array\n", |
| 198 | + "print(input_array.dims)\n", |
| 199 | + "print(input_array.shape)" |
| 200 | + ] |
| 201 | + }, |
188 | 202 | {
|
189 | 203 | "cell_type": "code",
|
190 | 204 | "execution_count": null,
|
191 | 205 | "id": "808e2ca7",
|
192 | 206 | "metadata": {},
|
193 | 207 | "outputs": [],
|
194 | 208 | "source": [
|
195 |
| - "# define a function to run prediction on a numpy input\n", |
196 |
| - "# \"devices\" can be used to run prediction on a gpu instead of the cpu\n", |
197 |
| - "# \"weight_format\" to specify which weight format to use in case the model contains different weight formats\n", |
198 |
| - "def predict_numpy(model, input_, devices=None, weight_format=None):\n", |
199 |
| - " # the prediction pipeline combines preprocessing, prediction and postprocessing.\n", |
200 |
| - " # it should always be used for prediction with a bioimageio model\n", |
201 |
| - " pred_pipeline = create_prediction_pipeline(\n", |
202 |
| - " bioimageio_model=model, devices=devices, weight_format=weight_format\n", |
203 |
| - " )\n", |
| 209 | + "# Next, create a 'prediction_pipeline'. The prediction_pipeline is used to run prediction with a given model.\n", |
| 210 | + "# This means it applies the preprocessing, runs inference with the model and applies the postprocessing.\n", |
| 211 | + "\n", |
| 212 | + "# The 'devices' argument can be used to specify which device(s) to use for inference with the model.\n", |
| 213 | + "# Hence it can be used to specify whether to use the cpu, a single gpu or multiple gpus (not implemented yet).\n", |
| 214 | + "# By default (devices=None) a gpu will be used if available and otherwise the cpu will be used.\n", |
| 215 | + "devices = None\n", |
204 | 216 | "\n",
|
205 |
| - " # the prediction pipeline expects inputs as xarray.DataArrays.\n", |
206 |
| - " # these are similar to numpy arrays, but allow for named dimensions (the dims keyword argument)\n", |
207 |
| - " # in bioimage.io the dims have to agree with the input axes required by the model\n", |
208 |
| - " axes = tuple(model.inputs[0].axes)\n", |
209 |
| - " input_tensor = xr.DataArray(input_, dims=axes)\n", |
210 |
| - " \n", |
211 |
| - " # the prediction pipeline call expects the same number of inputs as the number of inputs required by the model\n", |
212 |
| - " # in the case here, the model just expects a single input. in the case of multiple inputs use\n", |
213 |
| - " # prediction = pred_pipeline(input1, input2, ...)\n", |
214 |
| - " # or, if you have the inputs in a list or tuple\n", |
215 |
| - " # prediction = pred_pipeline(*inputs)\n", |
216 |
| - " # the call returns a list of output tensors, corresponding to the output tensors of the model\n", |
217 |
| - " # (in this case, we just have a single output)\n", |
218 |
| - " prediction = pred_pipeline(input_tensor)[0]\n", |
219 |
| - " return prediction" |
| 217 | + "# The 'weight_format' argument can be used to specify which weight format available in the model to use.\n", |
| 218 | + "# By default (weight_format=None) the weight format with highest priority (as defined by bioimageio.core) will be used.\n", |
| 219 | + "weight_format = None\n", |
| 220 | + "\n", |
| 221 | + "prediction_pipeline = bioimageio.core.create_prediction_pipeline(\n", |
| 222 | + " model_resource, devices=devices, weight_format=weight_format\n", |
| 223 | + ")" |
| 224 | + ] |
| 225 | + }, |
| 226 | + { |
| 227 | + "cell_type": "code", |
| 228 | + "execution_count": null, |
| 229 | + "id": "13c73742", |
| 230 | + "metadata": {}, |
| 231 | + "outputs": [], |
| 232 | + "source": [ |
| 233 | + "# Use the prediction pipeline to run prediction for the image we loaded before.\n", |
| 234 | + "# The prediction pipeline always returns a tuple (even if the model only has a single output tensor).\n", |
| 235 | + "# So we access the first element of the prediction to get the predicted tensor.\n", |
| 236 | + "prediction = prediction_pipeline(input_array)[0]\n", |
| 237 | + "show_images(input_image, prediction, names=[\"image\", \"prediction\"]) # show the prediction result" |
220 | 238 | ]
|
221 | 239 | },
|
222 | 240 | {
|
|
226 | 244 | "metadata": {},
|
227 | 245 | "outputs": [],
|
228 | 246 | "source": [
|
229 |
| - "# run prediction for the test input and show the result\n", |
230 |
| - "prediction = predict_numpy(model_resource, input_image)\n", |
231 |
| - "show_images(input_image, prediction, names=[\"image\", \"prediction\"])" |
| 247 | + "# The prediction pipeline expects inputs to have a shape that fits the model exactly.\n", |
| 248 | + "# So if the input does not fit the expected input shape the prediction will fail.\n", |
| 249 | + "# E.g. if we crop the input to shape [1, 1, 250, 250] it will not work for our example model,\n", |
| 250 | + "# which expects a spatial shape that is a multiple of 16\n", |
| 251 | + "cropped_image = input_image[:, :, :250, :250]\n", |
| 252 | + "cropped_array = xr.DataArray(cropped_image, dims=tuple(model_resource.inputs[0].axes))" |
| 253 | + ] |
| 254 | + }, |
| 255 | + { |
| 256 | + "cell_type": "code", |
| 257 | + "execution_count": null, |
| 258 | + "id": "f476af51", |
| 259 | + "metadata": {}, |
| 260 | + "outputs": [], |
| 261 | + "source": [ |
| 262 | + "# Applying the prediction pipeline to an image with the wrong shape will fail!\n", |
| 263 | + "prediction_pipeline(cropped_array)" |
| 264 | + ] |
| 265 | + }, |
| 266 | + { |
| 267 | + "cell_type": "code", |
| 268 | + "execution_count": null, |
| 269 | + "id": "580b0a36", |
| 270 | + "metadata": {}, |
| 271 | + "outputs": [], |
| 272 | + "source": [ |
| 273 | + "# Instead, we can use the function `predict_with_padding`, which will pad the image to a shape that fits the model.\n", |
| 274 | + "prediction = bioimageio.core.predict_with_padding(prediction_pipeline, cropped_array)\n", |
| 275 | + "show_images(cropped_image, prediction, names=[\"image\", \"prediction\"]) # show the prediction result" |
| 276 | + ] |
| 277 | + }, |
| 278 | + { |
| 279 | + "cell_type": "code", |
| 280 | + "execution_count": null, |
| 281 | + "id": "b2d6472e", |
| 282 | + "metadata": {}, |
| 283 | + "outputs": [], |
| 284 | + "source": [ |
| 285 | + "# There is also the function `predict_with_tiling`, which will run prediction for patches in a sliding window fashion.\n", |
| 286 | + "# This is especially helpful for large inputs that do not fit into the model as a single input.\n", |
| 287 | + "\n", |
| 288 | + "# The `tiling` argument is used to specify the tile size and the `halo`, which is the part of the patch\n", |
| 289 | + "# that is cropped in order to reduce boundary artifacts.\n", |
| 290 | + "# Alternatively, `tiling` can also be set to `True`, than the tile size and halo will be deduced from the model config\n", |
| 291 | + "# (this is also the default behavior when the `tiling` parameter is not passed).\n", |
| 292 | + "tiling = {\"tile\": {\"x\": 128, \"y\": 128}, \"halo\": {\"x\": 16, \"y\": 16}} # use a tile size of 128x128 and crop a halo of 16 pixels\n", |
| 293 | + "\n", |
| 294 | + "# if `verbose` is set to True a progress bar will be printed \n", |
| 295 | + "prediction = bioimageio.core.predict_with_tiling(prediction_pipeline, cropped_array, tiling=tiling, verbose=True)\n", |
| 296 | + "show_images(cropped_image, prediction, names=[\"image\", \"prediction\"]) " |
| 297 | + ] |
| 298 | + }, |
| 299 | + { |
| 300 | + "cell_type": "markdown", |
| 301 | + "id": "4ba91499", |
| 302 | + "metadata": {}, |
| 303 | + "source": [ |
| 304 | + "### Convenience prediction functions\n", |
| 305 | + "\n", |
| 306 | + "`bioimageio.core` also contains a few convenience functions to directly predict images that are stored on disc:\n", |
| 307 | + "- `predict_image` can be used to run prediction for a single image\n", |
| 308 | + "- `predict_images` to run prediction for many images" |
232 | 309 | ]
|
233 | 310 | },
|
234 | 311 | {
|
|
238 | 315 | "metadata": {},
|
239 | 316 | "outputs": [],
|
240 | 317 | "source": [
|
241 |
| - "# the utility function `predict_image` can be used to run prediction with an image stored on disc\n", |
| 318 | + "# The convenience function `predict_image` can be used to run prediction for an image stored on disc.\n", |
242 | 319 | "from bioimageio.core.prediction import predict_image\n",
|
243 | 320 | "\n",
|
244 |
| - "# the filepath where the output should be stored, supports most common image formats as well as npy fileformat\n", |
| 321 | + "# The filepath where the output should be stored; supports most common image formats as well as npy fileformat.\n", |
245 | 322 | "outputs = [\"prediction.tif\"]\n",
|
246 | 323 | "predict_image(\n",
|
247 | 324 | " model_resource, model_resource.test_inputs, outputs\n",
|
248 | 325 | ")\n",
|
249 | 326 | "\n",
|
250 |
| - "# the output tensor contains 2 channels, which is not supported by normal tif.\n", |
251 |
| - "# thus, these 2 channels are stored as 2 separate images\n", |
| 327 | + "# The output tensor contains 2 channels, which is not supported by normal tif.\n", |
| 328 | + "# Thus, these 2 channels are stored as 2 separate images.\n", |
252 | 329 | "fg_pred = imageio.imread(\"prediction-c0.tif\")\n",
|
253 | 330 | "bd_pred = imageio.imread(\"prediction-c1.tif\")\n",
|
254 |
| - "show_images(input_image, fg_pred, bd_pred,\n", |
255 |
| - " names=[\"image\", \"foreground-prediction\", \"boundary-prediction\"])" |
| 331 | + "show_images(input_image, fg_pred, bd_pred, names=[\"image\", \"foreground-prediction\", \"boundary-prediction\"])" |
256 | 332 | ]
|
257 | 333 | },
|
258 | 334 | {
|
|
262 | 338 | "metadata": {},
|
263 | 339 | "outputs": [],
|
264 | 340 | "source": [
|
265 |
| - "# the utility function `predict_images` can be use to run prediction for a batch of images stored on disc\n", |
266 |
| - "# note: this only works for models which have a single input and output!\n", |
| 341 | + "# The convenience function `predict_images` can be use to run prediction for many images stored on disc\n", |
| 342 | + "# Note: this only works for models which have a single input and output!\n", |
267 | 343 | "from bioimageio.core.prediction import predict_images\n",
|
268 | 344 | "\n",
|
269 |
| - "# here, we use a subset of the dsb challenge data for prediction from the stardist (https://github.com/stardist/stardist)\n", |
270 |
| - "# you can obtain it from: https://github.com/stardist/stardist/releases/download/0.1.0/dsb2018.zip\n", |
| 345 | + "# Here we use a small subset of the dsb challenge data for prediction.\n", |
| 346 | + "# The original data is available at https://github.com/stardist/stardist/releases/download/0.1.0/dsb2018.zip.\n", |
| 347 | + "# We have added a few images to the repository so that the notebook runs out of the box.\n", |
271 | 348 | "\n",
|
272 |
| - "# select all images in the \"test\" subfolder\n", |
| 349 | + "# Get all paths to the images in the \"example-images\" folder.\n", |
273 | 350 | "from glob import glob\n",
|
274 |
| - "folder = \"/home/pape/Downloads/dsb2018(1)/dsb2018/test\"\n", |
275 |
| - "inputs = glob(os.path.join(folder, \"images\", \"*.tif\"))\n", |
| 351 | + "inputs = glob(\"./example-images/*.png\")\n", |
276 | 352 | "\n",
|
277 |
| - "# create an output folder and specify the output path for each image\n", |
278 |
| - "output_folder = os.path.join(folder, \"predictions\")\n", |
| 353 | + "# Create an output folder and specify the output path for each image.\n", |
| 354 | + "output_folder = \"./predictions\"\n", |
279 | 355 | "os.makedirs(output_folder, exist_ok=True)\n",
|
280 | 356 | "outputs = [os.path.join(output_folder, os.path.split(inp)[1]) for inp in inputs]\n",
|
281 | 357 | "\n",
|
|
289 | 365 | "metadata": {},
|
290 | 366 | "outputs": [],
|
291 | 367 | "source": [
|
292 |
| - "# the model at hand can only predict images which have a xy-size that is\n", |
293 |
| - "# a multiple of 16. To run with arbitrary size images, we pass the `padding`\n", |
| 368 | + "# The model at hand can only predict images which have a spatial shape that is\n", |
| 369 | + "# a multiple of 16. To run with images of other sizes we pass the `padding`\n", |
294 | 370 | "# argument to `predict_images` and specify that the input is padded to the next bigger\n",
|
295 |
| - "# size that is divisible by 16 (mode: dynamic)\n", |
296 |
| - "# as an alternative `\"mode\": \"fixed\"` will pad to a fixed shape, e.g.\n", |
297 |
| - "# `{\"x\": 512, \"y\": 512, \"mode\": \"fixed\"}` will always pad to a size of 512x512\n", |
298 |
| - "# the padding is cropped again after the prediction\n", |
| 371 | + "# size that is divisible by 16 (mode: dynamic).\n", |
| 372 | + "# As an alternative `\"mode\": \"fixed\"` will pad to a fixed shape, e.g.\n", |
| 373 | + "# `{\"x\": 512, \"y\": 512, \"mode\": \"fixed\"}` will always pad to a size of 512x512.\n", |
| 374 | + "# The padding is cropped again after the prediction to restore the input shape.\n", |
299 | 375 | "padding = {\"x\": 16, \"y\": 16, \"mode\": \"dynamic\"}\n",
|
300 | 376 | "predict_images(\n",
|
301 | 377 | " model_resource, inputs, outputs, padding=padding, verbose=True\n",
|
302 | 378 | ")\n",
|
303 | 379 | "\n",
|
304 | 380 | "# check the first input/output\n",
|
305 |
| - "show_images(inputs[0], outputs[0].replace(\".tif\", \"-c0.tif\"), outputs[0].replace(\".tif\", \"-c1.tif\"))" |
| 381 | + "show_images(inputs[0], outputs[0].replace(\".png\", \"-c0.png\"), outputs[0].replace(\".png\", \"-c1.png\"))" |
306 | 382 | ]
|
307 | 383 | },
|
308 | 384 | {
|
|
312 | 388 | "metadata": {},
|
313 | 389 | "outputs": [],
|
314 | 390 | "source": [
|
315 |
| - "# instead of padding, we can also use tiling.\n", |
316 |
| - "# here, we specify a tile size of 224 and a halo (= extension of tile on both sides)\n", |
317 |
| - "# size of 16, which results in an effective tile shale of 256 = 224 + 2*16\n", |
| 391 | + "# Instead of padding, we can also use tiling.\n", |
| 392 | + "# Here, we specify a tile size of 256 and a halo (= what's cropped from the tile on either side) of 16.\n", |
318 | 393 | "tiling = {\n",
|
319 |
| - " \"tile\": {\"x\": 224, \"y\": 224},\n", |
| 394 | + " \"tile\": {\"x\": 256, \"y\": 256},\n", |
320 | 395 | " \"halo\": {\"x\": 16, \"y\": 16},\n",
|
321 | 396 | "}\n",
|
322 | 397 | "predict_images(\n",
|
323 | 398 | " model_resource, inputs, outputs, tiling=tiling, verbose=True\n",
|
324 | 399 | ")\n",
|
325 | 400 | "\n",
|
326 |
| - "# check the first input/output\n", |
327 |
| - "show_images(inputs[0], outputs[0].replace(\".tif\", \"-c0.tif\"), outputs[0].replace(\".tif\", \"-c1.tif\"))" |
| 401 | + "# Check the first input output pair.\n", |
| 402 | + "show_images(inputs[0], outputs[0].replace(\".png\", \"-c0.png\"), outputs[0].replace(\".png\", \"-c1.png\"))" |
328 | 403 | ]
|
329 | 404 | },
|
330 | 405 | {
|
|
537 | 612 | "name": "python",
|
538 | 613 | "nbconvert_exporter": "python",
|
539 | 614 | "pygments_lexer": "ipython3",
|
540 |
| - "version": "3.9.10" |
| 615 | + "version": "3.9.7" |
541 | 616 | }
|
542 | 617 | },
|
543 | 618 | "nbformat": 4,
|
|