diff --git a/inference/core/version.py b/inference/core/version.py
index ffc777a2da..b8a19e4ecc 100644
--- a/inference/core/version.py
+++ b/inference/core/version.py
@@ -1,4 +1,4 @@
-__version__ = "0.11.0"
+__version__ = "0.11.1"
 
 
 if __name__ == "__main__":
diff --git a/inference/core/workflows/core_steps/flow_control/condition.py b/inference/core/workflows/core_steps/flow_control/condition.py
index 2f849d955f..f0e3f78948 100644
--- a/inference/core/workflows/core_steps/flow_control/condition.py
+++ b/inference/core/workflows/core_steps/flow_control/condition.py
@@ -83,7 +83,7 @@ class BlockManifest(WorkflowBlockManifest):
         set,
     ] = Field(
         description="Left operand of expression `left operator right` to evaluate boolean value of condition statement",
-        examples=["$steps.classification.top", 3, "some"],
+        examples=["$steps.classification.top", 3, "foo"],
     )
     operator: Operator = Field(
         description="Operator in expression `left operator right` to evaluate boolean value of condition statement",
@@ -117,7 +117,7 @@ class BlockManifest(WorkflowBlockManifest):
         set,
     ] = Field(
         description="Right operand of expression `left operator right` to evaluate boolean value of condition statement",
-        examples=["$steps.classification.top", 3, "some"],
+        examples=["$steps.classification.top", 3, "bar"],
     )
     step_if_true: StepSelector = Field(
         description="Reference to step which shall be executed if expression evaluates to true",
diff --git a/inference/core/workflows/core_steps/models/foundation/clip_comparison.py b/inference/core/workflows/core_steps/models/foundation/clip_comparison.py
index a4aaaf356b..1550c1fa68 100644
--- a/inference/core/workflows/core_steps/models/foundation/clip_comparison.py
+++ b/inference/core/workflows/core_steps/models/foundation/clip_comparison.py
@@ -57,7 +57,7 @@ class BlockManifest(WorkflowBlockManifest):
     type: Literal["ClipComparison"]
     name: str = Field(description="Unique name of step in workflows")
     images: Union[WorkflowImageSelector, StepOutputImageSelector] = Field(
-        description="Reference at image to be used as input for step processing",
+        description="Reference an image to be used as input for step processing",
         examples=["$inputs.image", "$steps.cropping.crops"],
         validation_alias=AliasChoices("images", "image"),
     )
@@ -112,13 +112,13 @@ async def run_locally(
                 prompt_type="text",
                 api_key=self._api_key,
             )
-            doctr_model_id = load_core_model(
+            clip_model_id = load_core_model(
                 model_manager=self._model_manager,
                 inference_request=inference_request,
                 core_model="clip",
             )
             prediction = await self._model_manager.infer_from_request(
-                doctr_model_id, inference_request
+                clip_model_id, inference_request
             )
             predictions.append(prediction.dict())
         return self._post_process_result(image=images, predictions=predictions)
diff --git a/inference/core/workflows/core_steps/models/foundation/lmm.py b/inference/core/workflows/core_steps/models/foundation/lmm.py
index 68970eb083..bb3846a978 100644
--- a/inference/core/workflows/core_steps/models/foundation/lmm.py
+++ b/inference/core/workflows/core_steps/models/foundation/lmm.py
@@ -85,7 +85,7 @@ class BlockManifest(WorkflowBlockManifest):
     )
     type: Literal["LMM"]
     images: Union[WorkflowImageSelector, StepOutputImageSelector] = Field(
-        description="Reference at image to be used as input for step processing",
+        description="Reference an image to be used as input for step processing",
         examples=["$inputs.image", "$steps.cropping.crops"],
         validation_alias=AliasChoices("images", "image"),
     )
diff --git a/inference/core/workflows/core_steps/models/foundation/lmm_classifier.py b/inference/core/workflows/core_steps/models/foundation/lmm_classifier.py
index c6c89fc9e2..eee5574b45 100644
--- a/inference/core/workflows/core_steps/models/foundation/lmm_classifier.py
+++ b/inference/core/workflows/core_steps/models/foundation/lmm_classifier.py
@@ -60,7 +60,7 @@ class BlockManifest(WorkflowBlockManifest):
     )
     type: Literal["LMMForClassification"]
     images: Union[WorkflowImageSelector, StepOutputImageSelector] = Field(
-        description="Reference at image to be used as input for step processing",
+        description="Reference an image to be used as input for step processing",
         examples=["$inputs.image", "$steps.cropping.crops"],
         validation_alias=AliasChoices("images", "image"),
     )
diff --git a/inference/core/workflows/core_steps/models/foundation/ocr.py b/inference/core/workflows/core_steps/models/foundation/ocr.py
index ae5fb85266..96fdc6a3fe 100644
--- a/inference/core/workflows/core_steps/models/foundation/ocr.py
+++ b/inference/core/workflows/core_steps/models/foundation/ocr.py
@@ -58,7 +58,7 @@ class BlockManifest(WorkflowBlockManifest):
     type: Literal["OCRModel"]
     name: str = Field(description="Unique name of step in workflows")
     images: Union[WorkflowImageSelector, StepOutputImageSelector] = Field(
-        description="Reference at image to be used as input for step processing",
+        description="Reference an image to be used as input for step processing",
         examples=["$inputs.image", "$steps.cropping.crops"],
         validation_alias=AliasChoices("images", "image"),
     )
diff --git a/inference/core/workflows/core_steps/models/foundation/yolo_world.py b/inference/core/workflows/core_steps/models/foundation/yolo_world.py
index 02dafd4a59..7cff553e24 100644
--- a/inference/core/workflows/core_steps/models/foundation/yolo_world.py
+++ b/inference/core/workflows/core_steps/models/foundation/yolo_world.py
@@ -64,7 +64,7 @@ class BlockManifest(WorkflowBlockManifest):
     type: Literal["YoloWorldModel", "YoloWorld"]
     name: str = Field(description="Unique name of step in workflows")
    images: Union[WorkflowImageSelector, StepOutputImageSelector] = Field(
-        description="Reference at image to be used as input for step processing",
+        description="Reference an image to be used as input for step processing",
         examples=["$inputs.image", "$steps.cropping.crops"],
         validation_alias=AliasChoices("images", "image"),
     )
diff --git a/inference/core/workflows/core_steps/models/roboflow/instance_segmentation.py b/inference/core/workflows/core_steps/models/roboflow/instance_segmentation.py
index 00b3ff63be..f45b9cb5b0 100644
--- a/inference/core/workflows/core_steps/models/roboflow/instance_segmentation.py
+++ b/inference/core/workflows/core_steps/models/roboflow/instance_segmentation.py
@@ -68,7 +68,7 @@ class BlockManifest(WorkflowBlockManifest):
     )
     type: Literal["RoboflowInstanceSegmentationModel", "InstanceSegmentationModel"]
     images: Union[WorkflowImageSelector, StepOutputImageSelector] = Field(
-        description="Reference at image to be used as input for step processing",
+        description="Reference an image to be used as input for step processing",
         examples=["$inputs.image", "$steps.cropping.crops"],
         validation_alias=AliasChoices("images", "image"),
     )
diff --git a/inference/core/workflows/core_steps/models/roboflow/keypoint_detection.py b/inference/core/workflows/core_steps/models/roboflow/keypoint_detection.py
index 33d8e4032b..53e6866829 100644
--- a/inference/core/workflows/core_steps/models/roboflow/keypoint_detection.py
+++ b/inference/core/workflows/core_steps/models/roboflow/keypoint_detection.py
@@ -67,7 +67,7 @@ class BlockManifest(WorkflowBlockManifest):
     )
     type: Literal["RoboflowKeypointDetectionModel", "KeypointsDetectionModel"]
     images: Union[WorkflowImageSelector, StepOutputImageSelector] = Field(
-        description="Reference at image to be used as input for step processing",
+        description="Reference an image to be used as input for step processing",
         examples=["$inputs.image", "$steps.cropping.crops"],
         validation_alias=AliasChoices("images", "image"),
     )
diff --git a/inference/core/workflows/core_steps/models/roboflow/multi_class_classification.py b/inference/core/workflows/core_steps/models/roboflow/multi_class_classification.py
index 4172168b60..6cdd0aa99d 100644
--- a/inference/core/workflows/core_steps/models/roboflow/multi_class_classification.py
+++ b/inference/core/workflows/core_steps/models/roboflow/multi_class_classification.py
@@ -61,7 +61,7 @@ class BlockManifest(WorkflowBlockManifest):
     )
     type: Literal["RoboflowClassificationModel", "ClassificationModel"]
     images: Union[WorkflowImageSelector, StepOutputImageSelector] = Field(
-        description="Reference at image to be used as input for step processing",
+        description="Reference an image to be used as input for step processing",
         examples=["$inputs.image", "$steps.cropping.crops"],
         validation_alias=AliasChoices("images", "image"),
     )
diff --git a/inference/core/workflows/core_steps/models/roboflow/multi_label_classification.py b/inference/core/workflows/core_steps/models/roboflow/multi_label_classification.py
index 78306fcc71..2349beba17 100644
--- a/inference/core/workflows/core_steps/models/roboflow/multi_label_classification.py
+++ b/inference/core/workflows/core_steps/models/roboflow/multi_label_classification.py
@@ -63,7 +63,7 @@ class BlockManifest(WorkflowBlockManifest):
         "RoboflowMultiLabelClassificationModel", "MultiLabelClassificationModel"
     ]
     images: Union[WorkflowImageSelector, StepOutputImageSelector] = Field(
-        description="Reference at image to be used as input for step processing",
+        description="Reference an image to be used as input for step processing",
         examples=["$inputs.image", "$steps.cropping.crops"],
         validation_alias=AliasChoices("images", "image"),
     )
diff --git a/inference/core/workflows/core_steps/models/roboflow/object_detection.py b/inference/core/workflows/core_steps/models/roboflow/object_detection.py
index 09206ae693..5cf3cd9464 100644
--- a/inference/core/workflows/core_steps/models/roboflow/object_detection.py
+++ b/inference/core/workflows/core_steps/models/roboflow/object_detection.py
@@ -42,7 +42,7 @@ from inference_sdk import InferenceConfiguration, InferenceHTTPClient
 
 LONG_DESCRIPTION = """
-Run inference on a multi-label classification model hosted on or uploaded to Roboflow.
+Run inference on an object-detection model hosted on or uploaded to Roboflow.
 
 You can query any model that is private to your account, or any public model available on
 [Roboflow Universe](https://universe.roboflow.com).
@@ -65,7 +65,7 @@ class BlockManifest(WorkflowBlockManifest):
     )
     type: Literal["RoboflowObjectDetectionModel", "ObjectDetectionModel"]
     images: Union[WorkflowImageSelector, StepOutputImageSelector] = Field(
-        description="Reference at image to be used as input for step processing",
+        description="Reference an image to be used as input for step processing",
         examples=["$inputs.image", "$steps.cropping.crops"],
         validation_alias=AliasChoices("images", "image"),
     )
diff --git a/inference/core/workflows/core_steps/models/third_party/barcode_detection.py b/inference/core/workflows/core_steps/models/third_party/barcode_detection.py
index 4dff0e4623..fa0f97c2c5 100644
--- a/inference/core/workflows/core_steps/models/third_party/barcode_detection.py
+++ b/inference/core/workflows/core_steps/models/third_party/barcode_detection.py
@@ -42,7 +42,7 @@ class BlockManifest(WorkflowBlockManifest):
     )
     type: Literal["BarcodeDetector", "BarcodeDetection"]
     images: Union[WorkflowImageSelector, StepOutputImageSelector] = Field(
-        description="Reference at image to be used as input for step processing",
+        description="Reference an image to be used as input for step processing",
         examples=["$inputs.image", "$steps.cropping.crops"],
         validation_alias=AliasChoices("images", "image"),
     )
diff --git a/inference/core/workflows/core_steps/models/third_party/qr_code_detection.py b/inference/core/workflows/core_steps/models/third_party/qr_code_detection.py
index c7f5dfee9a..33c4afb852 100644
--- a/inference/core/workflows/core_steps/models/third_party/qr_code_detection.py
+++ b/inference/core/workflows/core_steps/models/third_party/qr_code_detection.py
@@ -41,7 +41,7 @@ class BlockManifest(WorkflowBlockManifest):
     )
     type: Literal["QRCodeDetector", "QRCodeDetection"]
     images: Union[WorkflowImageSelector, StepOutputImageSelector] = Field(
-        description="Reference at image to be used as input for step processing",
+        description="Reference an image to be used as input for step processing",
         examples=["$inputs.image", "$steps.cropping.crops"],
         validation_alias=AliasChoices("images", "image"),
     )
diff --git a/inference/core/workflows/core_steps/sinks/active_learning/data_collector.py b/inference/core/workflows/core_steps/sinks/active_learning/data_collector.py
index d8e6ed0c98..67aa426afc 100644
--- a/inference/core/workflows/core_steps/sinks/active_learning/data_collector.py
+++ b/inference/core/workflows/core_steps/sinks/active_learning/data_collector.py
@@ -67,7 +67,7 @@ class BlockManifest(WorkflowBlockManifest):
     )
     type: Literal["ActiveLearningDataCollector"]
     images: Union[WorkflowImageSelector, StepOutputImageSelector] = Field(
-        description="Reference at image to be used as input for step processing",
+        description="Reference an image to be used as input for step processing",
         examples=["$inputs.image", "$steps.cropping.crops"],
         validation_alias=AliasChoices("images", "image"),
     )
diff --git a/inference/core/workflows/core_steps/transformations/absolute_static_crop.py b/inference/core/workflows/core_steps/transformations/absolute_static_crop.py
index 1a93bc7f66..f316f4a268 100644
--- a/inference/core/workflows/core_steps/transformations/absolute_static_crop.py
+++ b/inference/core/workflows/core_steps/transformations/absolute_static_crop.py
@@ -52,7 +52,7 @@ class BlockManifest(WorkflowBlockManifest):
     )
     type: Literal["AbsoluteStaticCrop"]
     images: Union[WorkflowImageSelector, StepOutputImageSelector] = Field(
-        description="Reference at image to be used as input for step processing",
+        description="Reference an image to be used as input for step processing",
         examples=["$inputs.image", "$steps.cropping.crops"],
         validation_alias=AliasChoices("images", "image"),
     )
diff --git a/inference/core/workflows/core_steps/transformations/crop.py b/inference/core/workflows/core_steps/transformations/crop.py
index 944543ca7f..0c9878404f 100644
--- a/inference/core/workflows/core_steps/transformations/crop.py
+++ b/inference/core/workflows/core_steps/transformations/crop.py
@@ -59,7 +59,7 @@ class BlockManifest(WorkflowBlockManifest):
     )
     type: Literal["Crop"]
     images: Union[WorkflowImageSelector, StepOutputImageSelector] = Field(
-        description="Reference at image to be used as input for step processing",
+        description="Reference an image to be used as input for step processing",
         examples=["$inputs.image", "$steps.cropping.crops"],
         validation_alias=AliasChoices("images", "image"),
     )
diff --git a/inference/core/workflows/core_steps/transformations/relative_static_crop.py b/inference/core/workflows/core_steps/transformations/relative_static_crop.py
index 4f8eb0ab5a..ad62511536 100644
--- a/inference/core/workflows/core_steps/transformations/relative_static_crop.py
+++ b/inference/core/workflows/core_steps/transformations/relative_static_crop.py
@@ -54,7 +54,7 @@ class BlockManifest(WorkflowBlockManifest):
     type: Literal["RelativeStaticCrop"]
     name: str = Field(description="Unique name of step in workflows")
     images: Union[WorkflowImageSelector, StepOutputImageSelector] = Field(
-        description="Reference at image to be used as input for step processing",
+        description="Reference an image to be used as input for step processing",
         examples=["$inputs.image", "$steps.cropping.crops"],
         validation_alias=AliasChoices("images", "image"),
     )
diff --git a/inference/core/workflows/execution_engine/executor/core.py b/inference/core/workflows/execution_engine/executor/core.py
index 2b029e1311..7973c04dec 100644
--- a/inference/core/workflows/execution_engine/executor/core.py
+++ b/inference/core/workflows/execution_engine/executor/core.py
@@ -55,6 +55,7 @@ async def run_workflow(
 
     return construct_workflow_output(
         workflow_outputs=workflow.workflow_definition.outputs,
         execution_cache=execution_cache,
+        runtime_parameters=runtime_parameters,
     )
 
diff --git a/inference/core/workflows/execution_engine/executor/output_constructor.py b/inference/core/workflows/execution_engine/executor/output_constructor.py
index f404e5440f..f956299156 100644
--- a/inference/core/workflows/execution_engine/executor/output_constructor.py
+++ b/inference/core/workflows/execution_engine/executor/output_constructor.py
@@ -5,6 +5,7 @@
 from inference.core.workflows.execution_engine.compiler.utils import (
     get_last_chunk_of_selector,
     get_step_selector_from_its_output,
+    is_input_selector,
 )
 from inference.core.workflows.execution_engine.executor.execution_cache import (
     ExecutionCache,
@@ -14,9 +15,17 @@
 def construct_workflow_output(
     workflow_outputs: List[JsonField],
     execution_cache: ExecutionCache,
+    runtime_parameters: Dict[str, Any],
 ) -> Dict[str, List[Any]]:
     result = {}
     for node in workflow_outputs:
+        if is_input_selector(selector_or_value=node.selector):
+            input_name = get_last_chunk_of_selector(selector=node.selector)
+            result[node.name] = runtime_parameters[input_name]
+            # the above yields a list for an image input and the plain parameter
+            # value for a singular input; we do not check parameter existence
+            # here, as the EE should validate that at compilation
+            continue
         step_selector = get_step_selector_from_its_output(
             step_output_selector=node.selector
         )
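Note on the `output_constructor.py` change above: workflow outputs can now point at `$inputs.*` selectors, which are echoed from the runtime parameters instead of being looked up in the execution cache. Below is a minimal, self-contained sketch of that dispatch; the helpers here are simplified stand-ins for `is_input_selector` and `get_last_chunk_of_selector` from `inference.core.workflows.execution_engine.compiler.utils`, and the flat `step_results` dict stands in for `ExecutionCache`.

```python
from typing import Any, Dict


def is_input_selector(selector_or_value: Any) -> bool:
    # assumption: input selectors are strings shaped like "$inputs.<name>"
    return isinstance(selector_or_value, str) and selector_or_value.startswith("$inputs.")


def get_last_chunk_of_selector(selector: str) -> str:
    # "$inputs.image" -> "image"
    return selector.split(".")[-1]


def resolve_output(
    selector: str,
    runtime_parameters: Dict[str, Any],
    step_results: Dict[str, Any],
) -> Any:
    if is_input_selector(selector_or_value=selector):
        # echo the runtime parameter straight into the workflow output;
        # existence is assumed to have been validated at compilation
        return runtime_parameters[get_last_chunk_of_selector(selector=selector)]
    # otherwise fall back to step outputs (stand-in for the execution cache)
    return step_results[get_last_chunk_of_selector(selector=selector)]


print(resolve_output("$inputs.confidence", {"confidence": 0.3}, {}))  # 0.3
```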
diff --git a/requirements/_requirements.txt b/requirements/_requirements.txt
index 1a5c7dcff1..e8b7afb2dd 100644
--- a/requirements/_requirements.txt
+++ b/requirements/_requirements.txt
@@ -16,7 +16,7 @@ pybase64<2.0.0
 scikit-image>=0.19.0
 requests-toolbelt>=1.0.0
 wheel>=0.38.1
-setuptools>=65.5.1
+setuptools>=65.5.1,<70.0.0
 pytest-asyncio<=0.21.1
 networkx>=3.1
 pydantic~=2.6
diff --git a/tests/workflows/unit_tests/execution_engine/executor/test_output_constructor.py b/tests/workflows/unit_tests/execution_engine/executor/test_output_constructor.py
index e94f579b92..fa55fb6bd4 100644
--- a/tests/workflows/unit_tests/execution_engine/executor/test_output_constructor.py
+++ b/tests/workflows/unit_tests/execution_engine/executor/test_output_constructor.py
@@ -1,3 +1,5 @@
+import numpy as np
+
 from inference.core.workflows.entities.base import (
     CoordinatesSystem,
     JsonField,
@@ -56,7 +58,9 @@ def test_construct_response_when_field_needs_to_be_grabbed_from_nested_output_in
 
     # when
     result = construct_workflow_output(
-        workflow_outputs=workflow_outputs, execution_cache=execution_cache
+        workflow_outputs=workflow_outputs,
+        execution_cache=execution_cache,
+        runtime_parameters={},
     )
 
     # then
@@ -98,7 +102,9 @@ def test_construct_response_when_field_needs_to_be_grabbed_from_nested_output_in
 
     # when
     result = construct_workflow_output(
-        workflow_outputs=workflow_outputs, execution_cache=execution_cache
+        workflow_outputs=workflow_outputs,
+        execution_cache=execution_cache,
+        runtime_parameters={},
     )
 
     # then
@@ -121,7 +127,9 @@ def test_construct_response_when_step_output_is_missing_due_to_conditional_execu
 
     # when
     result = construct_workflow_output(
-        workflow_outputs=workflow_outputs, execution_cache=execution_cache
+        workflow_outputs=workflow_outputs,
+        execution_cache=execution_cache,
+        runtime_parameters={},
     )
 
     # then
@@ -142,7 +150,9 @@ def test_construct_response_when_expected_step_property_is_missing() -> None:
 
     # when
     result = construct_workflow_output(
-        workflow_outputs=workflow_outputs, execution_cache=execution_cache
+        workflow_outputs=workflow_outputs,
+        execution_cache=execution_cache,
+        runtime_parameters={},
     )
 
     # then
@@ -195,7 +205,9 @@ def test_construct_response_when_wildcard_selector_used_and_parent_coordinates_s
 
     # when
     result = construct_workflow_output(
-        workflow_outputs=workflow_outputs, execution_cache=execution_cache
+        workflow_outputs=workflow_outputs,
+        execution_cache=execution_cache,
+        runtime_parameters={},
     )
 
     # then
@@ -264,7 +276,9 @@ def test_construct_response_when_wildcard_selector_used_and_own_coordinates_syst
 
     # when
     result = construct_workflow_output(
-        workflow_outputs=workflow_outputs, execution_cache=execution_cache
+        workflow_outputs=workflow_outputs,
+        execution_cache=execution_cache,
+        runtime_parameters={},
     )
 
     # then
@@ -275,3 +289,80 @@ def test_construct_response_when_wildcard_selector_used_and_own_coordinates_syst
         ],
         "other": [{"predictions": ["g", "h", "i"]}],
     }
+
+
+def test_construct_response_when_results_to_be_grabbed_from_inputs() -> None:
+    # given
+    execution_cache = ExecutionCache.init()
+    workflow_outputs = [
+        JsonField(type="JsonField", name="some", selector="$inputs.image"),
+        JsonField(type="JsonField", name="other", selector="$inputs.confidence"),
+    ]
+    runtime_parameters = {
+        "image": [np.zeros((192, 168, 3), dtype=np.uint8)],
+        "confidence": 0.3,
+    }
+
+    # when
+    result = construct_workflow_output(
+        workflow_outputs=workflow_outputs,
+        execution_cache=execution_cache,
+        runtime_parameters=runtime_parameters,
+    )
+
+    # then
+    assert len(result) == 2, "Expected 2 elements to be registered in the output"
"Expected 2 elements to be registered in the output" + assert ( + result["other"] == 0.3 + ), "Confidence value (without list wrapping) to be fetched from inputs and named `other`" + assert np.allclose( + result["some"], [np.zeros((192, 168, 3), dtype=np.uint8)] + ), "`some` output expected to carry one-element list of input image" + + +def test_construct_response_when_results_to_be_grabbed_from_inputs_and_step_outputs() -> ( + None +): + # given + execution_cache = ExecutionCache.init() + execution_cache.register_step( + step_name="a", + output_definitions=[OutputDefinition(name="predictions")], + compatible_with_batches=True, + ) + execution_cache.register_step_outputs( + step_name="a", + outputs=[ + {"predictions": ["a", "b", "c"]}, + {"predictions": ["d", "e", "f"]}, + ], + ) + workflow_outputs = [ + JsonField(type="JsonField", name="a", selector="$inputs.image"), + JsonField(type="JsonField", name="b", selector="$inputs.confidence"), + JsonField(type="JsonField", name="c", selector="$steps.a.predictions"), + ] + runtime_parameters = { + "image": [np.zeros((192, 168, 3), dtype=np.uint8)], + "confidence": 0.3, + } + + # when + result = construct_workflow_output( + workflow_outputs=workflow_outputs, + execution_cache=execution_cache, + runtime_parameters=runtime_parameters, + ) + + # then + assert len(result) == 3, "Expected 3 elements to be registered in the output" + assert ( + result["b"] == 0.3 + ), "Confidence value (without list wrapping) to be fetched from inputs and named `b`" + assert np.allclose( + result["a"], [np.zeros((192, 168, 3), dtype=np.uint8)] + ), "`a` output expected to carry one-element list of input image" + assert result["c"] == [ + ["a", "b", "c"], + ["d", "e", "f"], + ], "All predictions from step `c` expected to be registered under `c` output"