Pass Inputs to Outputs when Wired #412

Merged: 7 commits, May 23, 2024
2 changes: 1 addition & 1 deletion inference/core/version.py
@@ -1,4 +1,4 @@
__version__ = "0.11.0"
__version__ = "0.11.1"


if __name__ == "__main__":
@@ -83,7 +83,7 @@ class BlockManifest(WorkflowBlockManifest):
set,
] = Field(
description="Left operand of expression `left operator right` to evaluate boolean value of condition statement",
examples=["$steps.classification.top", 3, "some"],
examples=["$steps.classification.top", 3, "foo"],
)
operator: Operator = Field(
description="Operator in expression `left operator right` to evaluate boolean value of condition statement",
@@ -117,7 +117,7 @@ class BlockManifest(WorkflowBlockManifest):
set,
] = Field(
description="Right operand of expression `left operator right` to evaluate boolean value of condition statement",
examples=["$steps.classification.top", 3, "some"],
examples=["$steps.classification.top", 3, "bar"],
)
step_if_true: StepSelector = Field(
description="Reference to step which shall be executed if expression evaluates to true",
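For illustration, a minimal sketch of how this block's `left operator right` expression might be wired inside a workflow definition; the `Condition` type literal, the `equal` operator value, and the step names are assumptions for the example, not taken from this diff:

# Hypothetical Condition step: routes execution depending on whether the
# classification step's top class equals "cat" (type/operator names assumed).
condition_step = {
    "type": "Condition",
    "name": "class_gate",
    "left": "$steps.classification.top",
    "operator": "equal",
    "right": "cat",
    "step_if_true": "$steps.crop",
    "step_if_false": "$steps.data_sink",
}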
@@ -57,7 +57,7 @@ class BlockManifest(WorkflowBlockManifest):
type: Literal["ClipComparison"]
name: str = Field(description="Unique name of step in workflows")
images: Union[WorkflowImageSelector, StepOutputImageSelector] = Field(
description="Reference at image to be used as input for step processing",
description="Reference an image to be used as input for step processing",
examples=["$inputs.image", "$steps.cropping.crops"],
validation_alias=AliasChoices("images", "image"),
)
@@ -112,13 +112,13 @@ async def run_locally(
prompt_type="text",
api_key=self._api_key,
)
doctr_model_id = load_core_model(
clip_model_id = load_core_model(
model_manager=self._model_manager,
inference_request=inference_request,
core_model="clip",
)
prediction = await self._model_manager.infer_from_request(
doctr_model_id, inference_request
clip_model_id, inference_request
)
predictions.append(prediction.dict())
return self._post_process_result(image=images, predictions=predictions)
@@ -85,7 +85,7 @@ class BlockManifest(WorkflowBlockManifest):
)
type: Literal["LMM"]
images: Union[WorkflowImageSelector, StepOutputImageSelector] = Field(
description="Reference at image to be used as input for step processing",
description="Reference an image to be used as input for step processing",
examples=["$inputs.image", "$steps.cropping.crops"],
validation_alias=AliasChoices("images", "image"),
)
@@ -60,7 +60,7 @@ class BlockManifest(WorkflowBlockManifest):
)
type: Literal["LMMForClassification"]
images: Union[WorkflowImageSelector, StepOutputImageSelector] = Field(
description="Reference at image to be used as input for step processing",
description="Reference an image to be used as input for step processing",
examples=["$inputs.image", "$steps.cropping.crops"],
validation_alias=AliasChoices("images", "image"),
)
@@ -58,7 +58,7 @@ class BlockManifest(WorkflowBlockManifest):
type: Literal["OCRModel"]
name: str = Field(description="Unique name of step in workflows")
images: Union[WorkflowImageSelector, StepOutputImageSelector] = Field(
description="Reference at image to be used as input for step processing",
description="Reference an image to be used as input for step processing",
examples=["$inputs.image", "$steps.cropping.crops"],
validation_alias=AliasChoices("images", "image"),
)
@@ -64,7 +64,7 @@ class BlockManifest(WorkflowBlockManifest):
type: Literal["YoloWorldModel", "YoloWorld"]
name: str = Field(description="Unique name of step in workflows")
images: Union[WorkflowImageSelector, StepOutputImageSelector] = Field(
description="Reference at image to be used as input for step processing",
description="Reference an image to be used as input for step processing",
examples=["$inputs.image", "$steps.cropping.crops"],
validation_alias=AliasChoices("images", "image"),
)
@@ -68,7 +68,7 @@ class BlockManifest(WorkflowBlockManifest):
)
type: Literal["RoboflowInstanceSegmentationModel", "InstanceSegmentationModel"]
images: Union[WorkflowImageSelector, StepOutputImageSelector] = Field(
description="Reference at image to be used as input for step processing",
description="Reference an image to be used as input for step processing",
examples=["$inputs.image", "$steps.cropping.crops"],
validation_alias=AliasChoices("images", "image"),
)
@@ -67,7 +67,7 @@ class BlockManifest(WorkflowBlockManifest):
)
type: Literal["RoboflowKeypointDetectionModel", "KeypointsDetectionModel"]
images: Union[WorkflowImageSelector, StepOutputImageSelector] = Field(
description="Reference at image to be used as input for step processing",
description="Reference an image to be used as input for step processing",
examples=["$inputs.image", "$steps.cropping.crops"],
validation_alias=AliasChoices("images", "image"),
)
@@ -61,7 +61,7 @@ class BlockManifest(WorkflowBlockManifest):
)
type: Literal["RoboflowClassificationModel", "ClassificationModel"]
images: Union[WorkflowImageSelector, StepOutputImageSelector] = Field(
description="Reference at image to be used as input for step processing",
description="Reference an image to be used as input for step processing",
examples=["$inputs.image", "$steps.cropping.crops"],
validation_alias=AliasChoices("images", "image"),
)
@@ -63,7 +63,7 @@ class BlockManifest(WorkflowBlockManifest):
"RoboflowMultiLabelClassificationModel", "MultiLabelClassificationModel"
]
images: Union[WorkflowImageSelector, StepOutputImageSelector] = Field(
description="Reference at image to be used as input for step processing",
description="Reference an image to be used as input for step processing",
examples=["$inputs.image", "$steps.cropping.crops"],
validation_alias=AliasChoices("images", "image"),
)
@@ -42,7 +42,7 @@
from inference_sdk import InferenceConfiguration, InferenceHTTPClient

LONG_DESCRIPTION = """
Run inference on a multi-label classification model hosted on or uploaded to Roboflow.
Run inference on an object-detection model hosted on or uploaded to Roboflow.

You can query any model that is private to your account, or any public model available
on [Roboflow Universe](https://universe.roboflow.com).
@@ -65,7 +65,7 @@ class BlockManifest(WorkflowBlockManifest):
)
type: Literal["RoboflowObjectDetectionModel", "ObjectDetectionModel"]
images: Union[WorkflowImageSelector, StepOutputImageSelector] = Field(
description="Reference at image to be used as input for step processing",
description="Reference an image to be used as input for step processing",
examples=["$inputs.image", "$steps.cropping.crops"],
validation_alias=AliasChoices("images", "image"),
)
@@ -42,7 +42,7 @@ class BlockManifest(WorkflowBlockManifest):
)
type: Literal["BarcodeDetector", "BarcodeDetection"]
images: Union[WorkflowImageSelector, StepOutputImageSelector] = Field(
description="Reference at image to be used as input for step processing",
description="Reference an image to be used as input for step processing",
examples=["$inputs.image", "$steps.cropping.crops"],
validation_alias=AliasChoices("images", "image"),
)
@@ -41,7 +41,7 @@ class BlockManifest(WorkflowBlockManifest):
)
type: Literal["QRCodeDetector", "QRCodeDetection"]
images: Union[WorkflowImageSelector, StepOutputImageSelector] = Field(
description="Reference at image to be used as input for step processing",
description="Reference an image to be used as input for step processing",
examples=["$inputs.image", "$steps.cropping.crops"],
validation_alias=AliasChoices("images", "image"),
)
@@ -67,7 +67,7 @@ class BlockManifest(WorkflowBlockManifest):
)
type: Literal["ActiveLearningDataCollector"]
images: Union[WorkflowImageSelector, StepOutputImageSelector] = Field(
description="Reference at image to be used as input for step processing",
description="Reference an image to be used as input for step processing",
examples=["$inputs.image", "$steps.cropping.crops"],
validation_alias=AliasChoices("images", "image"),
)
@@ -52,7 +52,7 @@ class BlockManifest(WorkflowBlockManifest):
)
type: Literal["AbsoluteStaticCrop"]
images: Union[WorkflowImageSelector, StepOutputImageSelector] = Field(
description="Reference at image to be used as input for step processing",
description="Reference an image to be used as input for step processing",
examples=["$inputs.image", "$steps.cropping.crops"],
validation_alias=AliasChoices("images", "image"),
)
@@ -59,7 +59,7 @@ class BlockManifest(WorkflowBlockManifest):
)
type: Literal["Crop"]
images: Union[WorkflowImageSelector, StepOutputImageSelector] = Field(
description="Reference at image to be used as input for step processing",
description="Reference an image to be used as input for step processing",
examples=["$inputs.image", "$steps.cropping.crops"],
validation_alias=AliasChoices("images", "image"),
)
@@ -54,7 +54,7 @@ class BlockManifest(WorkflowBlockManifest):
type: Literal["RelativeStaticCrop"]
name: str = Field(description="Unique name of step in workflows")
images: Union[WorkflowImageSelector, StepOutputImageSelector] = Field(
description="Reference at image to be used as input for step processing",
description="Reference an image to be used as input for step processing",
examples=["$inputs.image", "$steps.cropping.crops"],
validation_alias=AliasChoices("images", "image"),
)
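All of the image-consuming manifests above accept either an `images` or an `image` key thanks to `validation_alias=AliasChoices("images", "image")`. A standalone Pydantic sketch of that behaviour (a demo model, not one of the real manifest classes):

from pydantic import AliasChoices, BaseModel, Field

class DemoManifest(BaseModel):
    # Both "images" and "image" keys validate into the same field, mirroring
    # the manifests changed in this diff.
    images: str = Field(
        description="Reference an image to be used as input for step processing",
        validation_alias=AliasChoices("images", "image"),
    )

print(DemoManifest.model_validate({"image": "$inputs.image"}).images)
print(DemoManifest.model_validate({"images": "$steps.cropping.crops"}).images)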
@@ -55,6 +55,7 @@ async def run_workflow(
return construct_workflow_output(
workflow_outputs=workflow.workflow_definition.outputs,
execution_cache=execution_cache,
runtime_parameters=runtime_parameters,
)


@@ -5,6 +5,7 @@
from inference.core.workflows.execution_engine.compiler.utils import (
get_last_chunk_of_selector,
get_step_selector_from_its_output,
is_input_selector,
)
from inference.core.workflows.execution_engine.executor.execution_cache import (
ExecutionCache,
@@ -14,9 +15,17 @@
def construct_workflow_output(
workflow_outputs: List[JsonField],
execution_cache: ExecutionCache,
runtime_parameters: Dict[str, Any],
) -> Dict[str, List[Any]]:
result = {}
for node in workflow_outputs:
if is_input_selector(selector_or_value=node.selector):
input_name = get_last_chunk_of_selector(selector=node.selector)
result[node.name] = runtime_parameters[input_name]
# for an image input this yields List[<image>]; for a scalar input it is the
# raw parameter value. Parameter existence is not checked here, as the
# Execution Engine validates that at compilation time.
continue
step_selector = get_step_selector_from_its_output(
step_output_selector=node.selector
)
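The branch added above is what lets a workflow output be wired straight to an input. An abridged, hypothetical definition showing such wiring; the `JsonField` output entries mirror the tests below, while the input type names and step contents are placeholders:

# Outputs "source_image" and "conf_used" point at inputs rather than step
# outputs, so construct_workflow_output copies the runtime parameter values
# into the response instead of consulting the execution cache.
WORKFLOW_DEFINITION = {
    "inputs": [
        {"type": "WorkflowImage", "name": "image"},
        {"type": "WorkflowParameter", "name": "confidence"},
    ],
    "steps": [
        # ... steps consuming "$inputs.image" and "$inputs.confidence" ...
    ],
    "outputs": [
        {"type": "JsonField", "name": "source_image", "selector": "$inputs.image"},
        {"type": "JsonField", "name": "conf_used", "selector": "$inputs.confidence"},
    ],
}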
2 changes: 1 addition & 1 deletion requirements/_requirements.txt
@@ -16,7 +16,7 @@ pybase64<2.0.0
scikit-image>=0.19.0
requests-toolbelt>=1.0.0
wheel>=0.38.1
setuptools>=65.5.1
setuptools>=65.5.1,<70.0.0
pytest-asyncio<=0.21.1
networkx>=3.1
pydantic~=2.6
@@ -1,3 +1,5 @@
import numpy as np

from inference.core.workflows.entities.base import (
CoordinatesSystem,
JsonField,
@@ -56,7 +58,9 @@ def test_construct_response_when_field_needs_to_be_grabbed_from_nested_output_in

# when
result = construct_workflow_output(
workflow_outputs=workflow_outputs, execution_cache=execution_cache
workflow_outputs=workflow_outputs,
execution_cache=execution_cache,
runtime_parameters={},
)

# then
@@ -98,7 +102,9 @@ def test_construct_response_when_field_needs_to_be_grabbed_from_nested_output_in

# when
result = construct_workflow_output(
workflow_outputs=workflow_outputs, execution_cache=execution_cache
workflow_outputs=workflow_outputs,
execution_cache=execution_cache,
runtime_parameters={},
)

# then
@@ -121,7 +127,9 @@ def test_construct_response_when_step_output_is_missing_due_to_conditional_execu

# when
result = construct_workflow_output(
workflow_outputs=workflow_outputs, execution_cache=execution_cache
workflow_outputs=workflow_outputs,
execution_cache=execution_cache,
runtime_parameters={},
)

# then
@@ -142,7 +150,9 @@ def test_construct_response_when_expected_step_property_is_missing() -> None:

# when
result = construct_workflow_output(
workflow_outputs=workflow_outputs, execution_cache=execution_cache
workflow_outputs=workflow_outputs,
execution_cache=execution_cache,
runtime_parameters={},
)

# then
@@ -195,7 +205,9 @@ def test_construct_response_when_wildcard_selector_used_and_parent_coordinates_s

# when
result = construct_workflow_output(
workflow_outputs=workflow_outputs, execution_cache=execution_cache
workflow_outputs=workflow_outputs,
execution_cache=execution_cache,
runtime_parameters={},
)

# then
@@ -264,7 +276,9 @@ def test_construct_response_when_wildcard_selector_used_and_own_coordinates_syst

# when
result = construct_workflow_output(
workflow_outputs=workflow_outputs, execution_cache=execution_cache
workflow_outputs=workflow_outputs,
execution_cache=execution_cache,
runtime_parameters={},
)

# then
@@ -275,3 +289,80 @@ def test_construct_response_when_wildcard_selector_used_and_own_coordinates_syst
],
"other": [{"predictions": ["g", "h", "i"]}],
}


def test_construct_response_when_results_to_be_grabbed_from_inputs() -> None:
# given
execution_cache = ExecutionCache.init()
workflow_outputs = [
JsonField(type="JsonField", name="some", selector="$inputs.image"),
JsonField(type="JsonField", name="other", selector="$inputs.confidence"),
]
runtime_parameters = {
"image": [np.zeros((192, 168, 3), dtype=np.uint8)],
"confidence": 0.3,
}

# when
result = construct_workflow_output(
workflow_outputs=workflow_outputs,
execution_cache=execution_cache,
runtime_parameters=runtime_parameters,
)

# then
assert len(result) == 2, "Expected 2 elements to be registered in the output"
assert (
result["other"] == 0.3
), "Confidence value (without list wrapping) to be fetched from inputs and named `other`"
assert np.allclose(
result["some"], [np.zeros((192, 168, 3), dtype=np.uint8)]
), "`some` output expected to carry one-element list of input image"


def test_construct_response_when_results_to_be_grabbed_from_inputs_and_step_outputs() -> (
None
):
# given
execution_cache = ExecutionCache.init()
execution_cache.register_step(
step_name="a",
output_definitions=[OutputDefinition(name="predictions")],
compatible_with_batches=True,
)
execution_cache.register_step_outputs(
step_name="a",
outputs=[
{"predictions": ["a", "b", "c"]},
{"predictions": ["d", "e", "f"]},
],
)
workflow_outputs = [
JsonField(type="JsonField", name="a", selector="$inputs.image"),
JsonField(type="JsonField", name="b", selector="$inputs.confidence"),
JsonField(type="JsonField", name="c", selector="$steps.a.predictions"),
]
runtime_parameters = {
"image": [np.zeros((192, 168, 3), dtype=np.uint8)],
"confidence": 0.3,
}

# when
result = construct_workflow_output(
workflow_outputs=workflow_outputs,
execution_cache=execution_cache,
runtime_parameters=runtime_parameters,
)

# then
assert len(result) == 3, "Expected 3 elements to be registered in the output"
assert (
result["b"] == 0.3
), "Confidence value (without list wrapping) to be fetched from inputs and named `b`"
assert np.allclose(
result["a"], [np.zeros((192, 168, 3), dtype=np.uint8)]
), "`a` output expected to carry one-element list of input image"
assert result["c"] == [
["a", "b", "c"],
["d", "e", "f"],
], "All predictions from step `c` expected to be registered under `c` output"