From 5e0d446837b2797cb1b5a4ffc19b6c991bfb3dd7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?=
Date: Thu, 28 Nov 2024 13:08:16 +0100
Subject: [PATCH 01/10] Add secret kind and basic secret store

---
 docker/dockerfiles/Dockerfile.onnx.lambda     |  1 +
 .../dockerfiles/Dockerfile.onnx.lambda.slim   |  1 +
 inference/core/env.py                         |  3 +
 .../core_steps/common/serializers.py          | 11 +++
 .../flow_control/rate_limiter/v1.py           |  8 ++
 inference/core/workflows/core_steps/loader.py |  9 +++
 .../models/foundation/anthropic_claude/v1.py  |  5 +-
 .../models/foundation/google_gemini/v1.py     |  5 +-
 .../models/foundation/google_vision_ocr/v1.py |  5 +-
 .../core_steps/models/foundation/lmm/v1.py    | 15 ++--
 .../models/foundation/lmm_classifier/v1.py    | 15 ++--
 .../core_steps/models/foundation/openai/v1.py | 13 ++--
 .../core_steps/models/foundation/openai/v2.py |  5 +-
 .../foundation/stability_ai/inpainting/v1.py  |  5 +-
 .../core_steps/secrets_providers/__init__.py  |  0
 .../environment_secrets_store/__init__.py     |  0
 .../environment_secrets_store/v1.py           | 77 +++++++++++++++++++
 .../core_steps/sinks/email_notification/v1.py | 19 +++--
 .../workflows/core_steps/sinks/webhook/v1.py  |  7 ++
 .../execution_engine/entities/types.py        |  8 ++
 .../workflows/execution_engine/v1/core.py     |  2 +-
 21 files changed, 181 insertions(+), 33 deletions(-)
 create mode 100644 inference/core/workflows/core_steps/secrets_providers/__init__.py
 create mode 100644 inference/core/workflows/core_steps/secrets_providers/environment_secrets_store/__init__.py
 create mode 100644 inference/core/workflows/core_steps/secrets_providers/environment_secrets_store/v1.py

diff --git a/docker/dockerfiles/Dockerfile.onnx.lambda b/docker/dockerfiles/Dockerfile.onnx.lambda
index 4bb7e2b17..59e30abe5 100644
--- a/docker/dockerfiles/Dockerfile.onnx.lambda
+++ b/docker/dockerfiles/Dockerfile.onnx.lambda
@@ -74,6 +74,7 @@ ENV ALLOW_CUSTOM_PYTHON_EXECUTION_IN_WORKFLOWS=False
 ENV CORE_MODEL_TROCR_ENABLED=false
 ENV USE_FILE_CACHE_FOR_WORKFLOWS_DEFINITIONS=False
 ENV ALLOW_WORKFLOW_BLOCKS_ACCESSING_LOCAL_STORAGE=False
+ENV ALLOW_WORKFLOW_BLOCKS_ACCESSING_ENVIRONMENTAL_VARIABLES=False

 WORKDIR ${LAMBDA_TASK_ROOT}
 RUN rm -rf /build
diff --git a/docker/dockerfiles/Dockerfile.onnx.lambda.slim b/docker/dockerfiles/Dockerfile.onnx.lambda.slim
index 12c746b5e..7a1938db5 100644
--- a/docker/dockerfiles/Dockerfile.onnx.lambda.slim
+++ b/docker/dockerfiles/Dockerfile.onnx.lambda.slim
@@ -69,6 +69,7 @@ ENV CORE_MODEL_TROCR_ENABLED=false
 ENV ENABLE_WORKFLOWS_PROFILING=True
 ENV USE_FILE_CACHE_FOR_WORKFLOWS_DEFINITIONS=False
 ENV ALLOW_WORKFLOW_BLOCKS_ACCESSING_LOCAL_STORAGE=False
+ENV ALLOW_WORKFLOW_BLOCKS_ACCESSING_ENVIRONMENTAL_VARIABLES=False

 WORKDIR ${LAMBDA_TASK_ROOT}

diff --git a/inference/core/env.py b/inference/core/env.py
index e7fb056c7..8263f4c58 100644
--- a/inference/core/env.py
+++ b/inference/core/env.py
@@ -458,6 +458,9 @@
 ALLOW_WORKFLOW_BLOCKS_ACCESSING_LOCAL_STORAGE = str2bool(
     os.getenv("ALLOW_WORKFLOW_BLOCKS_ACCESSING_LOCAL_STORAGE", "True")
 )
+ALLOW_WORKFLOW_BLOCKS_ACCESSING_ENVIRONMENTAL_VARIABLES = str2bool(
+    os.getenv("ALLOW_WORKFLOW_BLOCKS_ACCESSING_ENVIRONMENTAL_VARIABLES", "True")
+)
 WORKFLOW_BLOCKS_WRITE_DIRECTORY = os.getenv("WORKFLOW_BLOCKS_WRITE_DIRECTORY")

 DEDICATED_DEPLOYMENT_ID = os.getenv("DEDICATED_DEPLOYMENT_ID")
diff --git a/inference/core/workflows/core_steps/common/serializers.py b/inference/core/workflows/core_steps/common/serializers.py
index aa0cfea6f..187fbce8b 100644
--- a/inference/core/workflows/core_steps/common/serializers.py
+++ b/inference/core/workflows/core_steps/common/serializers.py
@@ -40,6 +40,8 @@
     WorkflowImageData,
 )

+MIN_SECRET_LENGTH_TO_REVEAL_PREFIX = 8
+

 def serialise_sv_detections(detections: sv.Detections) -> dict:
     serialized_detections = []
@@ -180,3 +182,12 @@ def serialize_dict(elements: Dict[str, Any]) -> Dict[str, Any]:
         value = serialize_wildcard_kind(value=value)
         serialized_result[key] = value
     return serialized_result
+
+
+def serialize_secret(secret: str) -> str:
+    if len(secret) < MIN_SECRET_LENGTH_TO_REVEAL_PREFIX:
+        return "*" * MIN_SECRET_LENGTH_TO_REVEAL_PREFIX
+    prefix = secret[:2]
+    infix = "*" * MIN_SECRET_LENGTH_TO_REVEAL_PREFIX
+    suffix = secret[-2:]
+    return f"{prefix}{infix}{suffix}"
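
A quick illustration of how the masking above behaves (an illustrative sketch, not part of the patch; the example values are made up):

# serialize_secret() masks short secrets entirely and keeps a two-character
# prefix and suffix around a fixed-width mask for longer ones:
#
#   serialize_secret("abc")                   # -> "********"
#   serialize_secret("sk-very-secret-token")  # -> "sk********en"
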
diff --git a/inference/core/workflows/core_steps/flow_control/rate_limiter/v1.py b/inference/core/workflows/core_steps/flow_control/rate_limiter/v1.py
index df381e5dc..7ec6bc615 100644
--- a/inference/core/workflows/core_steps/flow_control/rate_limiter/v1.py
+++ b/inference/core/workflows/core_steps/flow_control/rate_limiter/v1.py
@@ -38,6 +38,14 @@
 * adjust `cooldown_seconds` to specify what is the number of seconds that must be awaited before
 next time when `step_2` is fired
+
+
+!!! warning "Cooldown limitations"
+
+    The current implementation of cooldown is limited to video processing - when this block is used in a
+    Workflow that runs behind an HTTP service (Roboflow Hosted API, Dedicated Deployment or a self-hosted
+    `inference` server), the cooldown will have no effect on the processing of HTTP requests.
+
 """
diff --git a/inference/core/workflows/core_steps/loader.py b/inference/core/workflows/core_steps/loader.py
index 201bdc0cf..01187a1c3 100644
--- a/inference/core/workflows/core_steps/loader.py
+++ b/inference/core/workflows/core_steps/loader.py
@@ -91,6 +91,7 @@
 from inference.core.workflows.core_steps.common.serializers import (
     serialise_image,
     serialise_sv_detections,
+    serialize_secret,
     serialize_video_metadata_kind,
     serialize_wildcard_kind,
 )
@@ -217,6 +218,9 @@
 from inference.core.workflows.core_steps.models.third_party.qr_code_detection.v1 import (
     QRCodeDetectorBlockV1,
 )
+from inference.core.workflows.core_steps.secrets_providers.environment_secrets_store.v1 import (
+    EnvironmentSecretsStoreBlockV1,
+)
 from inference.core.workflows.core_steps.sinks.email_notification.v1 import (
     EmailNotificationBlockV1,
 )
@@ -379,6 +383,7 @@
     ROBOFLOW_API_KEY_KIND,
     ROBOFLOW_MODEL_ID_KIND,
     ROBOFLOW_PROJECT_KIND,
+    SECRET_KIND,
     SERIALISED_PAYLOADS_KIND,
     STRING_KIND,
     TOP_CLASS_KIND,
@@ -397,6 +402,7 @@
     "thread_pool_executor": None,
     "allow_access_to_file_system": ALLOW_WORKFLOW_BLOCKS_ACCESSING_LOCAL_STORAGE,
     "allowed_write_directory": WORKFLOW_BLOCKS_WRITE_DIRECTORY,
+    "allow_access_to_environmental_variables": ALLOW_WORKFLOW_BLOCKS_ACCESSING_ENVIRONMENTAL_VARIABLES,
 }

 KINDS_SERIALIZERS = {
@@ -407,6 +413,7 @@
     KEYPOINT_DETECTION_PREDICTION_KIND.name: serialise_sv_detections,
     QR_CODE_DETECTION_KIND.name: serialise_sv_detections,
     BAR_CODE_DETECTION_KIND.name: serialise_sv_detections,
+    SECRET_KIND.name: serialize_secret,
     WILDCARD_KIND.name: serialize_wildcard_kind,
 }
 KINDS_DESERIALIZERS = {
@@ -552,6 +559,7 @@ def load_blocks() -> List[Type[WorkflowBlock]]:
         RoboflowObjectDetectionModelBlockV2,
         VLMAsClassifierBlockV2,
         VLMAsDetectorBlockV2,
+        EnvironmentSecretsStoreBlockV1,
     ]


@@ -591,4 +599,5 @@ def load_kinds() -> List[Kind]:
         IMAGE_METADATA_KIND,
         BYTES_KIND,
         INFERENCE_ID_KIND,
+        SECRET_KIND,
     ]
diff --git a/inference/core/workflows/core_steps/models/foundation/anthropic_claude/v1.py b/inference/core/workflows/core_steps/models/foundation/anthropic_claude/v1.py
index 69a229c6d..952ef993e 100644
--- a/inference/core/workflows/core_steps/models/foundation/anthropic_claude/v1.py
+++ b/inference/core/workflows/core_steps/models/foundation/anthropic_claude/v1.py
@@ -25,6 +25,7 @@
     INTEGER_KIND,
     LANGUAGE_MODEL_OUTPUT_KIND,
     LIST_OF_VALUES_KIND,
+    SECRET_KIND,
     STRING_KIND,
     ImageInputField,
     Selector,
@@ -149,7 +150,7 @@ class BlockManifest(WorkflowBlockManifest):
             },
         },
     )
-    api_key: Union[Selector(kind=[STRING_KIND]), str] = Field(
+    api_key: Union[Selector(kind=[STRING_KIND, SECRET_KIND]), str] = Field(
         description="Your Antropic API key",
         examples=["xxx-xxx", "$inputs.antropics_api_key"],
         private=True,
@@ -220,7 +221,7 @@ def describe_outputs(cls) -> List[OutputDefinition]:

     @classmethod
     def get_execution_engine_compatibility(cls) -> Optional[str]:
-        return ">=1.3.0,<2.0.0"
+        return ">=1.4.0,<2.0.0"


 class AntropicClaudeBlockV1(WorkflowBlock):
diff --git a/inference/core/workflows/core_steps/models/foundation/google_gemini/v1.py b/inference/core/workflows/core_steps/models/foundation/google_gemini/v1.py
index 99d4e4608..313cb1eb2 100644
--- a/inference/core/workflows/core_steps/models/foundation/google_gemini/v1.py
+++ b/inference/core/workflows/core_steps/models/foundation/google_gemini/v1.py
@@ -23,6 +23,7 @@
     IMAGE_KIND,
     LANGUAGE_MODEL_OUTPUT_KIND,
     LIST_OF_VALUES_KIND,
+    SECRET_KIND,
     STRING_KIND,
     ImageInputField,
     Selector,
@@ -158,7 +159,7 @@ class BlockManifest(WorkflowBlockManifest):
             },
         },
     )
-    api_key: Union[Selector(kind=[STRING_KIND]), str] = Field(
+    api_key: Union[Selector(kind=[STRING_KIND, SECRET_KIND]), str] = Field(
         description="Your Google AI API key",
         examples=["xxx-xxx", "$inputs.google_api_key"],
         private=True,
@@ -223,7 +224,7 @@ def describe_outputs(cls) -> List[OutputDefinition]:

     @classmethod
     def get_execution_engine_compatibility(cls) -> Optional[str]:
-        return ">=1.3.0,<2.0.0"
+        return ">=1.4.0,<2.0.0"


 class GoogleGeminiBlockV1(WorkflowBlock):
diff --git a/inference/core/workflows/core_steps/models/foundation/google_vision_ocr/v1.py b/inference/core/workflows/core_steps/models/foundation/google_vision_ocr/v1.py
index 5fa0158e1..2fd8431f6 100644
--- a/inference/core/workflows/core_steps/models/foundation/google_vision_ocr/v1.py
+++ b/inference/core/workflows/core_steps/models/foundation/google_vision_ocr/v1.py
@@ -22,6 +22,7 @@
 from inference.core.workflows.execution_engine.entities.types import (
     IMAGE_KIND,
     OBJECT_DETECTION_PREDICTION_KIND,
+    SECRET_KIND,
     STRING_KIND,
     Selector,
 )
@@ -79,7 +80,7 @@ class BlockManifest(WorkflowBlockManifest):
             },
         },
     )
-    api_key: Union[Selector(kind=[STRING_KIND]), str] = Field(
+    api_key: Union[Selector(kind=[STRING_KIND, SECRET_KIND]), str] = Field(
         description="Your Google Vision API key",
         examples=["xxx-xxx", "$inputs.google_api_key"],
         private=True,
@@ -97,7 +98,7 @@ def describe_outputs(cls) -> List[OutputDefinition]:

     @classmethod
     def get_execution_engine_compatibility(cls) -> Optional[str]:
-        return ">=1.3.0,<2.0.0"
+        return ">=1.4.0,<2.0.0"


 class GoogleVisionOCRBlockV1(WorkflowBlock):
diff --git a/inference/core/workflows/core_steps/models/foundation/lmm/v1.py b/inference/core/workflows/core_steps/models/foundation/lmm/v1.py
index 0234c3b75..1396cf89e 100644
--- a/inference/core/workflows/core_steps/models/foundation/lmm/v1.py
+++ b/inference/core/workflows/core_steps/models/foundation/lmm/v1.py
@@ -34,6 +34,7 @@
     IMAGE_KIND,
     IMAGE_METADATA_KIND,
     PARENT_ID_KIND,
+    SECRET_KIND,
     STRING_KIND,
     WILDCARD_KIND,
     ImageInputField,
@@ -112,11 +113,13 @@ class BlockManifest(WorkflowBlockManifest):
             }
         ],
     )
-    remote_api_key: Union[Selector(kind=[STRING_KIND]), Optional[str]] = Field(
-        default=None,
-        description="Holds API key required to call LMM model - in current state of development, we require OpenAI key when `lmm_type=gpt_4v` and do not require additional API key for CogVLM calls.",
-        examples=["xxx-xxx", "$inputs.api_key"],
-        private=True,
+    remote_api_key: Union[Selector(kind=[STRING_KIND, SECRET_KIND]), Optional[str]] = (
+        Field(
+            default=None,
+            description="Holds API key required to call LMM model - in current state of development, we require OpenAI key when `lmm_type=gpt_4v` and do not require additional API key for CogVLM calls.",
+            examples=["xxx-xxx", "$inputs.api_key"],
+            private=True,
+        )
     )
     json_output: Optional[Dict[str, str]] = Field(
         default=None,
@@ -155,7 +158,7 @@ def get_actual_outputs(self) -> List[OutputDefinition]:

     @classmethod
     def get_execution_engine_compatibility(cls) -> Optional[str]:
-        return ">=1.3.0,<2.0.0"
+        return ">=1.4.0,<2.0.0"


 class LMMBlockV1(WorkflowBlock):
diff --git a/inference/core/workflows/core_steps/models/foundation/lmm_classifier/v1.py b/inference/core/workflows/core_steps/models/foundation/lmm_classifier/v1.py
index fe3d8ee33..5c7e1e140 100644
--- a/inference/core/workflows/core_steps/models/foundation/lmm_classifier/v1.py
+++ b/inference/core/workflows/core_steps/models/foundation/lmm_classifier/v1.py
@@ -28,6 +28,7 @@
     LIST_OF_VALUES_KIND,
     PARENT_ID_KIND,
     PREDICTION_TYPE_KIND,
+    SECRET_KIND,
     STRING_KIND,
     TOP_CLASS_KIND,
     ImageInputField,
@@ -86,11 +87,13 @@ class BlockManifest(WorkflowBlockManifest):
             }
         ],
     )
-    remote_api_key: Union[Selector(kind=[STRING_KIND]), Optional[str]] = Field(
-        default=None,
-        description="Holds API key required to call LMM model - in current state of development, we require OpenAI key when `lmm_type=gpt_4v` and do not require additional API key for CogVLM calls.",
-        examples=["xxx-xxx", "$inputs.api_key"],
-        private=True,
+    remote_api_key: Union[Selector(kind=[STRING_KIND, SECRET_KIND]), Optional[str]] = (
+        Field(
+            default=None,
+            description="Holds API key required to call LMM model - in current state of development, we require OpenAI key when `lmm_type=gpt_4v` and do not require additional API key for CogVLM calls.",
+            examples=["xxx-xxx", "$inputs.api_key"],
+            private=True,
+        )
     )

     @classmethod
@@ -110,7 +113,7 @@ def describe_outputs(cls) -> List[OutputDefinition]:

     @classmethod
     def get_execution_engine_compatibility(cls) -> Optional[str]:
-        return ">=1.3.0,<2.0.0"
+        return ">=1.4.0,<2.0.0"


 class LMMForClassificationBlockV1(WorkflowBlock):
diff --git a/inference/core/workflows/core_steps/models/foundation/openai/v1.py b/inference/core/workflows/core_steps/models/foundation/openai/v1.py
index d9f10d170..49df08772 100644
--- a/inference/core/workflows/core_steps/models/foundation/openai/v1.py
+++ b/inference/core/workflows/core_steps/models/foundation/openai/v1.py
@@ -25,6 +25,7 @@
     IMAGE_KIND,
     IMAGE_METADATA_KIND,
     PARENT_ID_KIND,
+    SECRET_KIND,
     STRING_KIND,
     WILDCARD_KIND,
     ImageInputField,
@@ -75,10 +76,12 @@ class BlockManifest(WorkflowBlockManifest):
         description="Text prompt to the OpenAI model",
         examples=["my prompt", "$inputs.prompt"],
     )
-    openai_api_key: Union[Selector(kind=[STRING_KIND]), Optional[str]] = Field(
-        description="Your OpenAI API key",
-        examples=["xxx-xxx", "$inputs.openai_api_key"],
-        private=True,
+    openai_api_key: Union[Selector(kind=[STRING_KIND, SECRET_KIND]), Optional[str]] = (
+        Field(
+            description="Your OpenAI API key",
key", + examples=["xxx-xxx", "$inputs.openai_api_key"], + private=True, + ) ) openai_model: Union[ Selector(kind=[STRING_KIND]), Literal["gpt-4o", "gpt-4o-mini"] @@ -139,7 +142,7 @@ def get_actual_outputs(self) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.3.0,<2.0.0" + return ">=1.4.0,<2.0.0" class OpenAIBlockV1(WorkflowBlock): diff --git a/inference/core/workflows/core_steps/models/foundation/openai/v2.py b/inference/core/workflows/core_steps/models/foundation/openai/v2.py index 1f9d03aca..1a4e8b418 100644 --- a/inference/core/workflows/core_steps/models/foundation/openai/v2.py +++ b/inference/core/workflows/core_steps/models/foundation/openai/v2.py @@ -22,6 +22,7 @@ IMAGE_KIND, LANGUAGE_MODEL_OUTPUT_KIND, LIST_OF_VALUES_KIND, + SECRET_KIND, STRING_KIND, ImageInputField, Selector, @@ -147,7 +148,7 @@ class BlockManifest(WorkflowBlockManifest): }, }, ) - api_key: Union[Selector(kind=[STRING_KIND]), str] = Field( + api_key: Union[Selector(kind=[STRING_KIND, SECRET_KIND]), str] = Field( description="Your OpenAI API key", examples=["xxx-xxx", "$inputs.openai_api_key"], private=True, @@ -218,7 +219,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.3.0,<2.0.0" + return ">=1.4.0,<2.0.0" class OpenAIBlockV2(WorkflowBlock): diff --git a/inference/core/workflows/core_steps/models/foundation/stability_ai/inpainting/v1.py b/inference/core/workflows/core_steps/models/foundation/stability_ai/inpainting/v1.py index 6f9d87796..686d0fbe3 100644 --- a/inference/core/workflows/core_steps/models/foundation/stability_ai/inpainting/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/stability_ai/inpainting/v1.py @@ -18,6 +18,7 @@ from inference.core.workflows.execution_engine.entities.types import ( IMAGE_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, + SECRET_KIND, STRING_KIND, Selector, ) @@ -89,7 +90,7 @@ class BlockManifest(WorkflowBlockManifest): description="Negative prompt to inpainting model (what you do not wish to see)", examples=["my prompt", "$inputs.prompt"], ) - api_key: Union[Selector(kind=[STRING_KIND]), str] = Field( + api_key: Union[Selector(kind=[STRING_KIND, SECRET_KIND]), str] = Field( description="Your Stability AI API key", examples=["xxx-xxx", "$inputs.stability_ai_api_key"], private=True, @@ -103,7 +104,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.3.0,<2.0.0" + return ">=1.4.0,<2.0.0" class StabilityAIInpaintingBlockV1(WorkflowBlock): diff --git a/inference/core/workflows/core_steps/secrets_providers/__init__.py b/inference/core/workflows/core_steps/secrets_providers/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/inference/core/workflows/core_steps/secrets_providers/environment_secrets_store/__init__.py b/inference/core/workflows/core_steps/secrets_providers/environment_secrets_store/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/inference/core/workflows/core_steps/secrets_providers/environment_secrets_store/v1.py b/inference/core/workflows/core_steps/secrets_providers/environment_secrets_store/v1.py new file mode 100644 index 000000000..8cd08f9cd --- /dev/null +++ b/inference/core/workflows/core_steps/secrets_providers/environment_secrets_store/v1.py @@ -0,0 +1,77 @@ +import os +from typing import List, Literal, Optional, Type + +from pydantic import 
+
+from inference.core.workflows.execution_engine.entities.base import OutputDefinition
+from inference.core.workflows.execution_engine.entities.types import SECRET_KIND
+from inference.core.workflows.prototypes.block import (
+    BlockResult,
+    WorkflowBlock,
+    WorkflowBlockManifest,
+)
+
+
+class BlockManifest(WorkflowBlockManifest):
+    model_config = ConfigDict(
+        json_schema_extra={
+            "name": "Environment Secrets Store",
+            "version": "v1",
+            "short_description": "Fetches secrets from environment variables",
+            "long_description": "Block that fetches secrets from the environment variables of the `inference` server and exposes them to downstream blocks as outputs of the `secret` kind.",
+            "license": "Apache-2.0",
+            "block_type": "secrets_provider",
+        }
+    )
+    type: Literal["roboflow_core/environment_secrets_store@v1"]
+    variables_storing_secrets: List[str] = Field(
+        description="List with names of environment variables to fetch. Each will create separate block output.",
+        examples=[["MY_API_KEY", "OTHER_API_KEY"]],
+        min_items=1,
+    )
+
+    @classmethod
+    def describe_outputs(cls) -> List[OutputDefinition]:
+        return [OutputDefinition(name="*")]
+
+    def get_actual_outputs(self) -> List[OutputDefinition]:
+        return [
+            OutputDefinition(name=variable_name.lower(), kind=SECRET_KIND)
+            for variable_name in self.variables_storing_secrets
+        ]
+
+    @classmethod
+    def get_execution_engine_compatibility(cls) -> Optional[str]:
+        return ">=1.4.0,<2.0.0"
+
+
+class EnvironmentSecretsStoreBlockV1(WorkflowBlock):
+
+    def __init__(self, allow_access_to_environmental_variables: bool):
+        super().__init__()
+        self._allow_access_to_environmental_variables = (
+            allow_access_to_environmental_variables
+        )
+
+    @classmethod
+    def get_manifest(cls) -> Type[WorkflowBlockManifest]:
+        return BlockManifest
+
+    @classmethod
+    def get_init_parameters(cls) -> List[str]:
+        return ["allow_access_to_environmental_variables"]
+
+    def run(
+        self,
+        variables_storing_secrets: List[str],
+    ) -> BlockResult:
+        if not self._allow_access_to_environmental_variables:
+            raise RuntimeError(
+                "`roboflow_core/environment_secrets_store@v1` block cannot run in this environment - "
+                "access to environment variables is forbidden - use self-hosted `inference` or "
+                "Roboflow Dedicated Deployment."
+            )
+        return {
+            variable_name.lower(): os.getenv(variable_name)
+            for variable_name in variables_storing_secrets
+        }
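
A minimal Workflow definition wiring this provider into a downstream step could look as follows (an illustrative sketch mirroring the integration tests added later in this series; each requested variable becomes a lower-cased step output):

# Illustrative only - exposing SLACK_TOKEN via the secrets store and consuming
# it through a step-output selector:
example_workflow = {
    "version": "1.4.0",
    "inputs": [],
    "steps": [
        {
            "type": "roboflow_core/environment_secrets_store@v1",
            "name": "vault",
            "variables_storing_secrets": ["SLACK_TOKEN"],
        },
        # a downstream block can now reference "$steps.vault.slack_token"
    ],
    "outputs": [],
}
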
diff --git a/inference/core/workflows/core_steps/sinks/email_notification/v1.py b/inference/core/workflows/core_steps/sinks/email_notification/v1.py
index 7fa250823..e6a2651ca 100644
--- a/inference/core/workflows/core_steps/sinks/email_notification/v1.py
+++ b/inference/core/workflows/core_steps/sinks/email_notification/v1.py
@@ -28,6 +28,7 @@
     BYTES_KIND,
     INTEGER_KIND,
     LIST_OF_VALUES_KIND,
+    SECRET_KIND,
     STRING_KIND,
     Selector,
 )
@@ -124,6 +125,12 @@
 During cooldown period, consecutive runs of the step will cause `throttling_status` output to be
 set `True` and no notification will be sent.

+!!! warning "Cooldown limitations"
+
+    The current implementation of cooldown is limited to video processing - when this block is used in a
+    Workflow that runs behind an HTTP service (Roboflow Hosted API, Dedicated Deployment or a self-hosted
+    `inference` server), the cooldown will have no effect on the processing of HTTP requests.
+
 ### Attachments

@@ -244,10 +251,12 @@ class BlockManifest(WorkflowBlockManifest):
         description="Custom SMTP server to be used",
         examples=["$inputs.smtp_server", "smtp.google.com"],
     )
-    sender_email_password: Union[str, Selector(kind=[STRING_KIND])] = Field(
-        description="Sender e-mail password be used when authenticating to SMTP server",
-        private=True,
-        examples=["$inputs.email_password"],
+    sender_email_password: Union[str, Selector(kind=[STRING_KIND, SECRET_KIND])] = (
+        Field(
+            description="Sender e-mail password to be used when authenticating to SMTP server",
+            private=True,
+            examples=["$inputs.email_password"],
+        )
     )
     smtp_port: int = Field(
         default=465,
@@ -298,7 +307,7 @@ def describe_outputs(cls) -> List[OutputDefinition]:

     @classmethod
     def get_execution_engine_compatibility(cls) -> Optional[str]:
-        return ">=1.3.0,<2.0.0"
+        return ">=1.4.0,<2.0.0"


 class EmailNotificationBlockV1(WorkflowBlock):
diff --git a/inference/core/workflows/core_steps/sinks/webhook/v1.py b/inference/core/workflows/core_steps/sinks/webhook/v1.py
index 3652b25fc..5d9569260 100644
--- a/inference/core/workflows/core_steps/sinks/webhook/v1.py
+++ b/inference/core/workflows/core_steps/sinks/webhook/v1.py
@@ -112,6 +112,13 @@
 During cooldown period, consecutive runs of the step will cause `throttling_status` output to be
 set `True` and no notification will be sent.

+!!! warning "Cooldown limitations"
+
+    The current implementation of cooldown is limited to video processing - when this block is used in a
+    Workflow that runs behind an HTTP service (Roboflow Hosted API, Dedicated Deployment or a self-hosted
+    `inference` server), the cooldown will have no effect on the processing of HTTP requests.
+
+
 ### Async execution

 Configure the `fire_and_forget` property. Set it to True if you want the request to be sent in the background,
diff --git a/inference/core/workflows/execution_engine/entities/types.py b/inference/core/workflows/execution_engine/entities/types.py
index aac3a76c7..d70948857 100644
--- a/inference/core/workflows/execution_engine/entities/types.py
+++ b/inference/core/workflows/execution_engine/entities/types.py
@@ -1040,6 +1040,14 @@ def __hash__(self) -> int:
     internal_data_type="str",
 )

+SECRET_KIND = Kind(
+    name="secret",
+    description="Secret value",
+    docs="This kind represents a secret - password or other credential that should remain confidential.",
+    serialised_data_type="str",
+    internal_data_type="str",
+)
+
 STEP_AS_SELECTED_ELEMENT = "step"
 STEP_OUTPUT_AS_SELECTED_ELEMENT = "step_output"

diff --git a/inference/core/workflows/execution_engine/v1/core.py b/inference/core/workflows/execution_engine/v1/core.py
index 7135fdb36..63ca12c1c 100644
--- a/inference/core/workflows/execution_engine/v1/core.py
+++ b/inference/core/workflows/execution_engine/v1/core.py
@@ -21,7 +21,7 @@
     validate_runtime_input,
 )

-EXECUTION_ENGINE_V1_VERSION = Version("1.3.0")
+EXECUTION_ENGINE_V1_VERSION = Version("1.4.0")


 class ExecutionEngineV1(BaseExecutionEngine):
From e53267390b2f279823f16fdbaaf1b8e3f4de31e7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?=
Date: Thu, 28 Nov 2024 14:25:35 +0100
Subject: [PATCH 02/10] Add blocks with Slack and Twilio SMS notifications

---
 inference/core/workflows/core_steps/loader.py |   8 +
 .../core_steps/sinks/slack/__init__.py        |   0
 .../sinks/slack/notification/__init__.py      |   0
 .../core_steps/sinks/slack/notification/v1.py | 342 +++++++++++++++++
 .../core_steps/sinks/twilio/__init__.py       |   0
 .../core_steps/sinks/twilio/sms/__init__.py   |   0
 .../core_steps/sinks/twilio/sms/v1.py         | 344 ++++++++++++++++++
 requirements/_requirements.txt                |   4 +-
 8 files changed, 697 insertions(+), 1 deletion(-)
 create mode 100644 inference/core/workflows/core_steps/sinks/slack/__init__.py
 create mode 100644 inference/core/workflows/core_steps/sinks/slack/notification/__init__.py
 create mode 100644 inference/core/workflows/core_steps/sinks/slack/notification/v1.py
 create mode 100644 inference/core/workflows/core_steps/sinks/twilio/__init__.py
 create mode 100644 inference/core/workflows/core_steps/sinks/twilio/sms/__init__.py
 create mode 100644 inference/core/workflows/core_steps/sinks/twilio/sms/v1.py

diff --git a/inference/core/workflows/core_steps/loader.py b/inference/core/workflows/core_steps/loader.py
index 01187a1c3..d72daa36b 100644
--- a/inference/core/workflows/core_steps/loader.py
+++ b/inference/core/workflows/core_steps/loader.py
@@ -237,6 +237,12 @@
 from inference.core.workflows.core_steps.sinks.roboflow.model_monitoring_inference_aggregator.v1 import (
     ModelMonitoringInferenceAggregatorBlockV1,
 )
+from inference.core.workflows.core_steps.sinks.slack.notification.v1 import (
+    SlackNotificationBlockV1,
+)
+from inference.core.workflows.core_steps.sinks.twilio.sms.v1 import (
+    TwilioSMSNotificationBlockV1,
+)
 from inference.core.workflows.core_steps.sinks.webhook.v1 import WebhookSinkBlockV1
 from inference.core.workflows.core_steps.transformations.absolute_static_crop.v1 import (
     AbsoluteStaticCropBlockV1,
@@ -560,6 +566,8 @@
         VLMAsClassifierBlockV2,
         VLMAsDetectorBlockV2,
         EnvironmentSecretsStoreBlockV1,
+        SlackNotificationBlockV1,
+        TwilioSMSNotificationBlockV1,
     ]


diff --git a/inference/core/workflows/core_steps/sinks/slack/__init__.py b/inference/core/workflows/core_steps/sinks/slack/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/inference/core/workflows/core_steps/sinks/slack/notification/__init__.py b/inference/core/workflows/core_steps/sinks/slack/notification/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/inference/core/workflows/core_steps/sinks/slack/notification/v1.py b/inference/core/workflows/core_steps/sinks/slack/notification/v1.py
new file mode 100644
index 000000000..332edc034
--- /dev/null
+++ b/inference/core/workflows/core_steps/sinks/slack/notification/v1.py
@@ -0,0 +1,342 @@
+import hashlib
+import logging
+import re
+from collections import defaultdict
+from concurrent.futures import ThreadPoolExecutor
+from datetime import datetime
+from functools import partial
+from typing import Any, Dict, List, Literal, Optional, Tuple, Type, Union
+
+from fastapi import BackgroundTasks
+from pydantic import ConfigDict, Field, field_validator
+from slack_sdk import WebClient
+from slack_sdk.errors import SlackApiError
+
+from inference.core.cache.base import BaseCache
+from inference.core.workflows.core_steps.common.query_language.entities.operations import (
+    AllOperationsType,
+)
+from inference.core.workflows.core_steps.common.query_language.operations.core import (
+    build_operations_chain,
+)
+from inference.core.workflows.execution_engine.entities.base import OutputDefinition
+from inference.core.workflows.execution_engine.entities.types import (
+    BOOLEAN_KIND,
+    BYTES_KIND,
+    INTEGER_KIND,
+    SECRET_KIND,
+    STRING_KIND,
+    Selector,
+)
+from inference.core.workflows.prototypes.block import (
+    BlockResult,
+    WorkflowBlock,
+    WorkflowBlockManifest,
+)
+
+CACHE_EXPIRE_TIME = 15 * 60
+
+LONG_DESCRIPTION = """
+"""
+
+PARAMETER_REGEX = re.compile(r"({{\s*\$parameters\.(\w+)\s*}})")
+
+
+class BlockManifest(WorkflowBlockManifest):
+    model_config = ConfigDict(
+        json_schema_extra={
+            "name": "Slack Notification",
+            "version": "v1",
+            "short_description": "Send notification via Slack",
+            "long_description": LONG_DESCRIPTION,
+            "license": "Apache-2.0",
+            "block_type": "sink",
+        }
+    )
+    type: Literal["roboflow_core/slack_notification@v1"]
+    slack_token: Union[str, Selector(kind=[STRING_KIND, SECRET_KIND])] = Field(
+        description="Slack Token. Visit "
+        "Slack docs "
+        "to find out how to generate the token.",
+        private=True,
+        examples=["$inputs.slack_token"],
+    )
+    message: str = Field(
+        description="Content of the message to be sent",
+        examples=[
+            "During last 5 minutes detected \{\{ $parameters.num_instances \}\} instances"
+        ],
+    )
+    channel: Union[str, Selector(kind=[STRING_KIND])] = Field(
+        description="Identifier of Slack channel",
+        examples=["$inputs.slack_channel_id"],
+    )
+    message_parameters: Dict[
+        str,
+        Union[Selector(), Selector(), str, int, float, bool],
+    ] = Field(
+        description="References data to be used to fill the message placeholders",
+        examples=[
+            {
+                "predictions": "$steps.model.predictions",
+                "reference": "$inputs.reference_class_names",
+            }
+        ],
+        default_factory=dict,
+    )
+    message_parameters_operations: Dict[str, List[AllOperationsType]] = Field(
+        description="UQL definitions of operations to be performed on defined data w.r.t. each message parameter",
+        examples=[
+            {
+                "predictions": [
+                    {"type": "DetectionsPropertyExtract", "property_name": "class_name"}
+                ]
+            }
+        ],
+        default_factory=dict,
+    )
+    attachments: Dict[str, Selector(kind=[STRING_KIND, BYTES_KIND])] = Field(
+        description="Attachments",
+        default_factory=dict,
+        examples=[{"report.csv": "$steps.csv_formatter.csv_content"}],
+    )
+    fire_and_forget: Union[bool, Selector(kind=[BOOLEAN_KIND])] = Field(
+        default=True,
+        description="Boolean flag dictating if sink is supposed to be executed in the background, "
+        "not waiting on status of registration before end of workflow run. Use `True` if best-effort "
+        "registration is needed, use `False` while debugging and if error handling is needed",
+        examples=["$inputs.fire_and_forget", False],
+    )
+    disable_sink: Union[bool, Selector(kind=[BOOLEAN_KIND])] = Field(
+        default=False,
+        description="Boolean flag that can also be a reference to input - to arbitrarily disable "
+        "notifications for specific requests",
+        examples=[False, "$inputs.disable_slack_notifications"],
+    )
+    cooldown_seconds: Union[int, Selector(kind=[INTEGER_KIND])] = Field(
+        default=5,
+        description="Number of seconds to wait until follow-up notification can be sent. "
+        f"Maximum value: {CACHE_EXPIRE_TIME} seconds (15 minutes)",
+        examples=["$inputs.cooldown_seconds", 3],
+        json_schema_extra={
+            "always_visible": True,
+        },
+    )
+    cooldown_session_key: str = Field(
+        description="Unique key used internally to implement cooldown. Must be unique for each step in your Workflow.",
+        examples=["session-1v73kdhfse"],
+        json_schema_extra={"hidden": True},
+    )
+
+    @field_validator("cooldown_seconds")
+    @classmethod
+    def ensure_cooldown_seconds_within_bounds(cls, value: Any) -> Any:
+        if isinstance(value, int) and (value < 0 or value > CACHE_EXPIRE_TIME):
+            raise ValueError(
+                f"`cooldown_seconds` must be in range [0, {CACHE_EXPIRE_TIME}]"
+            )
+        return value
+
+    @classmethod
+    def describe_outputs(cls) -> List[OutputDefinition]:
+        return [
+            OutputDefinition(name="error_status", kind=[BOOLEAN_KIND]),
+            OutputDefinition(name="throttling_status", kind=[BOOLEAN_KIND]),
+            OutputDefinition(name="message", kind=[STRING_KIND]),
+        ]
+
+    @classmethod
+    def get_execution_engine_compatibility(cls) -> Optional[str]:
+        return ">=1.4.0,<2.0.0"
+
+
+class SlackNotificationBlockV1(WorkflowBlock):
+
+    def __init__(
+        self,
+        cache: BaseCache,
+        background_tasks: Optional[BackgroundTasks],
+        thread_pool_executor: Optional[ThreadPoolExecutor],
+    ):
+        self._cache = cache
+        self._background_tasks = background_tasks
+        self._thread_pool_executor = thread_pool_executor
+        self._clients: Dict[str, WebClient] = {}
+
+    @classmethod
+    def get_init_parameters(cls) -> List[str]:
+        return ["cache", "background_tasks", "thread_pool_executor"]
+
+    @classmethod
+    def get_manifest(cls) -> Type[WorkflowBlockManifest]:
+        return BlockManifest
+
+    def run(
+        self,
+        slack_token: str,
+        message: str,
+        channel: str,
+        message_parameters: Dict[str, Any],
+        message_parameters_operations: Dict[str, List[AllOperationsType]],
+        attachments: Dict[str, str],
+        fire_and_forget: bool,
+        disable_sink: bool,
+        cooldown_seconds: int,
+        cooldown_session_key: str,
+    ) -> BlockResult:
+        if disable_sink:
+            return {
+                "error_status": False,
+                "throttling_status": False,
+                "message": "Sink was disabled by parameter `disable_sink`",
+            }
+        token_hash = hashlib.sha256(slack_token.encode("utf-8")).hexdigest()
+        cache_key = _generate_cache_key_for_cooldown_session(
+            session_key=cooldown_session_key,
+            token_hash=token_hash,
+        )
+        last_notification_fired = self._cache.get(cache_key)
+        seconds_since_last_notification = cooldown_seconds
+        if last_notification_fired is not None:
+            last_notification_fired = datetime.fromisoformat(last_notification_fired)
+            seconds_since_last_notification = (
+                datetime.now() - last_notification_fired
+            ).total_seconds()
+        if seconds_since_last_notification < cooldown_seconds:
+            logging.info("Activated `roboflow_core/slack_notification@v1` cooldown.")
+            return {
+                "error_status": False,
+                "throttling_status": True,
+                "message": "Sink cooldown applies",
+            }
+        if token_hash not in self._clients:
+            self._clients[token_hash] = WebClient(token=slack_token)
+        client = self._clients[token_hash]
+        message = format_message(
+            message=message,
+            message_parameters=message_parameters,
+            message_parameters_operations=message_parameters_operations,
+        )
+        send_notification_handler = partial(
+            send_slack_notification,
+            client=client,
+            channel=channel,
+            message=message,
+            attachments=attachments,
+        )
+        last_notification_fired = datetime.now().isoformat()
+        self._cache.set(
+            key=cache_key, value=last_notification_fired, expire=CACHE_EXPIRE_TIME
+        )
+        if fire_and_forget and self._background_tasks:
+            self._background_tasks.add_task(send_notification_handler)
+            return {
+                "error_status": False,
+                "throttling_status": False,
+                "message": "Notification sent in the background task",
+            }
+        if fire_and_forget and self._thread_pool_executor:
+            self._thread_pool_executor.submit(send_notification_handler)
+            return {
+                "error_status": False,
+                "throttling_status": False,
+                "message": "Notification sent in the background task",
+            }
+        error_status, message = send_notification_handler()
+        return {
+            "error_status": error_status,
+            "throttling_status": False,
+            "message": message,
+        }
+
+
+def _generate_cache_key_for_cooldown_session(session_key: str, token_hash: str) -> str:
+    return (
+        f"workflows:steps_cache:roboflow_core/slack_notification@v1:"
+        f"{token_hash}:{session_key}:last_notification_time"
+    )
+
+
+def format_message(
+    message: str,
+    message_parameters: Dict[str, Any],
+    message_parameters_operations: Dict[str, List[AllOperationsType]],
+) -> str:
+    matching_parameters = PARAMETER_REGEX.findall(message)
+    parameters_to_get_values = {
+        p[1] for p in matching_parameters if p[1] in message_parameters
+    }
+    parameters_values = {}
+    for parameter_name in parameters_to_get_values:
+        parameter_value = message_parameters[parameter_name]
+        operations = message_parameters_operations.get(parameter_name)
+        if not operations:
+            parameters_values[parameter_name] = parameter_value
+            continue
+        operations_chain = build_operations_chain(operations=operations)
+        parameters_values[parameter_name] = operations_chain(
+            parameter_value, global_parameters={}
+        )
+    parameter_to_placeholders = defaultdict(list)
+    for placeholder, parameter_name in matching_parameters:
+        if parameter_name not in parameters_to_get_values:
+            continue
+        parameter_to_placeholders[parameter_name].append(placeholder)
+    for parameter_name, placeholders in parameter_to_placeholders.items():
+        for placeholder in placeholders:
+            message = message.replace(
+                placeholder, str(parameters_values[parameter_name])
+            )
+    return message
+
+
+def send_slack_notification(
+    client: WebClient,
+    channel: str,
+    message: str,
+    attachments: Dict[str, Union[str, bytes]],
+) -> Tuple[bool, str]:
+    try:
+        _send_slack_notification(
+            client=client,
+            channel=channel,
+            message=message,
+            attachments=attachments,
+        )
+        return False, "Notification sent successfully"
+    except SlackApiError as error:
+        error_details = error.response.get("error", "Not Available.")
+        logging.warning(f"Could not send Slack notification. Error: {error_details}")
+        return (
+            True,
+            f"Failed to send Slack notification. Internal error details: {error_details}",
+        )
+    except Exception as error:
+        logging.warning(f"Could not send Slack notification. Error: {str(error)}")
+        return True, f"Failed to send Slack notification. Internal error details: {error}"
+
+
+def _send_slack_notification(
+    client: WebClient,
+    channel: str,
+    message: str,
+    attachments: Dict[str, Union[str, bytes]],
+) -> None:
+    if not attachments:
+        _ = client.chat_postMessage(
+            channel=channel,
+            text=message,
+        )
+        return
+    file_uploads = [
+        {
+            "title": name,
+            "content": value,
+        }
+        for name, value in attachments.items()
+    ]
+    _ = client.files_upload_v2(
+        channel=channel,
+        initial_comment=message,
+        file_uploads=file_uploads,
+    )
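
For reference, the placeholder resolution implemented by `format_message` works roughly like this (an illustrative example, not part of the patch; the values are made up):

# "{{ $parameters.<name> }}" placeholders are matched by PARAMETER_REGEX, the
# referenced values are optionally transformed by UQL operations, and each
# placeholder is replaced with the stringified result:
#
#   format_message(
#       message="Detected {{ $parameters.predictions }} objects",
#       message_parameters={"predictions": ["car", "dog", "dog"]},
#       message_parameters_operations={
#           "predictions": [{"type": "SequenceLength"}],
#       },
#   )
#   # -> "Detected 3 objects"
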
diff --git a/inference/core/workflows/core_steps/sinks/twilio/__init__.py b/inference/core/workflows/core_steps/sinks/twilio/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/inference/core/workflows/core_steps/sinks/twilio/sms/__init__.py b/inference/core/workflows/core_steps/sinks/twilio/sms/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/inference/core/workflows/core_steps/sinks/twilio/sms/v1.py b/inference/core/workflows/core_steps/sinks/twilio/sms/v1.py
new file mode 100644
index 000000000..1dfcf36e7
--- /dev/null
+++ b/inference/core/workflows/core_steps/sinks/twilio/sms/v1.py
@@ -0,0 +1,344 @@
+import hashlib
+import logging
+import re
+from collections import defaultdict
+from concurrent.futures import ThreadPoolExecutor
+from datetime import datetime
+from functools import partial
+from typing import Any, Dict, List, Literal, Optional, Tuple, Type, Union
+
+from fastapi import BackgroundTasks
+from pydantic import ConfigDict, Field, field_validator
+from twilio.rest import Client
+
+from inference.core.cache.base import BaseCache
+from inference.core.workflows.core_steps.common.query_language.entities.operations import (
+    AllOperationsType,
+)
+from inference.core.workflows.core_steps.common.query_language.operations.core import (
+    build_operations_chain,
+)
+from inference.core.workflows.execution_engine.entities.base import OutputDefinition
+from inference.core.workflows.execution_engine.entities.types import (
+    BOOLEAN_KIND,
+    INTEGER_KIND,
+    SECRET_KIND,
+    STRING_KIND,
+    Selector,
+)
+from inference.core.workflows.prototypes.block import (
+    BlockResult,
+    WorkflowBlock,
+    WorkflowBlockManifest,
+)
+
+CACHE_EXPIRE_TIME = 15 * 60
+TRUNCATION_MARKER = "[...]"
+
+LONG_DESCRIPTION = """
+"""
+
+PARAMETER_REGEX = re.compile(r"({{\s*\$parameters\.(\w+)\s*}})")
+
+
+class BlockManifest(WorkflowBlockManifest):
+    model_config = ConfigDict(
+        json_schema_extra={
+            "name": "Twilio SMS Notification",
+            "version": "v1",
+            "short_description": "Send notification via Twilio SMS service",
+            "long_description": LONG_DESCRIPTION,
+            "license": "Apache-2.0",
+            "block_type": "sink",
+        }
+    )
+    type: Literal["roboflow_core/twilio_sms_notification@v1"]
+    twilio_account_sid: Union[str, Selector(kind=[STRING_KIND, SECRET_KIND])] = Field(
+        title="Twilio Account SID",
+        description="Twilio Account SID. Visit "
+        "Twilio Console "
+        "to set up SMS service and fetch the value.",
+        private=True,
+        examples=["$inputs.twilio_account_sid"],
+    )
+    twilio_auth_token: Union[str, Selector(kind=[STRING_KIND, SECRET_KIND])] = Field(
+        title="Twilio Auth Token",
+        description="Twilio Auth Token. Visit "
+        "Twilio Console "
+        "to set up SMS service and fetch the value.",
+        private=True,
+        examples=["$inputs.twilio_auth_token"],
+    )
+    message: str = Field(
+        description="Content of the message to be sent",
+        examples=[
+            "During last 5 minutes detected \{\{ $parameters.num_instances \}\} instances"
+        ],
+    )
+    sender_number: Union[str, Selector(kind=[STRING_KIND])] = Field(
+        description="Sender phone number",
+        examples=["$inputs.sender_number"],
+    )
+    receiver_number: Union[str, Selector(kind=[STRING_KIND])] = Field(
+        description="Receiver phone number",
+        examples=["$inputs.receiver_number"],
+    )
+    message_parameters: Dict[
+        str,
+        Union[Selector(), Selector(), str, int, float, bool],
+    ] = Field(
+        description="References data to be used to fill the message placeholders",
+        examples=[
+            {
+                "predictions": "$steps.model.predictions",
+                "reference": "$inputs.reference_class_names",
+            }
+        ],
+        default_factory=dict,
+    )
+    message_parameters_operations: Dict[str, List[AllOperationsType]] = Field(
+        description="UQL definitions of operations to be performed on defined data w.r.t. each message parameter",
+        examples=[
+            {
+                "predictions": [
+                    {"type": "DetectionsPropertyExtract", "property_name": "class_name"}
+                ]
+            }
+        ],
+        default_factory=dict,
+    )
+    fire_and_forget: Union[bool, Selector(kind=[BOOLEAN_KIND])] = Field(
+        default=True,
+        description="Boolean flag dictating if sink is supposed to be executed in the background, "
+        "not waiting on status of registration before end of workflow run. Use `True` if best-effort "
+        "registration is needed, use `False` while debugging and if error handling is needed",
+        examples=["$inputs.fire_and_forget", False],
+    )
+    disable_sink: Union[bool, Selector(kind=[BOOLEAN_KIND])] = Field(
+        default=False,
+        description="Boolean flag that can also be a reference to input - to arbitrarily disable "
+        "notifications for specific requests",
+        examples=[False, "$inputs.disable_sms_notifications"],
+    )
+    cooldown_seconds: Union[int, Selector(kind=[INTEGER_KIND])] = Field(
+        default=5,
+        description="Number of seconds to wait until follow-up notification can be sent. "
+        f"Maximum value: {CACHE_EXPIRE_TIME} seconds (15 minutes)",
+        examples=["$inputs.cooldown_seconds", 3],
+        json_schema_extra={
+            "always_visible": True,
+        },
+    )
+    cooldown_session_key: str = Field(
+        description="Unique key used internally to implement cooldown. Must be unique for each step in your Workflow.",
+        examples=["session-1v73kdhfse"],
+        json_schema_extra={"hidden": True},
+    )
+    length_limit: Union[int, Selector(kind=[INTEGER_KIND])] = Field(
+        default=160,
+        description="Maximum number of characters in SMS notification (longer messages will be truncated).",
+        examples=["$inputs.sms_length_limit", 3],
+        json_schema_extra={
+            "always_visible": True,
+        },
+    )
+
+    @field_validator("cooldown_seconds")
+    @classmethod
+    def ensure_cooldown_seconds_within_bounds(cls, value: Any) -> Any:
+        if isinstance(value, int) and (value < 0 or value > CACHE_EXPIRE_TIME):
+            raise ValueError(
+                f"`cooldown_seconds` must be in range [0, {CACHE_EXPIRE_TIME}]"
+            )
+        return value
+
+    @classmethod
+    def describe_outputs(cls) -> List[OutputDefinition]:
+        return [
+            OutputDefinition(name="error_status", kind=[BOOLEAN_KIND]),
+            OutputDefinition(name="throttling_status", kind=[BOOLEAN_KIND]),
+            OutputDefinition(name="message", kind=[STRING_KIND]),
+        ]
+
+    @classmethod
+    def get_execution_engine_compatibility(cls) -> Optional[str]:
+        return ">=1.4.0,<2.0.0"
+
+
+class TwilioSMSNotificationBlockV1(WorkflowBlock):
+
+    def __init__(
+        self,
+        cache: BaseCache,
+        background_tasks: Optional[BackgroundTasks],
+        thread_pool_executor: Optional[ThreadPoolExecutor],
+    ):
+        self._cache = cache
+        self._background_tasks = background_tasks
+        self._thread_pool_executor = thread_pool_executor
+        self._clients: Dict[str, Client] = {}
+
+    @classmethod
+    def get_init_parameters(cls) -> List[str]:
+        return ["cache", "background_tasks", "thread_pool_executor"]
+
+    @classmethod
+    def get_manifest(cls) -> Type[WorkflowBlockManifest]:
+        return BlockManifest
+
+    def run(
+        self,
+        twilio_account_sid: str,
+        twilio_auth_token: str,
+        message: str,
+        sender_number: str,
+        receiver_number: str,
+        message_parameters: Dict[str, Any],
+        message_parameters_operations: Dict[str, List[AllOperationsType]],
+        fire_and_forget: bool,
+        disable_sink: bool,
+        cooldown_seconds: int,
+        cooldown_session_key: str,
+        length_limit: int,
+    ) -> BlockResult:
+        if disable_sink:
+            return {
+                "error_status": False,
+                "throttling_status": False,
+                "message": "Sink was disabled by parameter `disable_sink`",
+            }
+        credentials_hash = _hash_credentials(
+            twilio_account_sid=twilio_account_sid,
+            twilio_auth_token=twilio_auth_token,
+        )
+        cache_key = _generate_cache_key_for_cooldown_session(
+            session_key=cooldown_session_key,
+            token_hash=credentials_hash,
+        )
+        last_notification_fired = self._cache.get(cache_key)
+        seconds_since_last_notification = cooldown_seconds
+        if last_notification_fired is not None:
+            last_notification_fired = datetime.fromisoformat(last_notification_fired)
+            seconds_since_last_notification = (
+                datetime.now() - last_notification_fired
+            ).total_seconds()
+        if seconds_since_last_notification < cooldown_seconds:
+            logging.info(
+                "Activated `roboflow_core/twilio_sms_notification@v1` cooldown."
+            )
+            return {
+                "error_status": False,
+                "throttling_status": True,
+                "message": "Sink cooldown applies",
+            }
+        if credentials_hash not in self._clients:
+            self._clients[credentials_hash] = Client(
+                account_sid=twilio_account_sid,
+                password=twilio_auth_token,
+            )
+        client = self._clients[credentials_hash]
+        message = format_message(
+            message=message,
+            message_parameters=message_parameters,
+            message_parameters_operations=message_parameters_operations,
+            length_limit=length_limit,
+        )
+        send_notification_handler = partial(
+            send_sms_notification,
+            client=client,
+            message=message,
+            sender_number=sender_number,
+            receiver_number=receiver_number,
+        )
+        last_notification_fired = datetime.now().isoformat()
+        self._cache.set(
+            key=cache_key, value=last_notification_fired, expire=CACHE_EXPIRE_TIME
+        )
+        if fire_and_forget and self._background_tasks:
+            self._background_tasks.add_task(send_notification_handler)
+            return {
+                "error_status": False,
+                "throttling_status": False,
+                "message": "Notification sent in the background task",
+            }
+        if fire_and_forget and self._thread_pool_executor:
+            self._thread_pool_executor.submit(send_notification_handler)
+            return {
+                "error_status": False,
+                "throttling_status": False,
+                "message": "Notification sent in the background task",
+            }
+        error_status, message = send_notification_handler()
+        return {
+            "error_status": error_status,
+            "throttling_status": False,
+            "message": message,
+        }
+
+
+def _hash_credentials(twilio_account_sid: str, twilio_auth_token: str) -> str:
+    sid_hash = hashlib.sha256(twilio_account_sid.encode("utf-8")).hexdigest()
+    auth_token_hash = hashlib.sha256(twilio_auth_token.encode("utf-8")).hexdigest()
+    return f"{sid_hash}:{auth_token_hash}"
+
+
+def _generate_cache_key_for_cooldown_session(session_key: str, token_hash: str) -> str:
+    return (
+        f"workflows:steps_cache:roboflow_core/twilio_sms_notification@v1:"
+        f"{token_hash}:{session_key}:last_notification_time"
+    )
+
+
+def format_message(
+    message: str,
+    message_parameters: Dict[str, Any],
+    message_parameters_operations: Dict[str, List[AllOperationsType]],
+    length_limit: int,
+) -> str:
+    matching_parameters = PARAMETER_REGEX.findall(message)
+    parameters_to_get_values = {
+        p[1] for p in matching_parameters if p[1] in message_parameters
+    }
+    parameters_values = {}
+    for parameter_name in parameters_to_get_values:
+        parameter_value = message_parameters[parameter_name]
+        operations = message_parameters_operations.get(parameter_name)
+        if not operations:
+            parameters_values[parameter_name] = parameter_value
+            continue
+        operations_chain = build_operations_chain(operations=operations)
+        parameters_values[parameter_name] = operations_chain(
+            parameter_value, global_parameters={}
+        )
+    parameter_to_placeholders = defaultdict(list)
+    for placeholder, parameter_name in matching_parameters:
+        if parameter_name not in parameters_to_get_values:
+            continue
+        parameter_to_placeholders[parameter_name].append(placeholder)
+    for parameter_name, placeholders in parameter_to_placeholders.items():
+        for placeholder in placeholders:
+            message = message.replace(
+                placeholder, str(parameters_values[parameter_name])
+            )
+    if len(message) > length_limit:
+        truncated_message = message[: length_limit - 1 - len(TRUNCATION_MARKER)]
+        message = f"{truncated_message} {TRUNCATION_MARKER}"
+    return message
+
+
+def send_sms_notification(
+    client: Client,
+    message: str,
+    sender_number: str,
+    receiver_number: str,
+) -> Tuple[bool, str]:
+    try:
+        client.messages.create(
+            body=message,
+            from_=sender_number,
+            to=receiver_number,
+        )
+        return False, "Notification sent successfully"
+    except Exception as error:
+        logging.warning(f"Could not send SMS notification. Error: {str(error)}")
+        return True, f"Failed to send SMS notification. Internal error details: {error}"
diff --git a/requirements/_requirements.txt b/requirements/_requirements.txt
index e57ea10dd..2e5bf8bac 100644
--- a/requirements/_requirements.txt
+++ b/requirements/_requirements.txt
@@ -33,4 +33,6 @@ anthropic~=0.34.2
 pandas>=2.0.0,<2.3.0
 pytest>=8.0.0,<9.0.0 # this is not a joke, sam2 requires this as the fork we are using is dependent on that, yet
 # do not mark the dependency: https://github.com/SauravMaheshkar/samv2/blob/main/sam2/utils/download.py
-tokenizers>=0.19.0,<=0.20.3
\ No newline at end of file
+tokenizers>=0.19.0,<=0.20.3
+slack-sdk~=3.33.4
+twilio~=9.3.7
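
A single step using the new SMS block could be declared as follows (an illustrative sketch; the `$inputs.*` names are hypothetical and the operation mirrors the Slack examples):

# Illustrative only - Twilio SMS notification step summarising model predictions:
sms_step = {
    "type": "roboflow_core/twilio_sms_notification@v1",
    "name": "notification",
    "twilio_account_sid": "$inputs.twilio_account_sid",
    "twilio_auth_token": "$inputs.twilio_auth_token",
    "message": "Detected {{ $parameters.num_objects }} objects",
    "sender_number": "$inputs.sender_number",
    "receiver_number": "$inputs.receiver_number",
    "message_parameters": {"num_objects": "$steps.model.predictions"},
    "message_parameters_operations": {"num_objects": [{"type": "SequenceLength"}]},
    "fire_and_forget": False,
    "cooldown_seconds": 0,
    "cooldown_session_key": "twilio-sms-1",
}
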
"cooldown_seconds": 0, + "cooldown_session_key": "some-unique-key", + } + ], + "outputs": [ + { + "type": "JsonField", + "name": "status", + "selector": "$steps.notification.error_status", + }, + ], +} + + +def test_minimalist_workflow_with_slack_notifications() -> None: + # given + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_PURE_TEXT_NOTIFICATION, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + result = execution_engine.run( + runtime_parameters={}, + ) + + # then + print(result) From d131d06cc089ff274d0f50966fe656799557f9ed Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Thu, 28 Nov 2024 17:18:27 +0100 Subject: [PATCH 04/10] Add first working version of the blocks --- inference/core/entities/common.py | 2 +- .../classical_cv/image_preprocessing/v1.py | 2 +- .../core_steps/sinks/slack/notification/v1.py | 1 - .../core_steps/sinks/twilio/sms/v1.py | 4 +- .../step_input_assembler.py | 48 ++-- .../test_workflow_with_slack_notification.py | 228 +++++++++++++++++- ...test_workflow_with_twillio_notification.py | 123 ++++++++++ 7 files changed, 381 insertions(+), 27 deletions(-) create mode 100644 tests/workflows/integration_tests/execution/test_workflow_with_twillio_notification.py diff --git a/inference/core/entities/common.py b/inference/core/entities/common.py index 768c97209..5dbc98c23 100644 --- a/inference/core/entities/common.py +++ b/inference/core/entities/common.py @@ -3,7 +3,7 @@ ModelID = Field(example="raccoon-detector-1", description="A unique model identifier") ModelType = Field( default=None, - example="object-detection", + examples=["object-detection"], description="The type of the model, usually referring to what task the model performs", ) ApiKey = Field( diff --git a/inference/core/workflows/core_steps/classical_cv/image_preprocessing/v1.py b/inference/core/workflows/core_steps/classical_cv/image_preprocessing/v1.py index d4b4caf59..bb51a2786 100644 --- a/inference/core/workflows/core_steps/classical_cv/image_preprocessing/v1.py +++ b/inference/core/workflows/core_steps/classical_cv/image_preprocessing/v1.py @@ -90,7 +90,7 @@ class ImagePreprocessingManifest(WorkflowBlockManifest): description="Positive value to rotate clockwise, negative value to rotate counterclockwise", default=90, examples=[90, "$inputs.rotation_degrees"], - gte=-360, + ge=-360, le=360, json_schema_extra={ "relevant_for": { diff --git a/inference/core/workflows/core_steps/sinks/slack/notification/v1.py b/inference/core/workflows/core_steps/sinks/slack/notification/v1.py index 1ced6813f..332edc034 100644 --- a/inference/core/workflows/core_steps/sinks/slack/notification/v1.py +++ b/inference/core/workflows/core_steps/sinks/slack/notification/v1.py @@ -184,7 +184,6 @@ def run( cooldown_seconds: int, cooldown_session_key: str, ) -> BlockResult: - print("slack_token", slack_token) if disable_sink: return { "error_status": False, diff --git a/inference/core/workflows/core_steps/sinks/twilio/sms/v1.py b/inference/core/workflows/core_steps/sinks/twilio/sms/v1.py index 1dfcf36e7..629faeae1 100644 --- a/inference/core/workflows/core_steps/sinks/twilio/sms/v1.py +++ b/inference/core/workflows/core_steps/sinks/twilio/sms/v1.py @@ -233,8 +233,8 @@ def run( } if credentials_hash not in self._clients: self._clients[credentials_hash] = Client( - account_sid=twilio_account_sid, - password=twilio_auth_token, + twilio_account_sid, + twilio_auth_token, ) client = self._clients[credentials_hash] message = format_message( diff --git 
index cc5b01064..91c68d2e3 100644
--- a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/step_input_assembler.py
+++ b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/step_input_assembler.py
@@ -39,6 +39,10 @@ class NonBatchModeSIMDStepInput:
     parameters: Dict[str, Any]


+class EmptyStepOutputError(Exception):
+    pass
+
+
 def construct_non_simd_step_input(
     step_node: StepNode,
     runtime_parameters: Dict[str, Any],
@@ -77,22 +81,25 @@
     if False in masks:
         return None
     result = {}
-    for parameter_name, parameter_spec in step_node.input_data.items():
-        if parameter_spec.is_compound_input():
-            result[parameter_name] = construct_non_simd_step_compound_input(
-                step_node=step_node,
-                parameter_spec=parameter_spec,
-                runtime_parameters=runtime_parameters,
-                execution_cache=execution_cache,
-            )
-        else:
-            result[parameter_name] = construct_non_simd_step_non_compound_input(
-                step_node=step_node,
-                parameter_spec=parameter_spec,
-                runtime_parameters=runtime_parameters,
-                execution_cache=execution_cache,
-            )
-    return result
+    try:
+        for parameter_name, parameter_spec in step_node.input_data.items():
+            if parameter_spec.is_compound_input():
+                result[parameter_name] = construct_non_simd_step_compound_input(
+                    step_node=step_node,
+                    parameter_spec=parameter_spec,
+                    runtime_parameters=runtime_parameters,
+                    execution_cache=execution_cache,
+                )
+            else:
+                result[parameter_name] = construct_non_simd_step_non_compound_input(
+                    step_node=step_node,
+                    parameter_spec=parameter_spec,
+                    runtime_parameters=runtime_parameters,
+                    execution_cache=execution_cache,
+                )
+        return result
+    except EmptyStepOutputError:
+        return None


 def construct_non_simd_step_compound_input(
@@ -182,7 +189,14 @@ def construct_non_simd_step_non_compound_input(
         return runtime_parameters[input_name]
     if parameter_spec.points_to_step_output():
         parameter_spec: DynamicStepInputDefinition = parameter_spec  # type: ignore
-        return execution_cache.get_non_batch_output(selector=parameter_spec.selector)
+        step_output = execution_cache.get_non_batch_output(
+            selector=parameter_spec.selector
+        )
+        if step_output is None:
+            raise EmptyStepOutputError(
+                f"Encountered empty step output under: {parameter_spec.selector}"
+            )
+        return step_output
     parameter_spec: StaticStepInputDefinition = parameter_spec  # type: ignore
     return parameter_spec.value
diff --git a/tests/workflows/integration_tests/execution/test_workflow_with_slack_notification.py b/tests/workflows/integration_tests/execution/test_workflow_with_slack_notification.py
index e45542eac..cf4adee5e 100644
--- a/tests/workflows/integration_tests/execution/test_workflow_with_slack_notification.py
+++ b/tests/workflows/integration_tests/execution/test_workflow_with_slack_notification.py
@@ -1,9 +1,24 @@
+import os
+
+import numpy as np
+import pytest
+
 from inference.core.env import WORKFLOWS_MAX_CONCURRENT_STEPS
+from inference.core.managers.base import ModelManager
+from inference.core.workflows.core_steps.common.entities import StepExecutionMode
 from inference.core.workflows.execution_engine.core import ExecutionEngine
+from tests.workflows.integration_tests.execution.workflows_gallery_collector.decorators import (
+    add_to_workflows_gallery,
+)
+
+SLACK_TOKEN = os.getenv("SLACK_TOKEN")
+SLACK_CHANNEL_ID = os.getenv("SLACK_CHANNEL_ID")
os.getenv("SLACK_CHANNEL_ID") WORKFLOW_WITH_PURE_TEXT_NOTIFICATION = { "version": "1.4.0", - "inputs": [], + "inputs": [ + {"type": "WorkflowParameter", "name": "channel_id"}, + ], "steps": [ { "type": "roboflow_core/environment_secrets_store@v1", @@ -15,11 +30,11 @@ "name": "notification", "slack_token": "$steps.vault.slack_token", "message": "This is example message", - "channel": "xxxs", + "channel": "$inputs.channel_id", "fire_and_forget": False, "cooldown_seconds": 0, "cooldown_session_key": "some-unique-key", - } + }, ], "outputs": [ { @@ -31,6 +46,10 @@ } +@pytest.mark.skipif(SLACK_TOKEN is None, reason="`SLACK_TOKEN` variable not exported") +@pytest.mark.skipif( + SLACK_CHANNEL_ID is None, reason="`SLACK_CHANNEL_ID` variable not exported" +) def test_minimalist_workflow_with_slack_notifications() -> None: # given execution_engine = ExecutionEngine.init( @@ -40,8 +59,207 @@ def test_minimalist_workflow_with_slack_notifications() -> None: # when result = execution_engine.run( - runtime_parameters={}, + runtime_parameters={ + "channel_id": SLACK_CHANNEL_ID, + }, + ) + + # then + assert result[0]["status"] is False + + +WORKFLOW_SENDING_PREDICTION_SUMMARY = { + "version": "1.4.0", + "inputs": [ + {"type": "WorkflowImage", "name": "image"}, + { + "type": "WorkflowParameter", + "name": "model_id", + "default_value": "yolov8n-640", + }, + {"type": "WorkflowParameter", "name": "channel_id"}, + {"type": "WorkflowParameter", "name": "slack_token"}, + ], + "steps": [ + { + "type": "roboflow_core/roboflow_keypoint_detection_model@v2", + "name": "detection", + "image": "$inputs.image", + "model_id": "$inputs.model_id", + }, + { + "type": "roboflow_core/slack_notification@v1", + "name": "notification", + "slack_token": "$inputs.slack_token", + "message": "Detected {{ $parameters.predictions }} objects", + "channel": "$inputs.channel_id", + "message_parameters": { + "predictions": "$steps.detection.predictions", + }, + "message_parameters_operations": { + "predictions": [{"type": "SequenceLength"}], + }, + "fire_and_forget": False, + "cooldown_seconds": 0, + "cooldown_session_key": "some-unique-key", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "status", + "selector": "$steps.notification.error_status", + }, + ], +} + + +@add_to_workflows_gallery( + category="Integration with external apps", + use_case_title="Workflow sending notification to Slack", + use_case_description=""" +This Workflow illustrates how to send notification to Slack. 
+ """, + workflow_definition=WORKFLOW_SENDING_PREDICTION_SUMMARY, + workflow_name_in_app="basic-slack-notification", +) +@pytest.mark.skipif(SLACK_TOKEN is None, reason="`SLACK_TOKEN` variable not exported") +@pytest.mark.skipif( + SLACK_CHANNEL_ID is None, reason="`SLACK_CHANNEL_ID` variable not exported" +) +def test_workflow_with_message_based_on_other_step_output( + model_manager: ModelManager, + crowd_image: np.ndarray, +) -> None: + # given + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_SENDING_PREDICTION_SUMMARY, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + result = execution_engine.run( + runtime_parameters={ + "image": [crowd_image, crowd_image], + "channel_id": SLACK_CHANNEL_ID, + "slack_token": SLACK_TOKEN, + }, + ) + + # then + assert result[0]["status"] is False + assert result[1]["status"] is False + + +WORKFLOW_SENDING_PREDICTION_SUMMARY_AND_FILES = { + "version": "1.4.0", + "inputs": [ + {"type": "WorkflowImage", "name": "image"}, + { + "type": "WorkflowParameter", + "name": "model_id", + "default_value": "yolov8n-640", + }, + {"type": "WorkflowParameter", "name": "channel_id"}, + {"type": "WorkflowParameter", "name": "slack_token"}, + ], + "steps": [ + { + "type": "roboflow_core/roboflow_keypoint_detection_model@v2", + "name": "detection", + "image": "$inputs.image", + "model_id": "$inputs.model_id", + }, + { + "type": "roboflow_core/property_definition@v1", + "name": "image_serialization", + "data": "$inputs.image", + "operations": [{"type": "ConvertImageToJPEG"}], + }, + { + "type": "roboflow_core/property_definition@v1", + "name": "predictions_serialization", + "data": "$steps.detection.predictions", + "operations": [ + {"type": "DetectionsToDictionary"}, + {"type": "ConvertDictionaryToJSON"}, + ], + }, + { + "type": "roboflow_core/slack_notification@v1", + "name": "notification", + "slack_token": "$inputs.slack_token", + "message": "Detected {{ $parameters.predictions }} objects", + "channel": "$inputs.channel_id", + "message_parameters": { + "predictions": "$steps.detection.predictions", + }, + "message_parameters_operations": { + "predictions": [{"type": "SequenceLength"}], + }, + "attachments": { + "image.jpg": "$steps.image_serialization.output", + "prediction.json": "$steps.predictions_serialization.output", + }, + "fire_and_forget": False, + "cooldown_seconds": 0, + "cooldown_session_key": "some-unique-key", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "status", + "selector": "$steps.notification.error_status", + }, + ], +} + + +@add_to_workflows_gallery( + category="Integration with external apps", + use_case_title="Workflow sending notification with attachments to Slack", + use_case_description=""" +This Workflow illustrates how to send notification with attachments to Slack. 
+ """, + workflow_definition=WORKFLOW_SENDING_PREDICTION_SUMMARY_AND_FILES, + workflow_name_in_app="basic-slack-notification", +) +@pytest.mark.skipif(SLACK_TOKEN is None, reason="`SLACK_TOKEN` variable not exported") +@pytest.mark.skipif( + SLACK_CHANNEL_ID is None, reason="`SLACK_CHANNEL_ID` variable not exported" +) +def test_workflow_sending_attachments_to_slack( + model_manager: ModelManager, + crowd_image: np.ndarray, +) -> None: + # given + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_SENDING_PREDICTION_SUMMARY_AND_FILES, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + result = execution_engine.run( + runtime_parameters={ + "image": [crowd_image, crowd_image], + "channel_id": SLACK_CHANNEL_ID, + "slack_token": SLACK_TOKEN, + }, ) # then - print(result) + assert result[0]["status"] is False + assert result[1]["status"] is False diff --git a/tests/workflows/integration_tests/execution/test_workflow_with_twillio_notification.py b/tests/workflows/integration_tests/execution/test_workflow_with_twillio_notification.py new file mode 100644 index 000000000..bae97d383 --- /dev/null +++ b/tests/workflows/integration_tests/execution/test_workflow_with_twillio_notification.py @@ -0,0 +1,123 @@ +import os + +import numpy as np +import pytest + +from inference.core.env import WORKFLOWS_MAX_CONCURRENT_STEPS +from inference.core.managers.base import ModelManager +from inference.core.workflows.core_steps.common.entities import StepExecutionMode +from inference.core.workflows.execution_engine.core import ExecutionEngine +from tests.workflows.integration_tests.execution.workflows_gallery_collector.decorators import ( + add_to_workflows_gallery, +) + +WORKFLOWS_TWILIO_ACCOUNT_SID = os.getenv("WORKFLOWS_TWILIO_ACCOUNT_SID") +WORKFLOWS_TWILIO_AUTH_TOKEN = os.getenv("WORKFLOWS_TWILIO_AUTH_TOKEN") +WORKFLOWS_TWILIO_PHONE_NUMBER = os.getenv("WORKFLOWS_TWILIO_PHONE_NUMBER") +WORKFLOWS_RECEIVER_PHONE_NUMBER = os.getenv("WORKFLOWS_RECEIVER_PHONE_NUMBER") + + +WORKFLOW_SENDING_PREDICTION_SUMMARY = { + "version": "1.4.0", + "inputs": [ + {"type": "WorkflowImage", "name": "image"}, + { + "type": "WorkflowParameter", + "name": "model_id", + "default_value": "yolov8n-640", + }, + {"type": "WorkflowParameter", "name": "account_sid"}, + {"type": "WorkflowParameter", "name": "auth_token"}, + {"type": "WorkflowParameter", "name": "sender_number"}, + {"type": "WorkflowParameter", "name": "receiver_number"}, + ], + "steps": [ + { + "type": "roboflow_core/roboflow_keypoint_detection_model@v2", + "name": "detection", + "image": "$inputs.image", + "model_id": "$inputs.model_id", + }, + { + "type": "roboflow_core/twilio_sms_notification@v1", + "name": "notification", + "twilio_account_sid": "$inputs.account_sid", + "twilio_auth_token": "$inputs.auth_token", + "message": "Detected {{ $parameters.predictions }} objects", + "sender_number": "$inputs.sender_number", + "receiver_number": "$inputs.receiver_number", + "message_parameters": { + "predictions": "$steps.detection.predictions", + }, + "message_parameters_operations": { + "predictions": [{"type": "SequenceLength"}], + }, + "fire_and_forget": False, + "cooldown_seconds": 0, + "cooldown_session_key": "some-unique-key", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "status", + 
"selector": "$steps.notification.error_status", + }, + ], +} + + +@add_to_workflows_gallery( + category="Integration with external apps", + use_case_title="Workflow sending notification to Slack", + use_case_description=""" +This Workflow illustrates how to send notification to Slack. + """, + workflow_definition=WORKFLOW_SENDING_PREDICTION_SUMMARY, + workflow_name_in_app="basic-slack-notification", +) +@pytest.mark.skipif( + WORKFLOWS_TWILIO_ACCOUNT_SID is None, + reason="`WORKFLOWS_TWILIO_ACCOUNT_SID` variable not exported", +) +@pytest.mark.skipif( + WORKFLOWS_TWILIO_AUTH_TOKEN is None, + reason="`WORKFLOWS_TWILIO_AUTH_TOKEN` variable not exported", +) +@pytest.mark.skipif( + WORKFLOWS_TWILIO_PHONE_NUMBER is None, + reason="`WORKFLOWS_TWILIO_PHONE_NUMBER` variable not exported", +) +@pytest.mark.skipif( + WORKFLOWS_RECEIVER_PHONE_NUMBER is None, + reason="`WORKFLOWS_RECEIVER_PHONE_NUMBER` variable not exported", +) +def test_workflow_with_message_based_on_other_step_output( + model_manager: ModelManager, + crowd_image: np.ndarray, +) -> None: + # given + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_SENDING_PREDICTION_SUMMARY, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + result = execution_engine.run( + runtime_parameters={ + "image": [crowd_image], + "account_sid": WORKFLOWS_TWILIO_ACCOUNT_SID, + "auth_token": WORKFLOWS_TWILIO_AUTH_TOKEN, + "sender_number": WORKFLOWS_TWILIO_PHONE_NUMBER, + "receiver_number": WORKFLOWS_RECEIVER_PHONE_NUMBER, + }, + ) + + # then + assert result[0]["status"] is False From 6fc7d97a8746780fa0f198d74645db5c27f23d35 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Thu, 28 Nov 2024 17:20:43 +0100 Subject: [PATCH 05/10] Fix test description --- .../execution/test_workflow_with_twillio_notification.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/workflows/integration_tests/execution/test_workflow_with_twillio_notification.py b/tests/workflows/integration_tests/execution/test_workflow_with_twillio_notification.py index bae97d383..19c87e3bd 100644 --- a/tests/workflows/integration_tests/execution/test_workflow_with_twillio_notification.py +++ b/tests/workflows/integration_tests/execution/test_workflow_with_twillio_notification.py @@ -69,12 +69,12 @@ @add_to_workflows_gallery( category="Integration with external apps", - use_case_title="Workflow sending notification to Slack", + use_case_title="Workflow sending SMS notification with Twilio", use_case_description=""" -This Workflow illustrates how to send notification to Slack. +This Workflow illustrates how to send SMS notification with Twilio. 
""", workflow_definition=WORKFLOW_SENDING_PREDICTION_SUMMARY, - workflow_name_in_app="basic-slack-notification", + workflow_name_in_app="basic-twilio-sms-notification", ) @pytest.mark.skipif( WORKFLOWS_TWILIO_ACCOUNT_SID is None, From 2ac2d53320dd1550bb4f9b29b21646e9241699f4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Fri, 29 Nov 2024 12:57:36 +0100 Subject: [PATCH 06/10] Modify Execution Engine to fix bug with empty scalar steps outputs feeding into downstream steps --- .../execution_engine/v1/executor/core.py | 2 +- .../step_input_assembler.py | 210 ++-- .../v1/executor/output_constructor.py | 31 +- .../integration_tests/execution/conftest.py | 3 + .../__init__.py | 221 +++++ ...workflow_with_environment_secrets_store.py | 918 ++++++++++++++++++ .../test_workflow_with_google_vision_ocr.py | 4 +- .../test_workflow_with_slack_notification.py | 4 +- ...test_workflow_with_twillio_notification.py | 2 +- 9 files changed, 1293 insertions(+), 102 deletions(-) create mode 100644 tests/workflows/integration_tests/execution/stub_plugins/plugin_testing_non_simd_step_with_optional_outputs/__init__.py create mode 100644 tests/workflows/integration_tests/execution/test_workflow_with_environment_secrets_store.py diff --git a/inference/core/workflows/execution_engine/v1/executor/core.py b/inference/core/workflows/execution_engine/v1/executor/core.py index 494c7436c..945c31733 100644 --- a/inference/core/workflows/execution_engine/v1/executor/core.py +++ b/inference/core/workflows/execution_engine/v1/executor/core.py @@ -279,7 +279,7 @@ def run_non_simd_step( step_selector=step_selector ) if step_input is None: - # discarded by conditional execution + # discarded by conditional execution or empty value from upstream step return None step_name = get_last_chunk_of_selector(selector=step_selector) step_instance = workflow.steps[step_name].step diff --git a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/step_input_assembler.py b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/step_input_assembler.py index 91c68d2e3..79f9190f0 100644 --- a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/step_input_assembler.py +++ b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/step_input_assembler.py @@ -39,10 +39,6 @@ class NonBatchModeSIMDStepInput: parameters: Dict[str, Any] -class EmptyStepOutputError(Exception): - pass - - def construct_non_simd_step_input( step_node: StepNode, runtime_parameters: Dict[str, Any], @@ -81,25 +77,35 @@ def construct_non_simd_step_input( if False in masks: return None result = {} - try: - for parameter_name, parameter_spec in step_node.input_data.items(): - if parameter_spec.is_compound_input(): - result[parameter_name] = construct_non_simd_step_compound_input( + detected_empty_step_output_selector = False + for parameter_name, parameter_spec in step_node.input_data.items(): + if parameter_spec.is_compound_input(): + result[parameter_name], contains_empty_step_output_selector = ( + construct_non_simd_step_compound_input( step_node=step_node, parameter_spec=parameter_spec, runtime_parameters=runtime_parameters, execution_cache=execution_cache, ) - else: - result[parameter_name] = construct_non_simd_step_non_compound_input( + ) + else: + result[parameter_name], contains_empty_step_output_selector = ( + construct_non_simd_step_non_compound_input( step_node=step_node, parameter_spec=parameter_spec, runtime_parameters=runtime_parameters, 
execution_cache=execution_cache, ) - return result - except EmptyStepOutputError: + ) + detected_empty_step_output_selector = ( + detected_empty_step_output_selector or contains_empty_step_output_selector + ) + if ( + detected_empty_step_output_selector + and not step_node.step_manifest.accepts_empty_values() + ): return None + return result def construct_non_simd_step_compound_input( @@ -107,7 +113,7 @@ def construct_non_simd_step_compound_input( parameter_spec: CompoundStepInputDefinition, runtime_parameters: Dict[str, Any], execution_cache: ExecutionCache, -) -> Any: +) -> Tuple[Any, bool]: if parameter_spec.represents_list_of_inputs(): return construct_non_simd_step_compound_list_input( step_node=step_node, @@ -128,17 +134,23 @@ def construct_non_simd_step_compound_list_input( parameter_spec: CompoundStepInputDefinition, runtime_parameters: Dict[str, Any], execution_cache: ExecutionCache, -) -> List[Any]: +) -> Tuple[List[Any], bool]: result = [] + contains_empty_step_output_selector = False for nested_definition in parameter_spec.iterate_through_definitions(): - nested_value = construct_non_simd_step_non_compound_input( - step_node=step_node, - parameter_spec=nested_definition, - runtime_parameters=runtime_parameters, - execution_cache=execution_cache, + nested_value, value_contains_empty_selector = ( + construct_non_simd_step_non_compound_input( + step_node=step_node, + parameter_spec=nested_definition, + runtime_parameters=runtime_parameters, + execution_cache=execution_cache, + ) ) result.append(nested_value) - return result + contains_empty_step_output_selector = ( + contains_empty_step_output_selector or value_contains_empty_selector + ) + return result, contains_empty_step_output_selector def construct_non_simd_step_compound_dict_input( @@ -146,18 +158,23 @@ def construct_non_simd_step_compound_dict_input( parameter_spec: CompoundStepInputDefinition, runtime_parameters: Dict[str, Any], execution_cache: ExecutionCache, -) -> Dict[str, Any]: +) -> Tuple[Dict[str, Any], bool]: result = {} + contains_empty_step_output_selector = False for nested_definition in parameter_spec.iterate_through_definitions(): - result[nested_definition.parameter_specification.nested_element_key] = ( - construct_non_simd_step_non_compound_input( - step_node=step_node, - parameter_spec=nested_definition, - runtime_parameters=runtime_parameters, - execution_cache=execution_cache, - ) + ( + result[nested_definition.parameter_specification.nested_element_key], + value_contains_empty_selector, + ) = construct_non_simd_step_non_compound_input( + step_node=step_node, + parameter_spec=nested_definition, + runtime_parameters=runtime_parameters, + execution_cache=execution_cache, ) - return result + contains_empty_step_output_selector = ( + contains_empty_step_output_selector or value_contains_empty_selector + ) + return result, contains_empty_step_output_selector def construct_non_simd_step_non_compound_input( @@ -165,7 +182,7 @@ def construct_non_simd_step_non_compound_input( parameter_spec: StepInputDefinition, runtime_parameters: Dict[str, Any], execution_cache: ExecutionCache, -) -> Any: +) -> Tuple[Any, bool]: if parameter_spec.is_compound_input(): raise AssumptionError( public_message=f"Workflows Execution Error encountered unexpected state probably related to the fact " @@ -186,19 +203,15 @@ def construct_non_simd_step_non_compound_input( if parameter_spec.points_to_input(): parameter_spec: DynamicStepInputDefinition = parameter_spec # type: ignore input_name = 
get_last_chunk_of_selector(selector=parameter_spec.selector) - return runtime_parameters[input_name] + return runtime_parameters[input_name], False if parameter_spec.points_to_step_output(): parameter_spec: DynamicStepInputDefinition = parameter_spec # type: ignore step_output = execution_cache.get_non_batch_output( selector=parameter_spec.selector ) - if step_output is None: - raise EmptyStepOutputError( - f"Encountered empty step output under: {parameter_spec.selector}" - ) - return step_output + return step_output, step_output is None parameter_spec: StaticStepInputDefinition = parameter_spec # type: ignore - return parameter_spec.value + return parameter_spec.value, False def iterate_over_simd_step_input( @@ -341,32 +354,41 @@ def prepare_parameters( indices_for_parameter = {} guard_of_indices_wrapping = GuardForIndicesWrapping() compound_inputs = set() + contains_empty_scalar_step_output_selector = False for parameter_name, parameter_specs in step_node.input_data.items(): if parameter_specs.is_compound_input(): - result[parameter_name], indices_for_parameter[parameter_name] = ( - get_compound_parameter_value( - parameter=parameter_specs, - step_execution_dimensionality=step_node.step_execution_dimensionality, - masks=masks, - dynamic_batches_manager=dynamic_batches_manager, - runtime_parameters=runtime_parameters, - execution_cache=execution_cache, - guard_of_indices_wrapping=guard_of_indices_wrapping, - ) + ( + result[parameter_name], + indices_for_parameter[parameter_name], + value_contains_empty_scalar_step_output_selector, + ) = get_compound_parameter_value( + parameter=parameter_specs, + step_execution_dimensionality=step_node.step_execution_dimensionality, + masks=masks, + dynamic_batches_manager=dynamic_batches_manager, + runtime_parameters=runtime_parameters, + execution_cache=execution_cache, + guard_of_indices_wrapping=guard_of_indices_wrapping, ) compound_inputs.add(parameter_name) else: - result[parameter_name], indices_for_parameter[parameter_name] = ( - get_non_compound_parameter_value( - parameter=parameter_specs, - step_execution_dimensionality=step_node.step_execution_dimensionality, - masks=masks, - dynamic_batches_manager=dynamic_batches_manager, - runtime_parameters=runtime_parameters, - execution_cache=execution_cache, - guard_of_indices_wrapping=guard_of_indices_wrapping, - ) + ( + result[parameter_name], + indices_for_parameter[parameter_name], + value_contains_empty_scalar_step_output_selector, + ) = get_non_compound_parameter_value( + parameter=parameter_specs, + step_execution_dimensionality=step_node.step_execution_dimensionality, + masks=masks, + dynamic_batches_manager=dynamic_batches_manager, + runtime_parameters=runtime_parameters, + execution_cache=execution_cache, + guard_of_indices_wrapping=guard_of_indices_wrapping, ) + contains_empty_scalar_step_output_selector = ( + contains_empty_scalar_step_output_selector + or value_contains_empty_scalar_step_output_selector + ) batch_parameters_indices = [ i for i in indices_for_parameter.values() if i is not None ] @@ -382,6 +404,11 @@ def prepare_parameters( ) indices = batch_parameters_indices[0] if not step_node.step_manifest.accepts_empty_values(): + if contains_empty_scalar_step_output_selector: + return BatchModeSIMDStepInput( + indices=[], + parameters={}, + ) empty_indices = get_empty_batch_elements_indices(value=result) indices = [e for e in indices if e not in empty_indices] result = remove_indices(value=result, indices=empty_indices) @@ -399,49 +426,62 @@ def get_compound_parameter_value( 
runtime_parameters: Dict[str, Any], execution_cache: ExecutionCache, guard_of_indices_wrapping: GuardForIndicesWrapping, -) -> Tuple[Union[list, Dict[str, Any]], Optional[List[DynamicBatchIndex]]]: +) -> Tuple[Union[list, Dict[str, Any]], Optional[List[DynamicBatchIndex]], bool]: + contains_empty_scalar_step_output_selector = False batch_indices = [] if parameter.represents_list_of_inputs(): result = [] for nested_element in parameter.iterate_through_definitions(): - non_compound_parameter_value, non_compound_indices = ( - get_non_compound_parameter_value( - parameter=nested_element, - step_execution_dimensionality=step_execution_dimensionality, - masks=masks, - dynamic_batches_manager=dynamic_batches_manager, - runtime_parameters=runtime_parameters, - execution_cache=execution_cache, - guard_of_indices_wrapping=guard_of_indices_wrapping, - ) + ( + non_compound_parameter_value, + non_compound_indices, + value_contains_empty_scalar_step_output_selector, + ) = get_non_compound_parameter_value( + parameter=nested_element, + step_execution_dimensionality=step_execution_dimensionality, + masks=masks, + dynamic_batches_manager=dynamic_batches_manager, + runtime_parameters=runtime_parameters, + execution_cache=execution_cache, + guard_of_indices_wrapping=guard_of_indices_wrapping, ) result.append(non_compound_parameter_value) + contains_empty_scalar_step_output_selector = ( + contains_empty_scalar_step_output_selector + or value_contains_empty_scalar_step_output_selector + ) if non_compound_indices is not None: batch_indices.append(non_compound_indices) else: result = {} for nested_element in parameter.iterate_through_definitions(): - non_compound_parameter_value, non_compound_indices = ( - get_non_compound_parameter_value( - parameter=nested_element, - step_execution_dimensionality=step_execution_dimensionality, - masks=masks, - dynamic_batches_manager=dynamic_batches_manager, - runtime_parameters=runtime_parameters, - execution_cache=execution_cache, - guard_of_indices_wrapping=guard_of_indices_wrapping, - ) + ( + non_compound_parameter_value, + non_compound_indices, + value_contains_empty_scalar_step_output_selector, + ) = get_non_compound_parameter_value( + parameter=nested_element, + step_execution_dimensionality=step_execution_dimensionality, + masks=masks, + dynamic_batches_manager=dynamic_batches_manager, + runtime_parameters=runtime_parameters, + execution_cache=execution_cache, + guard_of_indices_wrapping=guard_of_indices_wrapping, ) result[nested_element.parameter_specification.nested_element_key] = ( non_compound_parameter_value ) + contains_empty_scalar_step_output_selector = ( + contains_empty_scalar_step_output_selector + or value_contains_empty_scalar_step_output_selector + ) if non_compound_indices is not None: batch_indices.append(non_compound_indices) ensure_compound_input_indices_match(indices=batch_indices) result_indices = None if len(batch_indices) > 0: result_indices = batch_indices[0] - return result, result_indices + return result, result_indices, contains_empty_scalar_step_output_selector def get_non_compound_parameter_value( @@ -452,23 +492,23 @@ def get_non_compound_parameter_value( runtime_parameters: Dict[str, Any], execution_cache: ExecutionCache, guard_of_indices_wrapping: GuardForIndicesWrapping, -) -> Union[Any, Optional[List[DynamicBatchIndex]]]: +) -> Union[Any, Optional[List[DynamicBatchIndex]], bool]: if not parameter.is_batch_oriented(): if parameter.points_to_input(): input_parameter: DynamicStepInputDefinition = parameter # type: ignore parameter_name = 
get_last_chunk_of_selector( selector=input_parameter.selector ) - return runtime_parameters[parameter_name], None + return runtime_parameters[parameter_name], None, False elif parameter.points_to_step_output(): input_parameter: DynamicStepInputDefinition = parameter # type: ignore value = execution_cache.get_non_batch_output( selector=input_parameter.selector ) - return value, None + return value, None, value is None else: static_input: StaticStepInputDefinition = parameter # type: ignore - return static_input.value, None + return static_input.value, None, False dynamic_parameter: DynamicStepInputDefinition = parameter # type: ignore parameter_dimensionality = dynamic_parameter.get_dimensionality() lineage_indices = dynamic_batches_manager.get_indices_for_data_lineage( @@ -505,7 +545,7 @@ def get_non_compound_parameter_value( mask=mask_for_dimension, ) if step_execution_dimensionality == parameter_dimensionality: - return Batch(batch_input, lineage_indices), lineage_indices + return Batch(batch_input, lineage_indices), lineage_indices, False if step_execution_dimensionality > parameter_dimensionality: raise ExecutionEngineRuntimeError( public_message=f"Detected a situation when parameter: " @@ -539,7 +579,7 @@ def get_non_compound_parameter_value( data=batch_input, guard_of_indices_wrapping=guard_of_indices_wrapping, ) - return result, result.indices + return result, result.indices, False def _flatten_batch_oriented_inputs( diff --git a/inference/core/workflows/execution_engine/v1/executor/output_constructor.py b/inference/core/workflows/execution_engine/v1/executor/output_constructor.py index 6d5a58060..a38d49003 100644 --- a/inference/core/workflows/execution_engine/v1/executor/output_constructor.py +++ b/inference/core/workflows/execution_engine/v1/executor/output_constructor.py @@ -46,27 +46,36 @@ def construct_workflow_output( batch_oriented_outputs = { output for output, indices in output_name2indices.items() if indices is not None } - non_batch_outputs = { - output.name: execution_data_manager.get_non_batch_data(selector=output.selector) - for output in workflow_outputs - if output.name not in batch_oriented_outputs - } - if not batch_oriented_outputs: - return [non_batch_outputs] - dimensionality_for_output_nodes = { + kinds_of_output_nodes = { output.name: node_as( execution_graph=execution_graph, node=construct_output_selector(name=output.name), expected_type=OutputNode, - ).dimensionality + ).kind for output in workflow_outputs } - kinds_of_output_nodes = { + non_batch_outputs = {} + for output in workflow_outputs: + if output.name in batch_oriented_outputs: + continue + data_piece = execution_data_manager.get_non_batch_data(selector=output.selector) + if serialize_results: + output_kind = kinds_of_output_nodes[output.name] + data_piece = serialize_data_piece( + output_name=output.name, + data_piece=data_piece, + kind=output_kind, + kinds_serializers=kinds_serializers, + ) + non_batch_outputs[output.name] = data_piece + if not batch_oriented_outputs: + return [non_batch_outputs] + dimensionality_for_output_nodes = { output.name: node_as( execution_graph=execution_graph, node=construct_output_selector(name=output.name), expected_type=OutputNode, - ).kind + ).dimensionality for output in workflow_outputs } outputs_arrays: Dict[str, Optional[list]] = { diff --git a/tests/workflows/integration_tests/execution/conftest.py b/tests/workflows/integration_tests/execution/conftest.py index bf10c136b..befb8c77e 100644 --- a/tests/workflows/integration_tests/execution/conftest.py +++ 
b/tests/workflows/integration_tests/execution/conftest.py @@ -9,6 +9,9 @@ ASSETS_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "assets")) ROCK_PAPER_SCISSORS_ASSETS = os.path.join(ASSETS_DIR, "rock_paper_scissors") +DUMMY_SECRET_ENV_VARIABLE = "DUMMY_SECRET" +os.environ[DUMMY_SECRET_ENV_VARIABLE] = "this-is-not-a-real-secret" + @pytest.fixture(scope="function") def crowd_image() -> np.ndarray: diff --git a/tests/workflows/integration_tests/execution/stub_plugins/plugin_testing_non_simd_step_with_optional_outputs/__init__.py b/tests/workflows/integration_tests/execution/stub_plugins/plugin_testing_non_simd_step_with_optional_outputs/__init__.py new file mode 100644 index 000000000..49084c1e1 --- /dev/null +++ b/tests/workflows/integration_tests/execution/stub_plugins/plugin_testing_non_simd_step_with_optional_outputs/__init__.py @@ -0,0 +1,221 @@ +from typing import List, Literal, Optional, Type + +from pydantic import Field + +from inference.core.workflows.execution_engine.entities.base import ( + Batch, + OutputDefinition, + WorkflowImageData, +) +from inference.core.workflows.execution_engine.entities.types import ( + IMAGE_KIND, + SECRET_KIND, + STRING_KIND, + Selector, +) +from inference.core.workflows.prototypes.block import ( + BlockResult, + WorkflowBlock, + WorkflowBlockManifest, +) + + +class BlockAcceptingBatchesOfImagesManifest(WorkflowBlockManifest): + type: Literal["block_accepting_batches_of_images"] + image: Selector(kind=[IMAGE_KIND]) = Field( + title="Input Image", + description="The input image for this step.", + ) + secret: Selector(kind=[SECRET_KIND]) + + @classmethod + def get_parameters_accepting_batches(cls) -> List[str]: + return ["image"] + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [ + OutputDefinition(name="output", kind=[STRING_KIND]), + ] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.4.0,<2.0.0" + + +class BlockAcceptingBatchesOfImages(WorkflowBlock): + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return BlockAcceptingBatchesOfImagesManifest + + def run(self, image: Batch[WorkflowImageData], secret: str) -> BlockResult: + return [{"output": "ok"}] * len(image) + + +class BlockAcceptingEmptyBatchesOfImagesManifest(WorkflowBlockManifest): + type: Literal["block_accepting_empty_batches_of_images"] + image: Selector(kind=[IMAGE_KIND]) = Field( + title="Input Image", + description="The input image for this step.", + ) + secret: Selector(kind=[SECRET_KIND]) + + @classmethod + def get_parameters_accepting_batches(cls) -> List[str]: + return ["image"] + + @classmethod + def accepts_empty_values(cls) -> bool: + return True + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [ + OutputDefinition(name="output", kind=[STRING_KIND]), + ] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.4.0,<2.0.0" + + +class BlockAcceptingEmptyBatchesOfImages(WorkflowBlock): + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return BlockAcceptingEmptyBatchesOfImagesManifest + + def run( + self, image: Batch[Optional[WorkflowImageData]], secret: Optional[str] + ) -> BlockResult: + return [{"output": "empty" if secret is None else "ok"}] * len(image) + + +class BlockAcceptingImagesManifest(WorkflowBlockManifest): + type: Literal["block_accepting_images"] + image: Selector(kind=[IMAGE_KIND]) = Field( + title="Input Image", + description="The input image for this step.", 
+ ) + secret: Selector(kind=[SECRET_KIND]) + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [ + OutputDefinition(name="output", kind=[STRING_KIND]), + ] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.4.0,<2.0.0" + + +class BlockAcceptingImages(WorkflowBlock): + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return BlockAcceptingImagesManifest + + def run(self, image: WorkflowImageData, secret: str) -> BlockResult: + return {"output": "ok"} + + +class BlockAcceptingEmptyImagesManifest(WorkflowBlockManifest): + type: Literal["block_accepting_empty_images"] + image: Selector(kind=[IMAGE_KIND]) = Field( + title="Input Image", + description="The input image for this step.", + ) + secret: Selector(kind=[SECRET_KIND]) + + @classmethod + def accepts_empty_values(cls) -> bool: + return True + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [ + OutputDefinition(name="output", kind=[STRING_KIND]), + ] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.4.0,<2.0.0" + + +class BlockAcceptingEmptyImages(WorkflowBlock): + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return BlockAcceptingEmptyImagesManifest + + def run( + self, image: Optional[WorkflowImageData], secret: Optional[str] + ) -> BlockResult: + return {"output": "empty" if secret is None else "ok"} + + +class BlockAcceptingScalarsManifest(WorkflowBlockManifest): + type: Literal["block_accepting_scalars"] + secret: Selector(kind=[SECRET_KIND]) + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [ + OutputDefinition(name="output", kind=[SECRET_KIND]), + ] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.4.0,<2.0.0" + + +class BlockAcceptingScalars(WorkflowBlock): + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return BlockAcceptingScalarsManifest + + def run(self, secret: str) -> BlockResult: + return {"output": secret} + + +class BlockAcceptingEmptyScalarsManifest(WorkflowBlockManifest): + type: Literal["block_accepting_empty_scalars"] + secret: Selector(kind=[SECRET_KIND]) + + @classmethod + def accepts_empty_values(cls) -> bool: + return True + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [ + OutputDefinition(name="output", kind=[SECRET_KIND]), + ] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.4.0,<2.0.0" + + +class BlockAcceptingEmptyScalars(WorkflowBlock): + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return BlockAcceptingEmptyScalarsManifest + + def run(self, secret: Optional[str]) -> BlockResult: + return {"output": "modified-secret"} + + +def load_blocks() -> List[Type[WorkflowBlock]]: + return [ + BlockAcceptingEmptyScalars, + BlockAcceptingScalars, + BlockAcceptingEmptyImages, + BlockAcceptingImages, + BlockAcceptingEmptyBatchesOfImages, + BlockAcceptingBatchesOfImages, + ] diff --git a/tests/workflows/integration_tests/execution/test_workflow_with_environment_secrets_store.py b/tests/workflows/integration_tests/execution/test_workflow_with_environment_secrets_store.py new file mode 100644 index 000000000..29838cff0 --- /dev/null +++ b/tests/workflows/integration_tests/execution/test_workflow_with_environment_secrets_store.py @@ -0,0 +1,918 @@ +import os +from unittest import mock +from unittest.mock import MagicMock + 
+import numpy as np
+
+from inference.core.env import WORKFLOWS_MAX_CONCURRENT_STEPS
+from inference.core.managers.base import ModelManager
+from inference.core.workflows.core_steps.common.entities import StepExecutionMode
+from inference.core.workflows.execution_engine.core import ExecutionEngine
+from inference.core.workflows.execution_engine.introspection import blocks_loader
+from tests.workflows.integration_tests.execution.conftest import (
+    DUMMY_SECRET_ENV_VARIABLE,
+)
+
+WORKFLOW_EXPOSING_EXISTING_ENV_VARIABLE_TO_NON_BATCH_ORIENTED_STEP = {
+    "version": "1.4.0",
+    "inputs": [],
+    "steps": [
+        {
+            "type": "roboflow_core/environment_secrets_store@v1",
+            "name": "vault",
+            "variables_storing_secrets": [DUMMY_SECRET_ENV_VARIABLE],
+        },
+        {
+            "type": "block_accepting_scalars",
+            "name": "scalars_block",
+            "secret": f"$steps.vault.{DUMMY_SECRET_ENV_VARIABLE.lower()}",
+        },
+    ],
+    "outputs": [
+        {
+            "type": "JsonField",
+            "name": "output",
+            "selector": "$steps.scalars_block.output",
+        },
+    ],
+}
+
+
+@mock.patch.object(blocks_loader, "get_plugin_modules")
+def test_feeding_existing_env_variable_into_scalar_oriented_step(
+    get_plugin_modules_mock: MagicMock,
+    model_manager: ModelManager,
+) -> None:
+    # given
+    get_plugin_modules_mock.return_value = [
+        "tests.workflows.integration_tests.execution.stub_plugins.plugin_testing_non_simd_step_with_optional_outputs"
+    ]
+    workflow_init_parameters = {
+        "workflows_core.model_manager": model_manager,
+        "workflows_core.api_key": None,
+        "workflows_core.step_execution_mode": StepExecutionMode.LOCAL,
+    }
+    execution_engine = ExecutionEngine.init(
+        workflow_definition=WORKFLOW_EXPOSING_EXISTING_ENV_VARIABLE_TO_NON_BATCH_ORIENTED_STEP,
+        init_parameters=workflow_init_parameters,
+        max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS,
+    )
+
+    # when
+    result = execution_engine.run(runtime_parameters={})
+
+    # then
+    assert isinstance(result, list), "Expected result to be list"
+    assert len(result) == 1, "No batch inputs, so a single output element expected"
+    assert set(result[0].keys()) == {
+        "output",
+    }, "Expected all declared outputs to be delivered"
+    assert result[0]["output"] == os.environ[DUMMY_SECRET_ENV_VARIABLE]
+
+
+@mock.patch.object(blocks_loader, "get_plugin_modules")
+def test_feeding_existing_env_variable_into_scalar_oriented_step_when_serialization_of_secret_output_expected(
+    get_plugin_modules_mock: MagicMock,
+    model_manager: ModelManager,
+) -> None:
+    # given
+    get_plugin_modules_mock.return_value = [
+        "tests.workflows.integration_tests.execution.stub_plugins.plugin_testing_non_simd_step_with_optional_outputs"
+    ]
+    workflow_init_parameters = {
+        "workflows_core.model_manager": model_manager,
+        "workflows_core.api_key": None,
+        "workflows_core.step_execution_mode": StepExecutionMode.LOCAL,
+    }
+    execution_engine = ExecutionEngine.init(
+        workflow_definition=WORKFLOW_EXPOSING_EXISTING_ENV_VARIABLE_TO_NON_BATCH_ORIENTED_STEP,
+        init_parameters=workflow_init_parameters,
+        max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS,
+    )
+
+    # when
+    result = execution_engine.run(runtime_parameters={}, serialize_results=True)
+
+    # then
+    assert isinstance(result, list), "Expected result to be list"
+    assert len(result) == 1, "No batch inputs, so a single output element expected"
+    assert set(result[0].keys()) == {
+        "output",
+    }, "Expected all declared outputs to be delivered"
+    assert result[0]["output"] is not None
+    assert result[0]["output"] != os.environ[DUMMY_SECRET_ENV_VARIABLE]
+
+
+WORKFLOW_EXPOSING_EXISTING_ENV_VARIABLE_TO_SIMD_STEP_ACCEPTING_BATCHES = {
+    "version": "1.4.0",
+    "inputs": [{"type": "WorkflowImage", "name": "image"}],
+    "steps": [
+        {
+            "type": "roboflow_core/environment_secrets_store@v1",
+            "name": "vault",
+            "variables_storing_secrets": [DUMMY_SECRET_ENV_VARIABLE],
+        },
+        {
+            "type": "block_accepting_batches_of_images",
+            "name": "model",
+            "image": "$inputs.image",
+            "secret": f"$steps.vault.{DUMMY_SECRET_ENV_VARIABLE.lower()}",
+        },
+    ],
+    "outputs": [
+        {
+            "type": "JsonField",
+            "name": "output",
+            "selector": "$steps.model.output",
+        },
+    ],
+}
+
+
+@mock.patch.object(blocks_loader, "get_plugin_modules")
+def test_feeding_existing_env_variable_into_simd_step_accepting_batches(
+    get_plugin_modules_mock: MagicMock,
+    model_manager: ModelManager,
+    crowd_image: np.ndarray,
+) -> None:
+    # given
+    get_plugin_modules_mock.return_value = [
+        "tests.workflows.integration_tests.execution.stub_plugins.plugin_testing_non_simd_step_with_optional_outputs"
+    ]
+    workflow_init_parameters = {
+        "workflows_core.model_manager": model_manager,
+        "workflows_core.api_key": None,
+        "workflows_core.step_execution_mode": StepExecutionMode.LOCAL,
+    }
+    execution_engine = ExecutionEngine.init(
+        workflow_definition=WORKFLOW_EXPOSING_EXISTING_ENV_VARIABLE_TO_SIMD_STEP_ACCEPTING_BATCHES,
+        init_parameters=workflow_init_parameters,
+        max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS,
+    )
+
+    # when
+    result = execution_engine.run(
+        runtime_parameters={"image": [crowd_image, crowd_image]}
+    )
+
+    # then
+    assert isinstance(result, list), "Expected result to be list"
+    assert len(result) == 2, "Two images provided, so two output elements expected"
+    assert set(result[0].keys()) == {
+        "output",
+    }, "Expected all declared outputs to be delivered"
+    assert set(result[1].keys()) == {
+        "output",
+    }, "Expected all declared outputs to be delivered"
+    assert result[0]["output"] == "ok"
+    assert result[1]["output"] == "ok"
+
+
+WORKFLOW_EXPOSING_EXISTING_ENV_VARIABLE_TO_SIMD_STEP_NOT_ACCEPTING_BATCHES = {
+    "version": "1.4.0",
+    "inputs": [{"type": "WorkflowImage", "name": "image"}],
+    "steps": [
+        {
+            "type": "roboflow_core/environment_secrets_store@v1",
+            "name": "vault",
+            "variables_storing_secrets": [DUMMY_SECRET_ENV_VARIABLE],
+        },
+        {
+            "type": "block_accepting_images",
+            "name": "model",
+            "image": "$inputs.image",
+            "secret": f"$steps.vault.{DUMMY_SECRET_ENV_VARIABLE.lower()}",
+        },
+    ],
+    "outputs": [
+        {
+            "type": "JsonField",
+            "name": "output",
+            "selector": "$steps.model.output",
+        },
+    ],
+}
+
+
+@mock.patch.object(blocks_loader, "get_plugin_modules")
+def test_feeding_existing_env_variable_into_simd_step_not_accepting_batches(
+    get_plugin_modules_mock: MagicMock,
+    model_manager: ModelManager,
+    crowd_image: np.ndarray,
+) -> None:
+    # given
+    get_plugin_modules_mock.return_value = [
+        "tests.workflows.integration_tests.execution.stub_plugins.plugin_testing_non_simd_step_with_optional_outputs"
+    ]
+    workflow_init_parameters = {
+        "workflows_core.model_manager": model_manager,
+        "workflows_core.api_key": None,
+        "workflows_core.step_execution_mode": StepExecutionMode.LOCAL,
+    }
+    execution_engine = ExecutionEngine.init(
+        workflow_definition=WORKFLOW_EXPOSING_EXISTING_ENV_VARIABLE_TO_SIMD_STEP_NOT_ACCEPTING_BATCHES,
+        init_parameters=workflow_init_parameters,
+        max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS,
+    )
+
+    # when
+    result = execution_engine.run(
+        runtime_parameters={"image": [crowd_image, crowd_image]}
+    )
+
+    # then
+    assert isinstance(result, list), "Expected result to be list"
+    assert len(result) == 2, "Two images provided, so two output elements expected"
+    assert set(result[0].keys()) == {
+        "output",
+    }, "Expected all declared outputs to be delivered"
+    assert set(result[1].keys()) == {
+        "output",
+    }, "Expected all declared outputs to be delivered"
+    assert result[0]["output"] == "ok"
+    assert result[1]["output"] == "ok"
+
+
+WORKFLOW_EXPOSING_NON_EXISTING_ENV_VARIABLE_TO_SCALAR_STEP = {
+    "version": "1.4.0",
+    "inputs": [],
+    "steps": [
+        {
+            "type": "roboflow_core/environment_secrets_store@v1",
+            "name": "vault",
+            "variables_storing_secrets": ["NON_EXISTING_ENV_VARIABLE"],
+        },
+        {
+            "type": "block_accepting_scalars",
+            "name": "block",
+            "secret": "$steps.vault.non_existing_env_variable",
+        },
+    ],
+    "outputs": [
+        {
+            "type": "JsonField",
+            "name": "output",
+            "selector": "$steps.block.output",
+        },
+    ],
+}
+
+
+@mock.patch.object(blocks_loader, "get_plugin_modules")
+def test_feeding_non_existing_env_variable_into_scalar_step_not_accepting_empty_inputs(
+    get_plugin_modules_mock: MagicMock,
+    model_manager: ModelManager,
+) -> None:
+    # given
+    get_plugin_modules_mock.return_value = [
+        "tests.workflows.integration_tests.execution.stub_plugins.plugin_testing_non_simd_step_with_optional_outputs"
+    ]
+    workflow_init_parameters = {
+        "workflows_core.model_manager": model_manager,
+        "workflows_core.api_key": None,
+        "workflows_core.step_execution_mode": StepExecutionMode.LOCAL,
+    }
+    execution_engine = ExecutionEngine.init(
+        workflow_definition=WORKFLOW_EXPOSING_NON_EXISTING_ENV_VARIABLE_TO_SCALAR_STEP,
+        init_parameters=workflow_init_parameters,
+        max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS,
+    )
+
+    # when
+    result = execution_engine.run(runtime_parameters={})
+
+    # then
+    assert isinstance(result, list), "Expected result to be list"
+    assert len(result) == 1, "No batch inputs, so a single output element expected"
+    assert set(result[0].keys()) == {
+        "output",
+    }, "Expected all declared outputs to be delivered"
+    assert result[0]["output"] is None
+
+
+WORKFLOW_EXPOSING_NON_EXISTING_ENV_VARIABLE_TO_SCALAR_STEP_ACCEPTING_EMPTY_INPUTS = {
+    "version": "1.4.0",
+    "inputs": [],
+    "steps": [
+        {
+            "type": "roboflow_core/environment_secrets_store@v1",
+            "name": "vault",
+            "variables_storing_secrets": ["NON_EXISTING_ENV_VARIABLE"],
+        },
+        {
+            "type": "block_accepting_empty_scalars",
+            "name": "block",
+            "secret": "$steps.vault.non_existing_env_variable",
+        },
+    ],
+    "outputs": [
+        {
+            "type": "JsonField",
+            "name": "output",
+            "selector": "$steps.block.output",
+        },
+    ],
+}
+
+
+@mock.patch.object(blocks_loader, "get_plugin_modules")
+def test_feeding_non_existing_env_variable_into_scalar_step_accepting_empty_inputs(
+    get_plugin_modules_mock: MagicMock,
+    model_manager: ModelManager,
+) -> None:
+    # given
+    get_plugin_modules_mock.return_value = [
+        "tests.workflows.integration_tests.execution.stub_plugins.plugin_testing_non_simd_step_with_optional_outputs"
+    ]
+    workflow_init_parameters = {
+        "workflows_core.model_manager": model_manager,
+        "workflows_core.api_key": None,
+        "workflows_core.step_execution_mode": StepExecutionMode.LOCAL,
+    }
+    execution_engine = ExecutionEngine.init(
+        workflow_definition=WORKFLOW_EXPOSING_NON_EXISTING_ENV_VARIABLE_TO_SCALAR_STEP_ACCEPTING_EMPTY_INPUTS,
+        init_parameters=workflow_init_parameters,
+        max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS,
+    )
+
+    # when
+    result = execution_engine.run(runtime_parameters={})
+
+    # then
"Expected result to be list" + assert len(result) == 1, "Single image provided, so one output element expected" + assert set(result[0].keys()) == { + "output", + }, "Expected all declared outputs to be delivered" + assert result[0]["output"] == "modified-secret" + + +WORKFLOW_EXPOSING_NON_EXISTING_ENV_VARIABLE_TO_SIMD_STEP_ACCEPTING_BATCHES = { + "version": "1.4.0", + "inputs": [{"type": "WorkflowImage", "name": "image"}], + "steps": [ + { + "type": "roboflow_core/environment_secrets_store@v1", + "name": "vault", + "variables_storing_secrets": ["NON_EXISTING_ENV_VARIABLE"], + }, + { + "type": "block_accepting_batches_of_images", + "name": "model", + "image": "$inputs.image", + "secret": f"$steps.vault.non_existing_env_variable", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "output", + "selector": "$steps.model.output", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_feeding_existing_env_variable_into_simd_step_accepting_batches( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, + crowd_image: np.ndarray, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_testing_non_simd_step_with_optional_outputs" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_EXPOSING_NON_EXISTING_ENV_VARIABLE_TO_SIMD_STEP_ACCEPTING_BATCHES, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + result = execution_engine.run( + runtime_parameters={"image": [crowd_image, crowd_image]} + ) + + # then + assert isinstance(result, list), "Expected result to be list" + assert len(result) == 2, "Single image provided, so one output element expected" + assert set(result[0].keys()) == { + "output", + }, "Expected all declared outputs to be delivered" + assert set(result[1].keys()) == { + "output", + }, "Expected all declared outputs to be delivered" + assert result[0]["output"] is None + assert result[1]["output"] is None + + +WORKFLOW_EXPOSING_NON_EXISTING_ENV_VARIABLE_TO_SIMD_STEP_ACCEPTING_BATCHES_NESTED_SCENARIO = { + "version": "1.4.0", + "inputs": [{"type": "WorkflowImage", "name": "image"}], + "steps": [ + { + "type": "roboflow_core/roboflow_object_detection_model@v2", + "name": "detection", + "image": "$inputs.image", + "model_id": "yolov8n-640", + }, + { + "type": "roboflow_core/dynamic_crop@v1", + "name": "cropping", + "image": "$inputs.image", + "predictions": "$steps.detection.predictions", + }, + { + "type": "roboflow_core/environment_secrets_store@v1", + "name": "vault", + "variables_storing_secrets": ["NON_EXISTING_ENV_VARIABLE"], + }, + { + "type": "block_accepting_batches_of_images", + "name": "model", + "image": "$steps.cropping.crops", + "secret": f"$steps.vault.non_existing_env_variable", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "output", + "selector": "$steps.model.output", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_feeding_existing_env_variable_into_simd_step_accepting_batches_in_nested_scenario( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, + crowd_image: np.ndarray, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + 
"tests.workflows.integration_tests.execution.stub_plugins.plugin_testing_non_simd_step_with_optional_outputs" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_EXPOSING_NON_EXISTING_ENV_VARIABLE_TO_SIMD_STEP_ACCEPTING_BATCHES_NESTED_SCENARIO, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + result = execution_engine.run( + runtime_parameters={"image": [crowd_image, crowd_image]} + ) + + # then + assert isinstance(result, list), "Expected result to be list" + assert len(result) == 2, "Single image provided, so one output element expected" + assert set(result[0].keys()) == { + "output", + }, "Expected all declared outputs to be delivered" + assert set(result[1].keys()) == { + "output", + }, "Expected all declared outputs to be delivered" + assert result[0]["output"] == [None] * 12 + assert result[1]["output"] == [None] * 12 + + +WORKFLOW_EXPOSING_NON_EXISTING_ENV_VARIABLE_TO_SIMD_STEP_ACCEPTING_EMPTY_BATCHES = { + "version": "1.4.0", + "inputs": [{"type": "WorkflowImage", "name": "image"}], + "steps": [ + { + "type": "roboflow_core/environment_secrets_store@v1", + "name": "vault", + "variables_storing_secrets": ["NON_EXISTING_ENV_VARIABLE"], + }, + { + "type": "block_accepting_empty_batches_of_images", + "name": "model", + "image": "$inputs.image", + "secret": f"$steps.vault.non_existing_env_variable", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "output", + "selector": "$steps.model.output", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_feeding_existing_env_variable_into_simd_step_accepting_empty_batches( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, + crowd_image: np.ndarray, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_testing_non_simd_step_with_optional_outputs" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_EXPOSING_NON_EXISTING_ENV_VARIABLE_TO_SIMD_STEP_ACCEPTING_EMPTY_BATCHES, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + result = execution_engine.run( + runtime_parameters={"image": [crowd_image, crowd_image]} + ) + + # then + assert isinstance(result, list), "Expected result to be list" + assert len(result) == 2, "Single image provided, so one output element expected" + assert set(result[0].keys()) == { + "output", + }, "Expected all declared outputs to be delivered" + assert set(result[1].keys()) == { + "output", + }, "Expected all declared outputs to be delivered" + assert result[0]["output"] is "empty" + assert result[1]["output"] is "empty" + + +WORKFLOW_EXPOSING_NON_EXISTING_ENV_VARIABLE_TO_SIMD_STEP_ACCEPTING_EMPTY_BATCHES_NESTED = { + "version": "1.4.0", + "inputs": [{"type": "WorkflowImage", "name": "image"}], + "steps": [ + { + "type": "roboflow_core/roboflow_object_detection_model@v2", + "name": "detection", + "image": "$inputs.image", + "model_id": "yolov8n-640", + }, + { + "type": "roboflow_core/dynamic_crop@v1", + "name": "cropping", + "image": 
"$inputs.image", + "predictions": "$steps.detection.predictions", + }, + { + "type": "roboflow_core/environment_secrets_store@v1", + "name": "vault", + "variables_storing_secrets": ["NON_EXISTING_ENV_VARIABLE"], + }, + { + "type": "block_accepting_empty_batches_of_images", + "name": "model", + "image": "$steps.cropping.crops", + "secret": f"$steps.vault.non_existing_env_variable", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "output", + "selector": "$steps.model.output", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_feeding_existing_env_variable_into_simd_step_accepting_empty_batches_nested_scenario( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, + crowd_image: np.ndarray, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_testing_non_simd_step_with_optional_outputs" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_EXPOSING_NON_EXISTING_ENV_VARIABLE_TO_SIMD_STEP_ACCEPTING_EMPTY_BATCHES_NESTED, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + result = execution_engine.run( + runtime_parameters={"image": [crowd_image, crowd_image]} + ) + + # then + assert isinstance(result, list), "Expected result to be list" + assert len(result) == 2, "Single image provided, so one output element expected" + assert set(result[0].keys()) == { + "output", + }, "Expected all declared outputs to be delivered" + assert set(result[1].keys()) == { + "output", + }, "Expected all declared outputs to be delivered" + assert result[0]["output"] == ["empty"] * 12 + assert result[1]["output"] == ["empty"] * 12 + + +# ################################ + + +WORKFLOW_EXPOSING_NON_EXISTING_ENV_VARIABLE_TO_SIMD_STEP_NOT_ACCEPTING_BATCHES = { + "version": "1.4.0", + "inputs": [{"type": "WorkflowImage", "name": "image"}], + "steps": [ + { + "type": "roboflow_core/environment_secrets_store@v1", + "name": "vault", + "variables_storing_secrets": ["NON_EXISTING_ENV_VARIABLE"], + }, + { + "type": "block_accepting_images", + "name": "model", + "image": "$inputs.image", + "secret": f"$steps.vault.non_existing_env_variable", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "output", + "selector": "$steps.model.output", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_feeding_existing_env_variable_into_simd_step_not_accepting_batches( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, + crowd_image: np.ndarray, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_testing_non_simd_step_with_optional_outputs" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_EXPOSING_NON_EXISTING_ENV_VARIABLE_TO_SIMD_STEP_NOT_ACCEPTING_BATCHES, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + result = execution_engine.run( + runtime_parameters={"image": [crowd_image, crowd_image]} + ) + + # then + assert 
+    assert isinstance(result, list), "Expected result to be list"
+    assert len(result) == 2, "Two images provided, so two output elements expected"
+    assert set(result[0].keys()) == {
+        "output",
+    }, "Expected all declared outputs to be delivered"
+    assert set(result[1].keys()) == {
+        "output",
+    }, "Expected all declared outputs to be delivered"
+    assert result[0]["output"] is None
+    assert result[1]["output"] is None
+
+
+WORKFLOW_EXPOSING_NON_EXISTING_ENV_VARIABLE_TO_SIMD_STEP_NOT_ACCEPTING_BATCHES_NESTED_SCENARIO = {
+    "version": "1.4.0",
+    "inputs": [{"type": "WorkflowImage", "name": "image"}],
+    "steps": [
+        {
+            "type": "roboflow_core/roboflow_object_detection_model@v2",
+            "name": "detection",
+            "image": "$inputs.image",
+            "model_id": "yolov8n-640",
+        },
+        {
+            "type": "roboflow_core/dynamic_crop@v1",
+            "name": "cropping",
+            "image": "$inputs.image",
+            "predictions": "$steps.detection.predictions",
+        },
+        {
+            "type": "roboflow_core/environment_secrets_store@v1",
+            "name": "vault",
+            "variables_storing_secrets": ["NON_EXISTING_ENV_VARIABLE"],
+        },
+        {
+            "type": "block_accepting_images",
+            "name": "model",
+            "image": "$steps.cropping.crops",
+            "secret": "$steps.vault.non_existing_env_variable",
+        },
+    ],
+    "outputs": [
+        {
+            "type": "JsonField",
+            "name": "output",
+            "selector": "$steps.model.output",
+        },
+    ],
+}
+
+
+@mock.patch.object(blocks_loader, "get_plugin_modules")
+def test_feeding_non_existing_env_variable_into_simd_step_not_accepting_batches_in_nested_scenario(
+    get_plugin_modules_mock: MagicMock,
+    model_manager: ModelManager,
+    crowd_image: np.ndarray,
+) -> None:
+    # given
+    get_plugin_modules_mock.return_value = [
+        "tests.workflows.integration_tests.execution.stub_plugins.plugin_testing_non_simd_step_with_optional_outputs"
+    ]
+    workflow_init_parameters = {
+        "workflows_core.model_manager": model_manager,
+        "workflows_core.api_key": None,
+        "workflows_core.step_execution_mode": StepExecutionMode.LOCAL,
+    }
+    execution_engine = ExecutionEngine.init(
+        workflow_definition=WORKFLOW_EXPOSING_NON_EXISTING_ENV_VARIABLE_TO_SIMD_STEP_NOT_ACCEPTING_BATCHES_NESTED_SCENARIO,
+        init_parameters=workflow_init_parameters,
+        max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS,
+    )
+
+    # when
+    result = execution_engine.run(
+        runtime_parameters={"image": [crowd_image, crowd_image]}
+    )
+
+    # then
+    assert isinstance(result, list), "Expected result to be list"
+    assert len(result) == 2, "Two images provided, so two output elements expected"
+    assert set(result[0].keys()) == {
+        "output",
+    }, "Expected all declared outputs to be delivered"
+    assert set(result[1].keys()) == {
+        "output",
+    }, "Expected all declared outputs to be delivered"
+    assert result[0]["output"] == [None] * 12
+    assert result[1]["output"] == [None] * 12
+
+
+WORKFLOW_EXPOSING_NON_EXISTING_ENV_VARIABLE_TO_SIMD_STEP_ACCEPTING_EMPTY_VALUES = {
+    "version": "1.4.0",
+    "inputs": [{"type": "WorkflowImage", "name": "image"}],
+    "steps": [
+        {
+            "type": "roboflow_core/environment_secrets_store@v1",
+            "name": "vault",
+            "variables_storing_secrets": ["NON_EXISTING_ENV_VARIABLE"],
+        },
+        {
+            "type": "block_accepting_empty_images",
+            "name": "model",
+            "image": "$inputs.image",
+            "secret": "$steps.vault.non_existing_env_variable",
+        },
+    ],
+    "outputs": [
+        {
+            "type": "JsonField",
+            "name": "output",
+            "selector": "$steps.model.output",
+        },
+    ],
+}
+
+
+@mock.patch.object(blocks_loader, "get_plugin_modules")
+def test_feeding_non_existing_env_variable_into_simd_step_accepting_empty_values(
+    get_plugin_modules_mock: MagicMock,
+    model_manager: ModelManager,
+    crowd_image: np.ndarray,
+) -> None:
+    # given
+    get_plugin_modules_mock.return_value = [
+        "tests.workflows.integration_tests.execution.stub_plugins.plugin_testing_non_simd_step_with_optional_outputs"
+    ]
+    workflow_init_parameters = {
+        "workflows_core.model_manager": model_manager,
+        "workflows_core.api_key": None,
+        "workflows_core.step_execution_mode": StepExecutionMode.LOCAL,
+    }
+    execution_engine = ExecutionEngine.init(
+        workflow_definition=WORKFLOW_EXPOSING_NON_EXISTING_ENV_VARIABLE_TO_SIMD_STEP_ACCEPTING_EMPTY_VALUES,
+        init_parameters=workflow_init_parameters,
+        max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS,
+    )
+
+    # when
+    result = execution_engine.run(
+        runtime_parameters={"image": [crowd_image, crowd_image]}
+    )
+
+    # then
+    assert isinstance(result, list), "Expected result to be list"
+    assert len(result) == 2, "Two images provided, so two output elements expected"
+    assert set(result[0].keys()) == {
+        "output",
+    }, "Expected all declared outputs to be delivered"
+    assert set(result[1].keys()) == {
+        "output",
+    }, "Expected all declared outputs to be delivered"
+    assert result[0]["output"] == "empty"
+    assert result[1]["output"] == "empty"
+
+
+WORKFLOW_EXPOSING_NON_EXISTING_ENV_VARIABLE_TO_SIMD_STEP_ACCEPTING_EMPTY_VALUES_NESTED = {
+    "version": "1.4.0",
+    "inputs": [{"type": "WorkflowImage", "name": "image"}],
+    "steps": [
+        {
+            "type": "roboflow_core/roboflow_object_detection_model@v2",
+            "name": "detection",
+            "image": "$inputs.image",
+            "model_id": "yolov8n-640",
+        },
+        {
+            "type": "roboflow_core/dynamic_crop@v1",
+            "name": "cropping",
+            "image": "$inputs.image",
+            "predictions": "$steps.detection.predictions",
+        },
+        {
+            "type": "roboflow_core/environment_secrets_store@v1",
+            "name": "vault",
+            "variables_storing_secrets": ["NON_EXISTING_ENV_VARIABLE"],
+        },
+        {
+            "type": "block_accepting_empty_images",
+            "name": "model",
+            "image": "$steps.cropping.crops",
+            "secret": "$steps.vault.non_existing_env_variable",
+        },
+    ],
+    "outputs": [
+        {
+            "type": "JsonField",
+            "name": "output",
+            "selector": "$steps.model.output",
+        },
+    ],
+}
+
+
+@mock.patch.object(blocks_loader, "get_plugin_modules")
+def test_feeding_non_existing_env_variable_into_simd_step_accepting_empty_values_nested_scenario(
+    get_plugin_modules_mock: MagicMock,
+    model_manager: ModelManager,
+    crowd_image: np.ndarray,
+) -> None:
+    # given
+    get_plugin_modules_mock.return_value = [
+        "tests.workflows.integration_tests.execution.stub_plugins.plugin_testing_non_simd_step_with_optional_outputs"
+    ]
+    workflow_init_parameters = {
+        "workflows_core.model_manager": model_manager,
+        "workflows_core.api_key": None,
+        "workflows_core.step_execution_mode": StepExecutionMode.LOCAL,
+    }
+    execution_engine = ExecutionEngine.init(
+        workflow_definition=WORKFLOW_EXPOSING_NON_EXISTING_ENV_VARIABLE_TO_SIMD_STEP_ACCEPTING_EMPTY_VALUES_NESTED,
+        init_parameters=workflow_init_parameters,
+        max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS,
+    )
+
+    # when
+    result = execution_engine.run(
+        runtime_parameters={"image": [crowd_image, crowd_image]}
+    )
+
+    # then
+    assert isinstance(result, list), "Expected result to be list"
+    assert len(result) == 2, "Two images provided, so two output elements expected"
+    assert set(result[0].keys()) == {
+        "output",
+    }, "Expected all declared outputs to be delivered"
+    assert set(result[1].keys()) == {
+        "output",
+    }, "Expected all declared outputs to be delivered"
+    assert result[0]["output"] == ["empty"] * 12
+    assert result[1]["output"] == ["empty"] * 12
["empty"] * 12 diff --git a/tests/workflows/integration_tests/execution/test_workflow_with_google_vision_ocr.py b/tests/workflows/integration_tests/execution/test_workflow_with_google_vision_ocr.py index fe5368d45..adc7c540a 100644 --- a/tests/workflows/integration_tests/execution/test_workflow_with_google_vision_ocr.py +++ b/tests/workflows/integration_tests/execution/test_workflow_with_google_vision_ocr.py @@ -105,13 +105,13 @@ def test_workflow_with_google_ocr_when_text_should_be_detected( "text_detections", }, "Expected all outputs to be delivered" assert ( - result[0]["extracted_text"] == "2398027\n2398023\nKn\n239+8072" + result[0]["extracted_text"] == "2398027\nKn\n239 8072" ), "Extracted text should match reference" assert not np.allclose( license_plate_image, result[0]["text_visualised"].numpy_image ), "Expected that visualisation will change the output image" assert ( - len(result[0]["text_detections"]) == 4 + len(result[0]["text_detections"]) == 3 ), "Expected 4 text regions to be detected" diff --git a/tests/workflows/integration_tests/execution/test_workflow_with_slack_notification.py b/tests/workflows/integration_tests/execution/test_workflow_with_slack_notification.py index cf4adee5e..3476e4a80 100644 --- a/tests/workflows/integration_tests/execution/test_workflow_with_slack_notification.py +++ b/tests/workflows/integration_tests/execution/test_workflow_with_slack_notification.py @@ -82,7 +82,7 @@ def test_minimalist_workflow_with_slack_notifications() -> None: ], "steps": [ { - "type": "roboflow_core/roboflow_keypoint_detection_model@v2", + "type": "roboflow_core/roboflow_object_detection_model@v2", "name": "detection", "image": "$inputs.image", "model_id": "$inputs.model_id", @@ -171,7 +171,7 @@ def test_workflow_with_message_based_on_other_step_output( ], "steps": [ { - "type": "roboflow_core/roboflow_keypoint_detection_model@v2", + "type": "roboflow_core/roboflow_object_detection_model@v2", "name": "detection", "image": "$inputs.image", "model_id": "$inputs.model_id", diff --git a/tests/workflows/integration_tests/execution/test_workflow_with_twillio_notification.py b/tests/workflows/integration_tests/execution/test_workflow_with_twillio_notification.py index 19c87e3bd..445338483 100644 --- a/tests/workflows/integration_tests/execution/test_workflow_with_twillio_notification.py +++ b/tests/workflows/integration_tests/execution/test_workflow_with_twillio_notification.py @@ -33,7 +33,7 @@ ], "steps": [ { - "type": "roboflow_core/roboflow_keypoint_detection_model@v2", + "type": "roboflow_core/roboflow_object_detection_model@v2", "name": "detection", "image": "$inputs.image", "model_id": "$inputs.model_id", From 4d451e5711c94365ad37ca936e37dc9159a1ebce Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Fri, 29 Nov 2024 13:09:51 +0100 Subject: [PATCH 07/10] Add test to ensure env-based secret store cannot be used on hosted platform --- .../hosted_platform_tests/test_workflows.py | 43 +++++++++++++++++++ 1 file changed, 43 insertions(+) diff --git a/tests/inference/hosted_platform_tests/test_workflows.py b/tests/inference/hosted_platform_tests/test_workflows.py index b3ff50620..fa0ce6a1c 100644 --- a/tests/inference/hosted_platform_tests/test_workflows.py +++ b/tests/inference/hosted_platform_tests/test_workflows.py @@ -979,3 +979,46 @@ def test_discovering_interface_of_invalid_workflow_from_payload( # then assert response.status_code == 400 + + +WORKFLOW_WITH_ENV_SECRET_STORE = { + "version": "1.4.0", + "inputs": [], + "steps": [ + { + "type": 
"roboflow_core/environment_secrets_store@v1", + "name": "vault", + "variables_storing_secrets": ["SOME_TOKEN"], + } + ], + "outputs": [ + { + "type": "JsonField", + "name": "status", + "selector": "$steps.vault.some_token", + }, + ], +} + + +@pytest.mark.flaky(retries=4, delay=1) +def test_extracting_secrets_from_env_based_secret_store( + object_detection_service_url: str, +) -> None: + # when + response = requests.post( + f"{object_detection_service_url}/workflows/run", + json={ + "specification": WORKFLOW_WITH_ENV_SECRET_STORE, + "api_key": ROBOFLOW_API_KEY, + "inputs": {}, + }, + ) + + # then + assert response.status_code == 500 + response_data = response.json() + assert ( + "`roboflow_core/environment_secrets_store@v1` block cannot run in this environment" in response_data["message"] + ), "Expected execution to be prevented" + From 298342f4660cac5f917e06cc47978d9b988d8e43 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Fri, 29 Nov 2024 14:01:07 +0100 Subject: [PATCH 08/10] Add unit tests for the block --- .../core_steps/sinks/slack/notification/v1.py | 7 +- .../core_steps/sinks/twilio/sms/v1.py | 14 +- .../hosted_platform_tests/test_workflows.py | 4 +- ...workflow_with_environment_secrets_store.py | 3 - .../sinks/test_slack_notification.py | 531 +++++++++++++++++ .../sinks/test_twilio_sms_notification.py | 548 ++++++++++++++++++ 6 files changed, 1098 insertions(+), 9 deletions(-) create mode 100644 tests/workflows/unit_tests/core_steps/sinks/test_slack_notification.py create mode 100644 tests/workflows/unit_tests/core_steps/sinks/test_twilio_sms_notification.py diff --git a/inference/core/workflows/core_steps/sinks/slack/notification/v1.py b/inference/core/workflows/core_steps/sinks/slack/notification/v1.py index 332edc034..868ae4b8a 100644 --- a/inference/core/workflows/core_steps/sinks/slack/notification/v1.py +++ b/inference/core/workflows/core_steps/sinks/slack/notification/v1.py @@ -309,11 +309,14 @@ def send_slack_notification( logging.warning(f"Could not send Slack notification. Error: {error_details}") return ( True, - f"Failed to Slack notification. Internal error details: {error_details}", + f"Failed to send Slack notification. Internal error details: {error_details}", ) except Exception as error: logging.warning(f"Could not send Slack notification. Error: {str(error)}") - return True, f"Failed to Slack notification. Internal error details: {error}" + return ( + True, + f"Failed to send Slack notification. Internal error details: {error}", + ) def _send_slack_notification( diff --git a/inference/core/workflows/core_steps/sinks/twilio/sms/v1.py b/inference/core/workflows/core_steps/sinks/twilio/sms/v1.py index 629faeae1..e36590d76 100644 --- a/inference/core/workflows/core_steps/sinks/twilio/sms/v1.py +++ b/inference/core/workflows/core_steps/sinks/twilio/sms/v1.py @@ -152,6 +152,13 @@ def ensure_cooldown_seconds_within_bounds(cls, value: Any) -> dict: ) return value + @field_validator("length_limit") + @classmethod + def ensure_length_limit_within_bounds(cls, value: Any) -> dict: + if isinstance(value, int) and value <= 0: + raise ValueError(f"Length limit for SMS must be greater than 0") + return value + @classmethod def describe_outputs(cls) -> List[OutputDefinition]: return [ @@ -340,5 +347,8 @@ def send_sms_notification( ) return False, "Notification sent successfully" except Exception as error: - logging.warning(f"Could not send Slack notification. Error: {str(error)}") - return True, f"Failed to Slack notification. 
Internal error details: {error}" + logging.warning(f"Could not send Twilio SMS notification. Error: {str(error)}") + return ( + True, + f"Failed to send Twilio SMS notification. Internal error details: {error}", + ) diff --git a/tests/inference/hosted_platform_tests/test_workflows.py b/tests/inference/hosted_platform_tests/test_workflows.py index fa0ce6a1c..c20a31d01 100644 --- a/tests/inference/hosted_platform_tests/test_workflows.py +++ b/tests/inference/hosted_platform_tests/test_workflows.py @@ -1019,6 +1019,6 @@ def test_extracting_secrets_from_env_based_secret_store( assert response.status_code == 500 response_data = response.json() assert ( - "`roboflow_core/environment_secrets_store@v1` block cannot run in this environment" in response_data["message"] + "`roboflow_core/environment_secrets_store@v1` block cannot run in this environment" + in response_data["message"] ), "Expected execution to be prevented" - diff --git a/tests/workflows/integration_tests/execution/test_workflow_with_environment_secrets_store.py b/tests/workflows/integration_tests/execution/test_workflow_with_environment_secrets_store.py index 29838cff0..11c8c687b 100644 --- a/tests/workflows/integration_tests/execution/test_workflow_with_environment_secrets_store.py +++ b/tests/workflows/integration_tests/execution/test_workflow_with_environment_secrets_store.py @@ -631,9 +631,6 @@ def test_feeding_existing_env_variable_into_simd_step_accepting_empty_batches_ne assert result[1]["output"] == ["empty"] * 12 -# ################################ - - WORKFLOW_EXPOSING_NON_EXISTING_ENV_VARIABLE_TO_SIMD_STEP_NOT_ACCEPTING_BATCHES = { "version": "1.4.0", "inputs": [{"type": "WorkflowImage", "name": "image"}], diff --git a/tests/workflows/unit_tests/core_steps/sinks/test_slack_notification.py b/tests/workflows/unit_tests/core_steps/sinks/test_slack_notification.py new file mode 100644 index 000000000..1b76230a5 --- /dev/null +++ b/tests/workflows/unit_tests/core_steps/sinks/test_slack_notification.py @@ -0,0 +1,531 @@ +import time +from unittest import mock +from unittest.mock import MagicMock + +import pytest +from pydantic import ValidationError +from slack_sdk.errors import SlackApiError +from slack_sdk.web import SlackResponse + +from inference.core.cache import MemoryCache +from inference.core.workflows.core_steps.common.query_language.entities.operations import ( + StringToUpperCase, +) +from inference.core.workflows.core_steps.sinks.slack.notification import v1 +from inference.core.workflows.core_steps.sinks.slack.notification.v1 import ( + BlockManifest, + SlackNotificationBlockV1, + format_message, + send_slack_notification, +) + + +def test_manifest_parsing_when_the_input_is_valid() -> None: + # given + raw_manifest = { + "type": "roboflow_core/slack_notification@v1", + "name": "slack_notification", + "slack_token": "$inputs.slack_token", + "message": "My message", + "channel": "$inputs.slack_channel", + "message_parameters": { + "image": "$inputs.image", + }, + "message_parameters_operations": { + "image": [{"type": "ConvertImageToJPEG"}], + }, + "attachments": { + "form_field": "$inputs.query_parameter", + }, + "fire_and_forget": True, + "cooldown_seconds": "$inputs.cooldown", + "cooldown_session_key": "unique-session-key", + } + + # when + result = BlockManifest.model_validate(raw_manifest) + + # then + assert result == BlockManifest( + type="roboflow_core/slack_notification@v1", + name="slack_notification", + slack_token="$inputs.slack_token", + channel="$inputs.slack_channel", + message="My message", + 
+        message_parameters={
+            "image": "$inputs.image",
+        },
+        message_parameters_operations={
+            "image": [{"type": "ConvertImageToJPEG"}],
+        },
+        attachments={
+            "form_field": "$inputs.query_parameter",
+        },
+        fire_and_forget=True,
+        cooldown_seconds="$inputs.cooldown",
+        cooldown_session_key="unique-session-key",
+    )
+
+
+def test_manifest_parsing_when_cooldown_seconds_is_invalid() -> None:
+    # given
+    raw_manifest = {
+        "type": "roboflow_core/slack_notification@v1",
+        "name": "slack_notification",
+        "slack_token": "$inputs.slack_token",
+        "message": "My message",
+        "channel": "$inputs.slack_channel",
+        "message_parameters": {
+            "image": "$inputs.image",
+        },
+        "message_parameters_operations": {
+            "image": [{"type": "ConvertImageToJPEG"}],
+        },
+        "attachments": {
+            "form_field": "$inputs.query_parameter",
+        },
+        "fire_and_forget": True,
+        "cooldown_seconds": -1,
+        "cooldown_session_key": "unique-session-key",
+    }
+
+    # when
+    with pytest.raises(ValidationError):
+        _ = BlockManifest.model_validate(raw_manifest)
+
+
+def test_format_message_when_multiple_occurrences_of_the_same_parameter_exist() -> None:
+    # given
+    message = "This is example param: {{{ $parameters.param }}} - and this is also param: `{{ $parameters.param }}`"
+
+    # when
+    result = format_message(
+        message=message,
+        message_parameters={"param": "some"},
+        message_parameters_operations={},
+    )
+
+    # then
+    assert result == "This is example param: {some} - and this is also param: `some`"
+
+
+def test_format_message_when_multiple_parameters_exist() -> None:
+    # given
+    message = "This is example param: {{ $parameters.param }} - and this is also param: `{{ $parameters.other }}`"
+
+    # when
+    result = format_message(
+        message=message,
+        message_parameters={"param": "some", "other": 42},
+        message_parameters_operations={},
+    )
+
+    # then
+    assert result == "This is example param: some - and this is also param: `42`"
+
+
+def test_format_message_when_different_combinations_of_whitespaces_exist_in_template_parameter_anchor() -> (
+    None
+):
+    # given
+    message = "{{{ $parameters.param }}} - {{$parameters.param }} - {{ $parameters.param}} - {{ $parameters.param }}"
+
+    # when
+    result = format_message(
+        message=message,
+        message_parameters={"param": "some"},
+        message_parameters_operations={},
+    )
+
+    # then
+    assert result == "{some} - some - some - some"
+
+
+def test_format_message_when_operation_to_apply_on_parameter() -> None:
+    # given
+    message = "This is example param: {{{ $parameters.param }}} - and this is also param: `{{ $parameters.param }}`"
+
+    # when
+    result = format_message(
+        message=message,
+        message_parameters={"param": "some"},
+        message_parameters_operations={
+            "param": [StringToUpperCase(type="StringToUpperCase")]
+        },
+    )
+
+    # then
+    assert result == "This is example param: {SOME} - and this is also param: `SOME`"
+
+
+def test_send_slack_notification_when_slack_api_error_raised() -> None:
+    # given
+    client = MagicMock()
+    client.chat_postMessage.side_effect = SlackApiError(
+        message="some",
+        response=SlackResponse(
+            client=client,
+            http_verb="",
+            api_url="",
+            req_args={},
+            data={},
+            headers={},
+            status_code=500,
+        ),
+    )
+
+    # when
+    result = send_slack_notification(
+        client=client,
+        channel="some",
+        message="msg",
+        attachments={},
+    )
+
+    # then
+    assert result[0] is True
+    assert result[1].startswith("Failed to send Slack notification")
+
+
+def test_send_slack_notification_when_generic_error_raised() -> None:
+    # given
+    client = MagicMock()
+    client.chat_postMessage.side_effect = Exception()
+
+    # when
+    result = send_slack_notification(
+        client=client,
+        channel="some",
+        message="msg",
+        attachments={},
+    )
+
+    # then
+    assert result[0] is True
+    assert result[1].startswith("Failed to send Slack notification")
+
+
+def test_send_slack_notification_when_operation_succeeds_without_attachments() -> None:
+    # given
+    client = MagicMock()
+
+    # when
+    result = send_slack_notification(
+        client=client,
+        channel="some",
+        message="msg",
+        attachments={},
+    )
+
+    # then
+    client.chat_postMessage.assert_called_once_with(
+        channel="some",
+        text="msg",
+    )
+    assert result[0] is False
+
+
+def test_send_slack_notification_when_operation_succeeds_with_attachments() -> None:
+    # given
+    client = MagicMock()
+
+    # when
+    result = send_slack_notification(
+        client=client,
+        channel="some",
+        message="msg",
+        attachments={"some": b"data"},
+    )
+
+    # then
+    client.files_upload_v2.assert_called_once_with(
+        channel="some",
+        initial_comment="msg",
+        file_uploads=[{"title": "some", "content": b"data"}],
+    )
+    assert result[0] is False
+
+
+def test_cooldown_in_slack_notification_block() -> None:
+    # given
+    thread_pool_executor = MagicMock()
+    block = SlackNotificationBlockV1(
+        cache=MemoryCache(),
+        background_tasks=None,
+        thread_pool_executor=thread_pool_executor,
+    )
+
+    # when
+    results = []
+    for _ in range(2):
+        result = block.run(
+            slack_token="token",
+            message="some",
+            channel="channel",
+            message_parameters={},
+            message_parameters_operations={},
+            attachments={},
+            fire_and_forget=True,
+            disable_sink=False,
+            cooldown_seconds=100,
+            cooldown_session_key="unique",
+        )
+        results.append(result)
+
+    # then
+    assert results[0] == {
+        "error_status": False,
+        "throttling_status": False,
+        "message": "Notification sent in the background task",
+    }
+    assert results[1] == {
+        "error_status": False,
+        "throttling_status": True,
+        "message": "Sink cooldown applies",
+    }
+
+
+def test_cooldown_in_slack_notification_block_for_separate_sessions() -> None:
+    # given
+    thread_pool_executor = MagicMock()
+    block = SlackNotificationBlockV1(
+        cache=MemoryCache(),
+        background_tasks=None,
+        thread_pool_executor=thread_pool_executor,
+    )
+
+    # when
+    results = []
+    for i in range(2):
+        result = block.run(
+            slack_token="token",
+            message="some",
+            channel="channel",
+            message_parameters={},
+            message_parameters_operations={},
+            attachments={},
+            fire_and_forget=True,
+            disable_sink=False,
+            cooldown_seconds=100,
+            cooldown_session_key=f"unique-{i}",
+        )
+        results.append(result)
+
+    # then
+    assert results[0] == {
+        "error_status": False,
+        "throttling_status": False,
+        "message": "Notification sent in the background task",
+    }
+    assert results[1] == {
+        "error_status": False,
+        "throttling_status": False,
+        "message": "Notification sent in the background task",
+    }
+
+
+def test_disabling_cooldown_in_slack_notification_block() -> None:
+    # given
+    thread_pool_executor = MagicMock()
+    block = SlackNotificationBlockV1(
+        cache=MemoryCache(),
+        background_tasks=None,
+        thread_pool_executor=thread_pool_executor,
+    )
+
+    # when
+    results = []
+    for _ in range(2):
+        result = block.run(
+            slack_token="token",
+            message="some",
+            channel="channel",
+            message_parameters={},
+            message_parameters_operations={},
+            attachments={},
+            fire_and_forget=True,
+            disable_sink=False,
+            cooldown_seconds=0,
+            cooldown_session_key="unique",
+        )
+        results.append(result)
+
+    # then
+    assert results[0] == {
+        "error_status": False,
+        "throttling_status": False,
+        "message": "Notification sent in the background 
task", + } + assert results[1] == { + "error_status": False, + "throttling_status": False, + "message": "Notification sent in the background task", + } + + +def test_cooldown_recovery_in_slack_notification_block() -> None: + # given + thread_pool_executor = MagicMock() + block = SlackNotificationBlockV1( + cache=MemoryCache(), + background_tasks=None, + thread_pool_executor=thread_pool_executor, + ) + + # when + results = [] + for _ in range(2): + result = block.run( + slack_token="token", + message="some", + channel="channel", + message_parameters={}, + message_parameters_operations={}, + attachments={}, + fire_and_forget=True, + disable_sink=False, + cooldown_seconds=1, + cooldown_session_key="unique", + ) + results.append(result) + time.sleep(1.5) + + # then + assert results[0] == { + "error_status": False, + "throttling_status": False, + "message": "Notification sent in the background task", + } + assert results[1] == { + "error_status": False, + "throttling_status": False, + "message": "Notification sent in the background task", + } + + +@mock.patch.object(v1, "send_slack_notification") +def test_sending_slack_notification_synchronously( + send_slack_notification_mock: MagicMock, +) -> None: + # given + send_slack_notification_mock.return_value = (False, "ok") + block = SlackNotificationBlockV1( + cache=MemoryCache(), + background_tasks=None, + thread_pool_executor=MagicMock(), + ) + + # when + result = block.run( + slack_token="token", + message="some", + channel="channel", + message_parameters={}, + message_parameters_operations={}, + attachments={}, + fire_and_forget=False, + disable_sink=False, + cooldown_seconds=1, + cooldown_session_key="unique", + ) + + # then + assert result == { + "error_status": False, + "throttling_status": False, + "message": "ok", + } + + +def test_disabling_slack_notification() -> None: + # given + block = SlackNotificationBlockV1( + cache=MemoryCache(), + background_tasks=None, + thread_pool_executor=MagicMock(), + ) + + # when + result = block.run( + slack_token="token", + message="some", + channel="channel", + message_parameters={}, + message_parameters_operations={}, + attachments={}, + fire_and_forget=False, + disable_sink=True, + cooldown_seconds=1, + cooldown_session_key="unique", + ) + + # then + assert result == { + "error_status": False, + "throttling_status": False, + "message": "Sink was disabled by parameter `disable_sink`", + } + + +def test_sending_slack_notification_asynchronously_in_background_tasks() -> None: + # given + background_tasks = MagicMock() + block = SlackNotificationBlockV1( + cache=MemoryCache(), + background_tasks=background_tasks, + thread_pool_executor=None, + ) + + # when + result = block.run( + slack_token="token", + message="some", + channel="channel", + message_parameters={}, + message_parameters_operations={}, + attachments={}, + fire_and_forget=True, + disable_sink=False, + cooldown_seconds=1, + cooldown_session_key="unique", + ) + + # then + assert result == { + "error_status": False, + "throttling_status": False, + "message": "Notification sent in the background task", + } + background_tasks.add_task.assert_called_once() + + +def test_sending_slack_notification_asynchronously_in_thread_pool_executor() -> None: + # given + thread_pool_executor = MagicMock() + block = SlackNotificationBlockV1( + cache=MemoryCache(), + background_tasks=None, + thread_pool_executor=thread_pool_executor, + ) + + # when + result = block.run( + slack_token="token", + message="some", + channel="channel", + message_parameters={}, + 
message_parameters_operations={}, + attachments={}, + fire_and_forget=True, + disable_sink=False, + cooldown_seconds=1, + cooldown_session_key="unique", + ) + + # then + assert result == { + "error_status": False, + "throttling_status": False, + "message": "Notification sent in the background task", + } + thread_pool_executor.submit.assert_called_once() diff --git a/tests/workflows/unit_tests/core_steps/sinks/test_twilio_sms_notification.py b/tests/workflows/unit_tests/core_steps/sinks/test_twilio_sms_notification.py new file mode 100644 index 000000000..0cb148602 --- /dev/null +++ b/tests/workflows/unit_tests/core_steps/sinks/test_twilio_sms_notification.py @@ -0,0 +1,548 @@ +import time +from unittest import mock +from unittest.mock import MagicMock + +import pytest +from pydantic import ValidationError + +from inference.core.cache import MemoryCache +from inference.core.workflows.core_steps.common.query_language.entities.operations import ( + StringToUpperCase, +) +from inference.core.workflows.core_steps.sinks.twilio.sms import v1 +from inference.core.workflows.core_steps.sinks.twilio.sms.v1 import ( + BlockManifest, + TwilioSMSNotificationBlockV1, + format_message, + send_sms_notification, +) + + +def test_manifest_parsing_when_the_input_is_valid() -> None: + # given + raw_manifest = { + "type": "roboflow_core/twilio_sms_notification@v1", + "name": "twilio_sms_notification", + "twilio_account_sid": "$inputs.sid", + "twilio_auth_token": "$inputs.auth_token", + "message": "My message", + "sender_number": "some", + "receiver_number": "other", + "message_parameters": { + "image": "$inputs.image", + }, + "message_parameters_operations": { + "image": [{"type": "ConvertImageToJPEG"}], + }, + "fire_and_forget": True, + "cooldown_seconds": "$inputs.cooldown", + "cooldown_session_key": "unique-session-key", + "length_limit": 160, + } + + # when + result = BlockManifest.model_validate(raw_manifest) + + # then + assert result == BlockManifest( + type="roboflow_core/twilio_sms_notification@v1", + name="twilio_sms_notification", + twilio_account_sid="$inputs.sid", + twilio_auth_token="$inputs.auth_token", + message="My message", + sender_number="some", + receiver_number="other", + message_parameters={ + "image": "$inputs.image", + }, + message_parameters_operations={ + "image": [{"type": "ConvertImageToJPEG"}], + }, + fire_and_forget=True, + cooldown_seconds="$inputs.cooldown", + cooldown_session_key="unique-session-key", + length_limit=160, + ) + + +def test_manifest_parsing_when_cooldown_seconds_is_invalid() -> None: + # given + raw_manifest = { + "type": "roboflow_core/twilio_sms_notification@v1", + "name": "twilio_sms_notification", + "twilio_account_sid": "$inputs.sid", + "twilio_auth_token": "$inputs.auth_token", + "message": "My message", + "sender_number": "some", + "receiver_number": "other", + "message_parameters": { + "image": "$inputs.image", + }, + "message_parameters_operations": { + "image": [{"type": "ConvertImageToJPEG"}], + }, + "fire_and_forget": True, + "cooldown_seconds": -1, + "cooldown_session_key": "unique-session-key", + "length_limit": 160, + } + + # when + with pytest.raises(ValidationError): + _ = BlockManifest.model_validate(raw_manifest) + + +def test_manifest_parsing_when_length_limit_is_invalid() -> None: + # given + raw_manifest = { + "type": "roboflow_core/twilio_sms_notification@v1", + "name": "twilio_sms_notification", + "twilio_account_sid": "$inputs.sid", + "twilio_auth_token": "$inputs.auth_token", + "message": "My message", + "sender_number": "some", + 
"receiver_number": "other", + "message_parameters": { + "image": "$inputs.image", + }, + "message_parameters_operations": { + "image": [{"type": "ConvertImageToJPEG"}], + }, + "fire_and_forget": True, + "cooldown_seconds": 5, + "cooldown_session_key": "unique-session-key", + "length_limit": 0, + } + + # when + with pytest.raises(ValidationError): + _ = BlockManifest.model_validate(raw_manifest) + + +def test_format_message_when_multiple_occurrences_of_the_same_parameter_exist() -> None: + # given + message = "This is example param: {{{ $parameters.param }}} - and this is aloso param: `{{ $parameters.param }}`" + + # when + result = format_message( + message=message, + message_parameters={"param": "some"}, + message_parameters_operations={}, + length_limit=1024, + ) + + # then + assert result == "This is example param: {some} - and this is aloso param: `some`" + + +def test_format_message_when_multiple_parameters_exist() -> None: + # given + message = "This is example param: {{ $parameters.param }} - and this is aloso param: `{{ $parameters.other }}`" + + # when + result = format_message( + message=message, + message_parameters={"param": "some", "other": 42}, + message_parameters_operations={}, + length_limit=1024, + ) + + # then + assert result == "This is example param: some - and this is aloso param: `42`" + + +def test_format_message_when_different_combinations_of_whitespaces_exist_in_template_parameter_anchor() -> ( + None +): + # given + message = "{{{ $parameters.param }}} - {{$parameters.param }} - {{ $parameters.param}} - {{ $parameters.param }}" + + # when + result = format_message( + message=message, + message_parameters={"param": "some"}, + message_parameters_operations={}, + length_limit=1024, + ) + + # then + assert result == "{some} - some - some - some" + + +def test_format_message_when_operation_to_apply_on_parameter() -> None: + # given + message = "This is example param: {{{ $parameters.param }}} - and this is aloso param: `{{ $parameters.param }}`" + + # when + result = format_message( + message=message, + message_parameters={"param": "some"}, + message_parameters_operations={ + "param": [StringToUpperCase(type="StringToUpperCase")] + }, + length_limit=1024, + ) + + # then + assert result == "This is example param: {SOME} - and this is aloso param: `SOME`" + + +def test_format_message_when_output_needs_to_be_truncated() -> None: + # given + message = "This is example param: {{{ $parameters.param }}} - and this is aloso param: `{{ $parameters.param }}`" + + # when + result = format_message( + message=message, + message_parameters={"param": "some"}, + message_parameters_operations={ + "param": [StringToUpperCase(type="StringToUpperCase")] + }, + length_limit=32, + ) + + # then + assert result == "This is example param: {SO [...]" + assert len(result) == 32 + + +def test_send_twilio_sms_notification_when_error_raised() -> None: + # given + client = MagicMock() + client.messages.create = Exception() + + # when + result = send_sms_notification( + client=client, + message="msg", + sender_number="some", + receiver_number="other", + ) + + # then + assert result[0] is True + assert result[1].startswith("Failed to send Twilio SMS notification") + + +def test_send_twilio_sms_notification_when_operation_succeeded() -> None: + # given + client = MagicMock() + + # when + result = send_sms_notification( + client=client, + message="msg", + sender_number="some", + receiver_number="other", + ) + + # then + assert result[0] is False + client.messages.create.assert_called_once_with( + 
body="msg", + from_="some", + to="other", + ) + + +def test_cooldown_in_twilio_sms_notification_block() -> None: + # given + thread_pool_executor = MagicMock() + block = TwilioSMSNotificationBlockV1( + cache=MemoryCache(), + background_tasks=None, + thread_pool_executor=thread_pool_executor, + ) + + # when + results = [] + for _ in range(2): + result = block.run( + twilio_account_sid="token", + twilio_auth_token="token", + message="some", + sender_number="from", + receiver_number="to", + message_parameters={}, + message_parameters_operations={}, + fire_and_forget=True, + disable_sink=False, + cooldown_seconds=100, + cooldown_session_key="unique", + length_limit=128, + ) + results.append(result) + + # then + assert results[0] == { + "error_status": False, + "throttling_status": False, + "message": "Notification sent in the background task", + } + assert results[1] == { + "error_status": False, + "throttling_status": True, + "message": "Sink cooldown applies", + } + + +def test_cooldown_in_twilio_sms_notification_block_for_separate_sessions() -> None: + # given + thread_pool_executor = MagicMock() + block = TwilioSMSNotificationBlockV1( + cache=MemoryCache(), + background_tasks=None, + thread_pool_executor=thread_pool_executor, + ) + + # when + results = [] + for i in range(2): + result = block.run( + twilio_account_sid="token", + twilio_auth_token="token", + message="some", + sender_number="from", + receiver_number="to", + message_parameters={}, + message_parameters_operations={}, + fire_and_forget=True, + disable_sink=False, + cooldown_seconds=100, + cooldown_session_key=f"unique-{i}", + length_limit=128, + ) + results.append(result) + + # then + assert results[0] == { + "error_status": False, + "throttling_status": False, + "message": "Notification sent in the background task", + } + assert results[0] == { + "error_status": False, + "throttling_status": False, + "message": "Notification sent in the background task", + } + + +def test_disabling_cooldown_in_twilio_sms_notification_block() -> None: + # given + thread_pool_executor = MagicMock() + block = TwilioSMSNotificationBlockV1( + cache=MemoryCache(), + background_tasks=None, + thread_pool_executor=thread_pool_executor, + ) + + # when + results = [] + for _ in range(2): + result = block.run( + twilio_account_sid="token", + twilio_auth_token="token", + message="some", + sender_number="from", + receiver_number="to", + message_parameters={}, + message_parameters_operations={}, + fire_and_forget=True, + disable_sink=False, + cooldown_seconds=0, + cooldown_session_key=f"unique", + length_limit=128, + ) + results.append(result) + + # then + assert results[0] == { + "error_status": False, + "throttling_status": False, + "message": "Notification sent in the background task", + } + assert results[1] == { + "error_status": False, + "throttling_status": False, + "message": "Notification sent in the background task", + } + + +def test_cooldown_recovery_in_twilio_sms_notification_block() -> None: + # given + thread_pool_executor = MagicMock() + block = TwilioSMSNotificationBlockV1( + cache=MemoryCache(), + background_tasks=None, + thread_pool_executor=thread_pool_executor, + ) + + # when + results = [] + for _ in range(2): + result = block.run( + twilio_account_sid="token", + twilio_auth_token="token", + message="some", + sender_number="from", + receiver_number="to", + message_parameters={}, + message_parameters_operations={}, + fire_and_forget=True, + disable_sink=False, + cooldown_seconds=1, + cooldown_session_key=f"unique", + length_limit=128, + 
+        )
+        results.append(result)
+        time.sleep(1.5)
+
+    # then
+    assert results[0] == {
+        "error_status": False,
+        "throttling_status": False,
+        "message": "Notification sent in the background task",
+    }
+    assert results[1] == {
+        "error_status": False,
+        "throttling_status": False,
+        "message": "Notification sent in the background task",
+    }
+
+
+@mock.patch.object(v1, "send_sms_notification")
+def test_sending_twilio_sms_notification_synchronously(
+    send_sms_notification_mock: MagicMock,
+) -> None:
+    # given
+    send_sms_notification_mock.return_value = (False, "ok")
+    block = TwilioSMSNotificationBlockV1(
+        cache=MemoryCache(),
+        background_tasks=None,
+        thread_pool_executor=MagicMock(),
+    )
+
+    # when
+    result = block.run(
+        twilio_account_sid="token",
+        twilio_auth_token="token",
+        message="some",
+        sender_number="from",
+        receiver_number="to",
+        message_parameters={},
+        message_parameters_operations={},
+        fire_and_forget=False,
+        disable_sink=False,
+        cooldown_seconds=1,
+        cooldown_session_key="unique",
+        length_limit=128,
+    )
+
+    # then
+    assert result == {
+        "error_status": False,
+        "throttling_status": False,
+        "message": "ok",
+    }
+
+
+def test_disabling_twilio_sms_notification() -> None:
+    # given
+    block = TwilioSMSNotificationBlockV1(
+        cache=MemoryCache(),
+        background_tasks=None,
+        thread_pool_executor=MagicMock(),
+    )
+
+    # when
+    result = block.run(
+        twilio_account_sid="token",
+        twilio_auth_token="token",
+        message="some",
+        sender_number="from",
+        receiver_number="to",
+        message_parameters={},
+        message_parameters_operations={},
+        fire_and_forget=False,
+        disable_sink=True,
+        cooldown_seconds=1,
+        cooldown_session_key="unique",
+        length_limit=128,
+    )
+
+    # then
+    assert result == {
+        "error_status": False,
+        "throttling_status": False,
+        "message": "Sink was disabled by parameter `disable_sink`",
+    }
+
+
+def test_sending_twilio_sms_notification_asynchronously_in_background_tasks() -> None:
+    # given
+    background_tasks = MagicMock()
+    block = TwilioSMSNotificationBlockV1(
+        cache=MemoryCache(),
+        background_tasks=background_tasks,
+        thread_pool_executor=None,
+    )
+
+    # when
+    result = block.run(
+        twilio_account_sid="token",
+        twilio_auth_token="token",
+        message="some",
+        sender_number="from",
+        receiver_number="to",
+        message_parameters={},
+        message_parameters_operations={},
+        fire_and_forget=True,
+        disable_sink=False,
+        cooldown_seconds=1,
+        cooldown_session_key="unique",
+        length_limit=128,
+    )
+
+    # then
+    assert result == {
+        "error_status": False,
+        "throttling_status": False,
+        "message": "Notification sent in the background task",
+    }
+    background_tasks.add_task.assert_called_once()
+
+
+def test_sending_twilio_sms_notification_asynchronously_in_thread_pool_executor() -> (
+    None
+):
+    # given
+    thread_pool_executor = MagicMock()
+    block = TwilioSMSNotificationBlockV1(
+        cache=MemoryCache(),
+        background_tasks=None,
+        thread_pool_executor=thread_pool_executor,
+    )
+
+    # when
+    result = block.run(
+        twilio_account_sid="token",
+        twilio_auth_token="token",
+        message="some",
+        sender_number="from",
+        receiver_number="to",
+        message_parameters={},
+        message_parameters_operations={},
+        fire_and_forget=True,
+        disable_sink=False,
+        cooldown_seconds=1,
+        cooldown_session_key="unique",
+        length_limit=128,
+    )
+
+    # then
+    assert result == {
+        "error_status": False,
+        "throttling_status": False,
+        "message": "Notification sent in the background task",
+    }
+    thread_pool_executor.submit.assert_called_once()

From affabe2de3ff69a6fbae6dc0b72fc9c02128b269 Mon Sep 17 00:00:00 
2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Fri, 29 Nov 2024 14:12:21 +0100 Subject: [PATCH 09/10] Add styling to new blocks --- .../secrets_providers/environment_secrets_store/v1.py | 4 ++++ .../workflows/core_steps/sinks/slack/notification/v1.py | 6 +++++- inference/core/workflows/core_steps/sinks/twilio/sms/v1.py | 6 +++++- 3 files changed, 14 insertions(+), 2 deletions(-) diff --git a/inference/core/workflows/core_steps/secrets_providers/environment_secrets_store/v1.py b/inference/core/workflows/core_steps/secrets_providers/environment_secrets_store/v1.py index d78304223..16df139bd 100644 --- a/inference/core/workflows/core_steps/secrets_providers/environment_secrets_store/v1.py +++ b/inference/core/workflows/core_steps/secrets_providers/environment_secrets_store/v1.py @@ -21,6 +21,10 @@ class BlockManifest(WorkflowBlockManifest): "long_description": "TODO", "license": "Apache-2.0", "block_type": "secrets_provider", + "ui_manifest": { + "section": "advanced", + "icon": "far fa-key", + }, } ) type: Literal["roboflow_core/environment_secrets_store@v1"] diff --git a/inference/core/workflows/core_steps/sinks/slack/notification/v1.py b/inference/core/workflows/core_steps/sinks/slack/notification/v1.py index 868ae4b8a..cce3d7a2a 100644 --- a/inference/core/workflows/core_steps/sinks/slack/notification/v1.py +++ b/inference/core/workflows/core_steps/sinks/slack/notification/v1.py @@ -51,12 +51,16 @@ class BlockManifest(WorkflowBlockManifest): "long_description": LONG_DESCRIPTION, "license": "Apache-2.0", "block_type": "sink", + "ui_manifest": { + "section": "notifications", + "icon": "far fa-brands fa-slack", + }, } ) type: Literal["roboflow_core/slack_notification@v1"] slack_token: Union[str, Selector(kind=[STRING_KIND, SECRET_KIND])] = Field( description="Slack Token. Visit " - "Slack docks " + "https://api.slack.com/tutorials/tracks/getting-a-token " "to find out how to generate the token.", private=True, examples=["$inputs.slack_token"], diff --git a/inference/core/workflows/core_steps/sinks/twilio/sms/v1.py b/inference/core/workflows/core_steps/sinks/twilio/sms/v1.py index e36590d76..33683f0d7 100644 --- a/inference/core/workflows/core_steps/sinks/twilio/sms/v1.py +++ b/inference/core/workflows/core_steps/sinks/twilio/sms/v1.py @@ -50,13 +50,17 @@ class BlockManifest(WorkflowBlockManifest): "long_description": LONG_DESCRIPTION, "license": "Apache-2.0", "block_type": "sink", + "ui_manifest": { + "section": "notifications", + "icon": "far fa-comment-sms", + }, } ) type: Literal["roboflow_core/twilio_sms_notification@v1"] twilio_account_sid: Union[str, Selector(kind=[STRING_KIND, SECRET_KIND])] = Field( title="Twilio Account SID", description="Twilio Account SID. 
Visit " - "Twilio Console " + "https://twilio.com/console " "to set up SMS service and fetch the value.", private=True, examples=["$inputs.twilio_account_sid"], From 626e66a5dd60bf1383b33138d6dcbf6746fc1a1d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Fri, 29 Nov 2024 14:15:44 +0100 Subject: [PATCH 10/10] Merge with main --- inference/core/version.py | 2 +- .../enterprise/workflows/enterprise_blocks/sinks/__init__.py | 0 .../workflows/enterprise_blocks/sinks/opc_writer/v1.py | 2 +- .../execution/test_workflow_with_opc_writer.py | 2 +- 4 files changed, 3 insertions(+), 3 deletions(-) create mode 100644 inference/enterprise/workflows/enterprise_blocks/sinks/__init__.py diff --git a/inference/core/version.py b/inference/core/version.py index 3edb0bb7e..6e3fed1dd 100644 --- a/inference/core/version.py +++ b/inference/core/version.py @@ -1,4 +1,4 @@ -__version__ = "0.28.2" +__version__ = "0.29.0" if __name__ == "__main__": diff --git a/inference/enterprise/workflows/enterprise_blocks/sinks/__init__.py b/inference/enterprise/workflows/enterprise_blocks/sinks/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/inference/enterprise/workflows/enterprise_blocks/sinks/opc_writer/v1.py b/inference/enterprise/workflows/enterprise_blocks/sinks/opc_writer/v1.py index 299bd2c6b..3303a0fb0 100644 --- a/inference/enterprise/workflows/enterprise_blocks/sinks/opc_writer/v1.py +++ b/inference/enterprise/workflows/enterprise_blocks/sinks/opc_writer/v1.py @@ -107,7 +107,7 @@ class BlockManifest(WorkflowBlockManifest): "version": "v1", "short_description": "Pushes data to OPC server, this block is making use of [asyncua](https://github.com/FreeOpcUa/opcua-asyncio)", "long_description": LONG_DESCRIPTION, - "license": "Apache-2.0", + "license": "Roboflow Enterprise License", "block_type": "sink", } ) diff --git a/tests/workflows/integration_tests/execution/test_workflow_with_opc_writer.py b/tests/workflows/integration_tests/execution/test_workflow_with_opc_writer.py index 3d5c7933c..6a55b5017 100644 --- a/tests/workflows/integration_tests/execution/test_workflow_with_opc_writer.py +++ b/tests/workflows/integration_tests/execution/test_workflow_with_opc_writer.py @@ -1,9 +1,9 @@ import asyncio -import pytest import threading import time from typing import Optional, Union +import pytest from asyncua import Server from asyncua.client import Client as AsyncClient from asyncua.server.users import User, UserRole