From b2c1b0b4c66b31fd274cb0b3233d3297d50f1975 Mon Sep 17 00:00:00 2001
From: Ryan K
Date: Thu, 10 Oct 2024 09:27:37 -0700
Subject: [PATCH 01/26] feat: Updated instance management SDK to
 2024-09-15-preview (#409)

---
 .../edge/vendor/clients/iotopsmgmt/_client.py |   2 +-
 .../clients/iotopsmgmt/_configuration.py      |   4 +-
 .../iotopsmgmt/operations/_operations.py      | 774 +++---------------
 .../edge/orchestration/resources/conftest.py  |   2 +-
 4 files changed, 114 insertions(+), 668 deletions(-)

diff --git a/azext_edge/edge/vendor/clients/iotopsmgmt/_client.py b/azext_edge/edge/vendor/clients/iotopsmgmt/_client.py
index 785107257..1cd45f6fb 100644
--- a/azext_edge/edge/vendor/clients/iotopsmgmt/_client.py
+++ b/azext_edge/edge/vendor/clients/iotopsmgmt/_client.py
@@ -58,7 +58,7 @@ class MicrosoftIoTOperationsManagementService:  # pylint: disable=client-accepts
     :type credential: ~azure.core.credentials.TokenCredential
     :keyword endpoint: Service URL. Default value is "https://management.azure.com".
     :paramtype endpoint: str
-    :keyword api_version: Api Version. Default value is "2024-08-15-preview". Note that overriding
+    :keyword api_version: Api Version. Default value is "2024-09-15-preview". Note that overriding
        this default value may result in unsupported behavior.
     :paramtype api_version: str
     :keyword int polling_interval: Default waiting time between two polls for LRO operations if no

diff --git a/azext_edge/edge/vendor/clients/iotopsmgmt/_configuration.py b/azext_edge/edge/vendor/clients/iotopsmgmt/_configuration.py
index 3559393e5..4e7f6b2a5 100644
--- a/azext_edge/edge/vendor/clients/iotopsmgmt/_configuration.py
+++ b/azext_edge/edge/vendor/clients/iotopsmgmt/_configuration.py
@@ -31,14 +31,14 @@ class MicrosoftIoTOperationsManagementServiceConfiguration(
     :type subscription_id: str
     :param credential: Credential needed for the client to connect to Azure. Required.
     :type credential: ~azure.core.credentials.TokenCredential
-    :keyword api_version: Api Version. Default value is "2024-08-15-preview". Note that overriding
+    :keyword api_version: Api Version. Default value is "2024-09-15-preview". Note that overriding
        this default value may result in unsupported behavior.
    :paramtype api_version: str
     """

     def __init__(self, subscription_id: str, credential: "TokenCredential", **kwargs: Any) -> None:
         super(MicrosoftIoTOperationsManagementServiceConfiguration, self).__init__(**kwargs)
-        api_version: str = kwargs.pop("api_version", "2024-08-15-preview")
+        api_version: str = kwargs.pop("api_version", "2024-09-15-preview")

         if subscription_id is None:
             raise ValueError("Parameter 'subscription_id' must not be None.")

diff --git a/azext_edge/edge/vendor/clients/iotopsmgmt/operations/_operations.py b/azext_edge/edge/vendor/clients/iotopsmgmt/operations/_operations.py
index f1b5686d1..943d96ea8 100644
--- a/azext_edge/edge/vendor/clients/iotopsmgmt/operations/_operations.py
+++ b/azext_edge/edge/vendor/clients/iotopsmgmt/operations/_operations.py
@@ -48,7 +48,7 @@ def build_operations_list_request(**kwargs: Any) -> HttpRequest:
     _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
     _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

-    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-08-15-preview"))
+    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-09-15-preview"))
     accept = _headers.pop("Accept", "application/json")

     # Construct URL
@@ -67,7 +67,7 @@ def build_instance_list_by_subscription_request(subscription_id: str, **kwargs:
     _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
     _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

-    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-08-15-preview"))
+    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-09-15-preview"))
     accept = _headers.pop("Accept", "application/json")

     # Construct URL
@@ -93,7 +93,7 @@ def build_instance_list_by_resource_group_request(
     _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
     _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

-    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-08-15-preview"))
+    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-09-15-preview"))
     accept = _headers.pop("Accept", "application/json")

     # Construct URL
@@ -124,7 +124,7 @@ def build_instance_get_request(
     _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
     _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

-    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-08-15-preview"))
+    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-09-15-preview"))
     accept = _headers.pop("Accept", "application/json")

     # Construct URL
@@ -157,7 +157,7 @@ def build_instance_create_or_update_request(
     _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

     content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
-    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-08-15-preview"))
+    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-09-15-preview"))
     accept = _headers.pop("Accept", "application/json")

     # Construct URL
@@ -192,7 +192,7 @@ def build_instance_update_request(
     _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

     content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
-    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-08-15-preview"))
+    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-09-15-preview"))
     accept =
_headers.pop("Accept", "application/json") # Construct URL @@ -226,7 +226,7 @@ def build_instance_delete_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-08-15-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-09-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -258,7 +258,7 @@ def build_broker_list_by_resource_group_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-08-15-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-09-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -290,7 +290,7 @@ def build_broker_get_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-08-15-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-09-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -326,7 +326,7 @@ def build_broker_create_or_update_request( _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-08-15-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-09-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -363,7 +363,7 @@ def build_broker_delete_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-08-15-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-09-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -398,7 +398,7 @@ def build_broker_authentication_list_by_resource_group_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-08-15-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-09-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -438,7 +438,7 @@ def build_broker_authentication_get_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-08-15-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-09-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -487,7 +487,7 @@ def build_broker_authentication_create_or_update_request( _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", 
_params.pop("api-version", "2024-08-15-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-09-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -537,7 +537,7 @@ def build_broker_authentication_delete_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-08-15-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-09-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -580,7 +580,7 @@ def build_broker_authorization_list_by_resource_group_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-08-15-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-09-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -620,7 +620,7 @@ def build_broker_authorization_get_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-08-15-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-09-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -669,7 +669,7 @@ def build_broker_authorization_create_or_update_request( _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-08-15-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-09-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -719,7 +719,7 @@ def build_broker_authorization_delete_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-08-15-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-09-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -762,7 +762,7 @@ def build_broker_listener_list_by_resource_group_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-08-15-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-09-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -802,7 +802,7 @@ def build_broker_listener_get_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-08-15-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-09-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -846,7 +846,7 @@ def build_broker_listener_create_or_update_request( 
_params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-08-15-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-09-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -891,7 +891,7 @@ def build_broker_listener_delete_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-08-15-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-09-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -929,7 +929,7 @@ def build_dataflow_endpoint_list_by_resource_group_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-08-15-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-09-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -961,7 +961,7 @@ def build_dataflow_endpoint_get_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-08-15-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-09-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -1002,7 +1002,7 @@ def build_dataflow_endpoint_create_or_update_request( _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-08-15-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-09-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -1044,7 +1044,7 @@ def build_dataflow_endpoint_delete_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-08-15-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-09-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -1084,7 +1084,7 @@ def build_dataflow_profile_list_by_resource_group_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-08-15-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-09-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -1116,7 +1116,7 @@ def build_dataflow_profile_get_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-08-15-preview")) + api_version: str = 
kwargs.pop("api_version", _params.pop("api-version", "2024-09-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -1157,7 +1157,7 @@ def build_dataflow_profile_create_or_update_request( _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-08-15-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-09-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -1199,7 +1199,7 @@ def build_dataflow_profile_delete_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-08-15-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-09-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -1239,7 +1239,7 @@ def build_dataflow_list_by_profile_resource_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-08-15-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-09-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -1284,7 +1284,7 @@ def build_dataflow_get_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-08-15-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-09-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -1333,7 +1333,7 @@ def build_dataflow_create_or_update_request( _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-08-15-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-09-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -1383,7 +1383,7 @@ def build_dataflow_delete_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-08-15-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-09-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -1599,32 +1599,9 @@ def list_by_subscription(self, **kwargs: Any) -> Iterable[JSON]: }, "name": "str", # Optional. The name of the resource. "properties": { - "schemaRegistryNamespace": "str", # The reference to the Schema - Registry for this AIO Instance. Required. - "components": { - "adr": { - "state": "str" # This determines if the ADR service - is enabled. Required. Known values are: "Enabled" and "Disabled". - }, - "akri": { - "state": "str" # This determines if the AKRI service - is enabled. Required. Known values are: "Enabled" and "Disabled". 
- }, - "connectors": { - "state": "str" # This determines if the Connector - service is enabled. Required. Known values are: "Enabled" and - "Disabled". - }, - "dataflows": { - "state": "str" # This determines if the Dataflow - service is enabled. Required. Known values are: "Enabled" and - "Disabled". - }, - "schemaRegistry": { - "state": "str" # This determines if the Schema - Registry service is enabled. Required. Known values are: "Enabled" - and "Disabled". - } + "schemaRegistryRef": { + "resourceId": "str" # The resource ID of the Schema + Registry. Required. }, "description": "str", # Optional. Detailed description of the Instance. @@ -1765,32 +1742,9 @@ def list_by_resource_group(self, resource_group_name: str, **kwargs: Any) -> Ite }, "name": "str", # Optional. The name of the resource. "properties": { - "schemaRegistryNamespace": "str", # The reference to the Schema - Registry for this AIO Instance. Required. - "components": { - "adr": { - "state": "str" # This determines if the ADR service - is enabled. Required. Known values are: "Enabled" and "Disabled". - }, - "akri": { - "state": "str" # This determines if the AKRI service - is enabled. Required. Known values are: "Enabled" and "Disabled". - }, - "connectors": { - "state": "str" # This determines if the Connector - service is enabled. Required. Known values are: "Enabled" and - "Disabled". - }, - "dataflows": { - "state": "str" # This determines if the Dataflow - service is enabled. Required. Known values are: "Enabled" and - "Disabled". - }, - "schemaRegistry": { - "state": "str" # This determines if the Schema - Registry service is enabled. Required. Known values are: "Enabled" - and "Disabled". - } + "schemaRegistryRef": { + "resourceId": "str" # The resource ID of the Schema + Registry. Required. }, "description": "str", # Optional. Detailed description of the Instance. @@ -1934,32 +1888,9 @@ def get(self, resource_group_name: str, instance_name: str, **kwargs: Any) -> JS }, "name": "str", # Optional. The name of the resource. "properties": { - "schemaRegistryNamespace": "str", # The reference to the Schema - Registry for this AIO Instance. Required. - "components": { - "adr": { - "state": "str" # This determines if the ADR service - is enabled. Required. Known values are: "Enabled" and "Disabled". - }, - "akri": { - "state": "str" # This determines if the AKRI service - is enabled. Required. Known values are: "Enabled" and "Disabled". - }, - "connectors": { - "state": "str" # This determines if the Connector - service is enabled. Required. Known values are: "Enabled" and - "Disabled". - }, - "dataflows": { - "state": "str" # This determines if the Dataflow - service is enabled. Required. Known values are: "Enabled" and - "Disabled". - }, - "schemaRegistry": { - "state": "str" # This determines if the Schema - Registry service is enabled. Required. Known values are: "Enabled" - and "Disabled". - } + "schemaRegistryRef": { + "resourceId": "str" # The resource ID of the Schema + Registry. Required. }, "description": "str", # Optional. Detailed description of the Instance. @@ -2173,32 +2104,9 @@ def begin_create_or_update( }, "name": "str", # Optional. The name of the resource. "properties": { - "schemaRegistryNamespace": "str", # The reference to the Schema - Registry for this AIO Instance. Required. - "components": { - "adr": { - "state": "str" # This determines if the ADR service - is enabled. Required. Known values are: "Enabled" and "Disabled". 
- }, - "akri": { - "state": "str" # This determines if the AKRI service - is enabled. Required. Known values are: "Enabled" and "Disabled". - }, - "connectors": { - "state": "str" # This determines if the Connector - service is enabled. Required. Known values are: "Enabled" and - "Disabled". - }, - "dataflows": { - "state": "str" # This determines if the Dataflow - service is enabled. Required. Known values are: "Enabled" and - "Disabled". - }, - "schemaRegistry": { - "state": "str" # This determines if the Schema - Registry service is enabled. Required. Known values are: "Enabled" - and "Disabled". - } + "schemaRegistryRef": { + "resourceId": "str" # The resource ID of the Schema + Registry. Required. }, "description": "str", # Optional. Detailed description of the Instance. @@ -2260,32 +2168,9 @@ def begin_create_or_update( }, "name": "str", # Optional. The name of the resource. "properties": { - "schemaRegistryNamespace": "str", # The reference to the Schema - Registry for this AIO Instance. Required. - "components": { - "adr": { - "state": "str" # This determines if the ADR service - is enabled. Required. Known values are: "Enabled" and "Disabled". - }, - "akri": { - "state": "str" # This determines if the AKRI service - is enabled. Required. Known values are: "Enabled" and "Disabled". - }, - "connectors": { - "state": "str" # This determines if the Connector - service is enabled. Required. Known values are: "Enabled" and - "Disabled". - }, - "dataflows": { - "state": "str" # This determines if the Dataflow - service is enabled. Required. Known values are: "Enabled" and - "Disabled". - }, - "schemaRegistry": { - "state": "str" # This determines if the Schema - Registry service is enabled. Required. Known values are: "Enabled" - and "Disabled". - } + "schemaRegistryRef": { + "resourceId": "str" # The resource ID of the Schema + Registry. Required. }, "description": "str", # Optional. Detailed description of the Instance. @@ -2384,32 +2269,9 @@ def begin_create_or_update( }, "name": "str", # Optional. The name of the resource. "properties": { - "schemaRegistryNamespace": "str", # The reference to the Schema - Registry for this AIO Instance. Required. - "components": { - "adr": { - "state": "str" # This determines if the ADR service - is enabled. Required. Known values are: "Enabled" and "Disabled". - }, - "akri": { - "state": "str" # This determines if the AKRI service - is enabled. Required. Known values are: "Enabled" and "Disabled". - }, - "connectors": { - "state": "str" # This determines if the Connector - service is enabled. Required. Known values are: "Enabled" and - "Disabled". - }, - "dataflows": { - "state": "str" # This determines if the Dataflow - service is enabled. Required. Known values are: "Enabled" and - "Disabled". - }, - "schemaRegistry": { - "state": "str" # This determines if the Schema - Registry service is enabled. Required. Known values are: "Enabled" - and "Disabled". - } + "schemaRegistryRef": { + "resourceId": "str" # The resource ID of the Schema + Registry. Required. }, "description": "str", # Optional. Detailed description of the Instance. @@ -2502,32 +2364,9 @@ def begin_create_or_update( }, "name": "str", # Optional. The name of the resource. "properties": { - "schemaRegistryNamespace": "str", # The reference to the Schema - Registry for this AIO Instance. Required. - "components": { - "adr": { - "state": "str" # This determines if the ADR service - is enabled. Required. Known values are: "Enabled" and "Disabled". 
- }, - "akri": { - "state": "str" # This determines if the AKRI service - is enabled. Required. Known values are: "Enabled" and "Disabled". - }, - "connectors": { - "state": "str" # This determines if the Connector - service is enabled. Required. Known values are: "Enabled" and - "Disabled". - }, - "dataflows": { - "state": "str" # This determines if the Dataflow - service is enabled. Required. Known values are: "Enabled" and - "Disabled". - }, - "schemaRegistry": { - "state": "str" # This determines if the Schema - Registry service is enabled. Required. Known values are: "Enabled" - and "Disabled". - } + "schemaRegistryRef": { + "resourceId": "str" # The resource ID of the Schema + Registry. Required. }, "description": "str", # Optional. Detailed description of the Instance. @@ -2589,32 +2428,9 @@ def begin_create_or_update( }, "name": "str", # Optional. The name of the resource. "properties": { - "schemaRegistryNamespace": "str", # The reference to the Schema - Registry for this AIO Instance. Required. - "components": { - "adr": { - "state": "str" # This determines if the ADR service - is enabled. Required. Known values are: "Enabled" and "Disabled". - }, - "akri": { - "state": "str" # This determines if the AKRI service - is enabled. Required. Known values are: "Enabled" and "Disabled". - }, - "connectors": { - "state": "str" # This determines if the Connector - service is enabled. Required. Known values are: "Enabled" and - "Disabled". - }, - "dataflows": { - "state": "str" # This determines if the Dataflow - service is enabled. Required. Known values are: "Enabled" and - "Disabled". - }, - "schemaRegistry": { - "state": "str" # This determines if the Schema - Registry service is enabled. Required. Known values are: "Enabled" - and "Disabled". - } + "schemaRegistryRef": { + "resourceId": "str" # The resource ID of the Schema + Registry. Required. }, "description": "str", # Optional. Detailed description of the Instance. @@ -2779,32 +2595,9 @@ def update( }, "name": "str", # Optional. The name of the resource. "properties": { - "schemaRegistryNamespace": "str", # The reference to the Schema - Registry for this AIO Instance. Required. - "components": { - "adr": { - "state": "str" # This determines if the ADR service - is enabled. Required. Known values are: "Enabled" and "Disabled". - }, - "akri": { - "state": "str" # This determines if the AKRI service - is enabled. Required. Known values are: "Enabled" and "Disabled". - }, - "connectors": { - "state": "str" # This determines if the Connector - service is enabled. Required. Known values are: "Enabled" and - "Disabled". - }, - "dataflows": { - "state": "str" # This determines if the Dataflow - service is enabled. Required. Known values are: "Enabled" and - "Disabled". - }, - "schemaRegistry": { - "state": "str" # This determines if the Schema - Registry service is enabled. Required. Known values are: "Enabled" - and "Disabled". - } + "schemaRegistryRef": { + "resourceId": "str" # The resource ID of the Schema + Registry. Required. }, "description": "str", # Optional. Detailed description of the Instance. @@ -2896,32 +2689,9 @@ def update( }, "name": "str", # Optional. The name of the resource. "properties": { - "schemaRegistryNamespace": "str", # The reference to the Schema - Registry for this AIO Instance. Required. - "components": { - "adr": { - "state": "str" # This determines if the ADR service - is enabled. Required. Known values are: "Enabled" and "Disabled". 
- }, - "akri": { - "state": "str" # This determines if the AKRI service - is enabled. Required. Known values are: "Enabled" and "Disabled". - }, - "connectors": { - "state": "str" # This determines if the Connector - service is enabled. Required. Known values are: "Enabled" and - "Disabled". - }, - "dataflows": { - "state": "str" # This determines if the Dataflow - service is enabled. Required. Known values are: "Enabled" and - "Disabled". - }, - "schemaRegistry": { - "state": "str" # This determines if the Schema - Registry service is enabled. Required. Known values are: "Enabled" - and "Disabled". - } + "schemaRegistryRef": { + "resourceId": "str" # The resource ID of the Schema + Registry. Required. }, "description": "str", # Optional. Detailed description of the Instance. @@ -3032,32 +2802,9 @@ def update(self, resource_group_name: str, instance_name: str, properties: Union }, "name": "str", # Optional. The name of the resource. "properties": { - "schemaRegistryNamespace": "str", # The reference to the Schema - Registry for this AIO Instance. Required. - "components": { - "adr": { - "state": "str" # This determines if the ADR service - is enabled. Required. Known values are: "Enabled" and "Disabled". - }, - "akri": { - "state": "str" # This determines if the AKRI service - is enabled. Required. Known values are: "Enabled" and "Disabled". - }, - "connectors": { - "state": "str" # This determines if the Connector - service is enabled. Required. Known values are: "Enabled" and - "Disabled". - }, - "dataflows": { - "state": "str" # This determines if the Dataflow - service is enabled. Required. Known values are: "Enabled" and - "Disabled". - }, - "schemaRegistry": { - "state": "str" # This determines if the Schema - Registry service is enabled. Required. Known values are: "Enabled" - and "Disabled". - } + "schemaRegistryRef": { + "resourceId": "str" # The resource ID of the Schema + Registry. Required. }, "description": "str", # Optional. Detailed description of the Instance. @@ -3351,28 +3098,10 @@ def list_by_resource_group(self, resource_group_name: str, instance_name: str, * }, "diagnostics": { "logs": { - "level": "info", # Optional. Default value is - "info". The log level. Examples - 'debug', 'info', 'warn', 'error', - 'trace'. - "opentelemetryExportConfig": { - "otlpGrpcEndpoint": "str", # The open - telemetry collector endpoint to export to. Required. - "intervalSeconds": 30, # Optional. Default - value is 30. How often to export the metrics to the open - telemetry collector. - "level": "error" # Optional. Default value - is "error". The log level. Examples - 'debug', 'info', 'warn', - 'error', 'trace'. - } + "level": "info" # Optional. Default value is "info". + The log level. Examples - 'debug', 'info', 'warn', 'error', 'trace'. }, "metrics": { - "opentelemetryExportConfig": { - "otlpGrpcEndpoint": "str", # The open - telemetry collector endpoint to export to. Required. - "intervalSeconds": 30 # Optional. Default - value is 30. How often to export the metrics to the open - telemetry collector. - }, "prometheusPort": 9600 # Optional. Default value is 9600. The prometheus port to expose the metrics. }, @@ -3390,13 +3119,6 @@ def list_by_resource_group(self, resource_group_name: str, instance_name: str, * is 16. The cache size in megabytes. "mode": "str", # Optional. The toggle to enable/disable traces. Known values are: "Enabled" and "Disabled". - "opentelemetryExportConfig": { - "otlpGrpcEndpoint": "str", # The open - telemetry collector endpoint to export to. Required. 
- "intervalSeconds": 30 # Optional. Default - value is 30. How often to export the metrics to the open - telemetry collector. - }, "selfTracing": { "intervalSeconds": 30, # Optional. Default value is 30. The self tracing interval. @@ -3767,28 +3489,10 @@ def get(self, resource_group_name: str, instance_name: str, broker_name: str, ** }, "diagnostics": { "logs": { - "level": "info", # Optional. Default value is - "info". The log level. Examples - 'debug', 'info', 'warn', 'error', - 'trace'. - "opentelemetryExportConfig": { - "otlpGrpcEndpoint": "str", # The open - telemetry collector endpoint to export to. Required. - "intervalSeconds": 30, # Optional. Default - value is 30. How often to export the metrics to the open - telemetry collector. - "level": "error" # Optional. Default value - is "error". The log level. Examples - 'debug', 'info', 'warn', - 'error', 'trace'. - } + "level": "info" # Optional. Default value is "info". + The log level. Examples - 'debug', 'info', 'warn', 'error', 'trace'. }, "metrics": { - "opentelemetryExportConfig": { - "otlpGrpcEndpoint": "str", # The open - telemetry collector endpoint to export to. Required. - "intervalSeconds": 30 # Optional. Default - value is 30. How often to export the metrics to the open - telemetry collector. - }, "prometheusPort": 9600 # Optional. Default value is 9600. The prometheus port to expose the metrics. }, @@ -3806,13 +3510,6 @@ def get(self, resource_group_name: str, instance_name: str, broker_name: str, ** is 16. The cache size in megabytes. "mode": "str", # Optional. The toggle to enable/disable traces. Known values are: "Enabled" and "Disabled". - "opentelemetryExportConfig": { - "otlpGrpcEndpoint": "str", # The open - telemetry collector endpoint to export to. Required. - "intervalSeconds": 30 # Optional. Default - value is 30. How often to export the metrics to the open - telemetry collector. - }, "selfTracing": { "intervalSeconds": 30, # Optional. Default value is 30. The self tracing interval. @@ -4255,28 +3952,10 @@ def begin_create_or_update( }, "diagnostics": { "logs": { - "level": "info", # Optional. Default value is - "info". The log level. Examples - 'debug', 'info', 'warn', 'error', - 'trace'. - "opentelemetryExportConfig": { - "otlpGrpcEndpoint": "str", # The open - telemetry collector endpoint to export to. Required. - "intervalSeconds": 30, # Optional. Default - value is 30. How often to export the metrics to the open - telemetry collector. - "level": "error" # Optional. Default value - is "error". The log level. Examples - 'debug', 'info', 'warn', - 'error', 'trace'. - } + "level": "info" # Optional. Default value is "info". + The log level. Examples - 'debug', 'info', 'warn', 'error', 'trace'. }, "metrics": { - "opentelemetryExportConfig": { - "otlpGrpcEndpoint": "str", # The open - telemetry collector endpoint to export to. Required. - "intervalSeconds": 30 # Optional. Default - value is 30. How often to export the metrics to the open - telemetry collector. - }, "prometheusPort": 9600 # Optional. Default value is 9600. The prometheus port to expose the metrics. }, @@ -4294,13 +3973,6 @@ def begin_create_or_update( is 16. The cache size in megabytes. "mode": "str", # Optional. The toggle to enable/disable traces. Known values are: "Enabled" and "Disabled". - "opentelemetryExportConfig": { - "otlpGrpcEndpoint": "str", # The open - telemetry collector endpoint to export to. Required. - "intervalSeconds": 30 # Optional. Default - value is 30. How often to export the metrics to the open - telemetry collector. 
- }, "selfTracing": { "intervalSeconds": 30, # Optional. Default value is 30. The self tracing interval. @@ -4586,28 +4258,10 @@ def begin_create_or_update( }, "diagnostics": { "logs": { - "level": "info", # Optional. Default value is - "info". The log level. Examples - 'debug', 'info', 'warn', 'error', - 'trace'. - "opentelemetryExportConfig": { - "otlpGrpcEndpoint": "str", # The open - telemetry collector endpoint to export to. Required. - "intervalSeconds": 30, # Optional. Default - value is 30. How often to export the metrics to the open - telemetry collector. - "level": "error" # Optional. Default value - is "error". The log level. Examples - 'debug', 'info', 'warn', - 'error', 'trace'. - } + "level": "info" # Optional. Default value is "info". + The log level. Examples - 'debug', 'info', 'warn', 'error', 'trace'. }, "metrics": { - "opentelemetryExportConfig": { - "otlpGrpcEndpoint": "str", # The open - telemetry collector endpoint to export to. Required. - "intervalSeconds": 30 # Optional. Default - value is 30. How often to export the metrics to the open - telemetry collector. - }, "prometheusPort": 9600 # Optional. Default value is 9600. The prometheus port to expose the metrics. }, @@ -4625,13 +4279,6 @@ def begin_create_or_update( is 16. The cache size in megabytes. "mode": "str", # Optional. The toggle to enable/disable traces. Known values are: "Enabled" and "Disabled". - "opentelemetryExportConfig": { - "otlpGrpcEndpoint": "str", # The open - telemetry collector endpoint to export to. Required. - "intervalSeconds": 30 # Optional. Default - value is 30. How often to export the metrics to the open - telemetry collector. - }, "selfTracing": { "intervalSeconds": 30, # Optional. Default value is 30. The self tracing interval. @@ -4957,28 +4604,10 @@ def begin_create_or_update( }, "diagnostics": { "logs": { - "level": "info", # Optional. Default value is - "info". The log level. Examples - 'debug', 'info', 'warn', 'error', - 'trace'. - "opentelemetryExportConfig": { - "otlpGrpcEndpoint": "str", # The open - telemetry collector endpoint to export to. Required. - "intervalSeconds": 30, # Optional. Default - value is 30. How often to export the metrics to the open - telemetry collector. - "level": "error" # Optional. Default value - is "error". The log level. Examples - 'debug', 'info', 'warn', - 'error', 'trace'. - } + "level": "info" # Optional. Default value is "info". + The log level. Examples - 'debug', 'info', 'warn', 'error', 'trace'. }, "metrics": { - "opentelemetryExportConfig": { - "otlpGrpcEndpoint": "str", # The open - telemetry collector endpoint to export to. Required. - "intervalSeconds": 30 # Optional. Default - value is 30. How often to export the metrics to the open - telemetry collector. - }, "prometheusPort": 9600 # Optional. Default value is 9600. The prometheus port to expose the metrics. }, @@ -4996,13 +4625,6 @@ def begin_create_or_update( is 16. The cache size in megabytes. "mode": "str", # Optional. The toggle to enable/disable traces. Known values are: "Enabled" and "Disabled". - "opentelemetryExportConfig": { - "otlpGrpcEndpoint": "str", # The open - telemetry collector endpoint to export to. Required. - "intervalSeconds": 30 # Optional. Default - value is 30. How often to export the metrics to the open - telemetry collector. - }, "selfTracing": { "intervalSeconds": 30, # Optional. Default value is 30. The self tracing interval. @@ -5321,28 +4943,10 @@ def begin_create_or_update( }, "diagnostics": { "logs": { - "level": "info", # Optional. 
Default value is - "info". The log level. Examples - 'debug', 'info', 'warn', 'error', - 'trace'. - "opentelemetryExportConfig": { - "otlpGrpcEndpoint": "str", # The open - telemetry collector endpoint to export to. Required. - "intervalSeconds": 30, # Optional. Default - value is 30. How often to export the metrics to the open - telemetry collector. - "level": "error" # Optional. Default value - is "error". The log level. Examples - 'debug', 'info', 'warn', - 'error', 'trace'. - } + "level": "info" # Optional. Default value is "info". + The log level. Examples - 'debug', 'info', 'warn', 'error', 'trace'. }, "metrics": { - "opentelemetryExportConfig": { - "otlpGrpcEndpoint": "str", # The open - telemetry collector endpoint to export to. Required. - "intervalSeconds": 30 # Optional. Default - value is 30. How often to export the metrics to the open - telemetry collector. - }, "prometheusPort": 9600 # Optional. Default value is 9600. The prometheus port to expose the metrics. }, @@ -5360,13 +4964,6 @@ def begin_create_or_update( is 16. The cache size in megabytes. "mode": "str", # Optional. The toggle to enable/disable traces. Known values are: "Enabled" and "Disabled". - "opentelemetryExportConfig": { - "otlpGrpcEndpoint": "str", # The open - telemetry collector endpoint to export to. Required. - "intervalSeconds": 30 # Optional. Default - value is 30. How often to export the metrics to the open - telemetry collector. - }, "selfTracing": { "intervalSeconds": 30, # Optional. Default value is 30. The self tracing interval. @@ -5652,28 +5249,10 @@ def begin_create_or_update( }, "diagnostics": { "logs": { - "level": "info", # Optional. Default value is - "info". The log level. Examples - 'debug', 'info', 'warn', 'error', - 'trace'. - "opentelemetryExportConfig": { - "otlpGrpcEndpoint": "str", # The open - telemetry collector endpoint to export to. Required. - "intervalSeconds": 30, # Optional. Default - value is 30. How often to export the metrics to the open - telemetry collector. - "level": "error" # Optional. Default value - is "error". The log level. Examples - 'debug', 'info', 'warn', - 'error', 'trace'. - } + "level": "info" # Optional. Default value is "info". + The log level. Examples - 'debug', 'info', 'warn', 'error', 'trace'. }, "metrics": { - "opentelemetryExportConfig": { - "otlpGrpcEndpoint": "str", # The open - telemetry collector endpoint to export to. Required. - "intervalSeconds": 30 # Optional. Default - value is 30. How often to export the metrics to the open - telemetry collector. - }, "prometheusPort": 9600 # Optional. Default value is 9600. The prometheus port to expose the metrics. }, @@ -5691,13 +5270,6 @@ def begin_create_or_update( is 16. The cache size in megabytes. "mode": "str", # Optional. The toggle to enable/disable traces. Known values are: "Enabled" and "Disabled". - "opentelemetryExportConfig": { - "otlpGrpcEndpoint": "str", # The open - telemetry collector endpoint to export to. Required. - "intervalSeconds": 30 # Optional. Default - value is 30. How often to export the metrics to the open - telemetry collector. - }, "selfTracing": { "intervalSeconds": 30, # Optional. Default value is 30. The self tracing interval. @@ -8597,8 +8169,8 @@ def list_by_resource_group( "provisioningState": "str", # Optional. The status of the last operation. Known values are: "Succeeded", "Failed", "Canceled", "Provisioning", "Updating", "Deleting", and "Accepted". - "serviceName": "aio-mq-dmqtt-frontend", # Optional. Default value is - "aio-mq-dmqtt-frontend". 
Kubernetes Service name of this listener. + "serviceName": "str", # Optional. Kubernetes Service name of this + listener. "serviceType": "str" # Optional. Kubernetes Service type of this listener. Known values are: "ClusterIp", "LoadBalancer", and "NodePort". }, @@ -8793,8 +8365,8 @@ def get( "provisioningState": "str", # Optional. The status of the last operation. Known values are: "Succeeded", "Failed", "Canceled", "Provisioning", "Updating", "Deleting", and "Accepted". - "serviceName": "aio-mq-dmqtt-frontend", # Optional. Default value is - "aio-mq-dmqtt-frontend". Kubernetes Service name of this listener. + "serviceName": "str", # Optional. Kubernetes Service name of this + listener. "serviceType": "str" # Optional. Kubernetes Service type of this listener. Known values are: "ClusterIp", "LoadBalancer", and "NodePort". }, @@ -9067,8 +8639,8 @@ def begin_create_or_update( "provisioningState": "str", # Optional. The status of the last operation. Known values are: "Succeeded", "Failed", "Canceled", "Provisioning", "Updating", "Deleting", and "Accepted". - "serviceName": "aio-mq-dmqtt-frontend", # Optional. Default value is - "aio-mq-dmqtt-frontend". Kubernetes Service name of this listener. + "serviceName": "str", # Optional. Kubernetes Service name of this + listener. "serviceType": "str" # Optional. Kubernetes Service type of this listener. Known values are: "ClusterIp", "LoadBalancer", and "NodePort". }, @@ -9173,8 +8745,8 @@ def begin_create_or_update( "provisioningState": "str", # Optional. The status of the last operation. Known values are: "Succeeded", "Failed", "Canceled", "Provisioning", "Updating", "Deleting", and "Accepted". - "serviceName": "aio-mq-dmqtt-frontend", # Optional. Default value is - "aio-mq-dmqtt-frontend". Kubernetes Service name of this listener. + "serviceName": "str", # Optional. Kubernetes Service name of this + listener. "serviceType": "str" # Optional. Kubernetes Service type of this listener. Known values are: "ClusterIp", "LoadBalancer", and "NodePort". }, @@ -9322,8 +8894,8 @@ def begin_create_or_update( "provisioningState": "str", # Optional. The status of the last operation. Known values are: "Succeeded", "Failed", "Canceled", "Provisioning", "Updating", "Deleting", and "Accepted". - "serviceName": "aio-mq-dmqtt-frontend", # Optional. Default value is - "aio-mq-dmqtt-frontend". Kubernetes Service name of this listener. + "serviceName": "str", # Optional. Kubernetes Service name of this + listener. "serviceType": "str" # Optional. Kubernetes Service type of this listener. Known values are: "ClusterIp", "LoadBalancer", and "NodePort". }, @@ -9469,8 +9041,8 @@ def begin_create_or_update( "provisioningState": "str", # Optional. The status of the last operation. Known values are: "Succeeded", "Failed", "Canceled", "Provisioning", "Updating", "Deleting", and "Accepted". - "serviceName": "aio-mq-dmqtt-frontend", # Optional. Default value is - "aio-mq-dmqtt-frontend". Kubernetes Service name of this listener. + "serviceName": "str", # Optional. Kubernetes Service name of this + listener. "serviceType": "str" # Optional. Kubernetes Service type of this listener. Known values are: "ClusterIp", "LoadBalancer", and "NodePort". }, @@ -9575,8 +9147,8 @@ def begin_create_or_update( "provisioningState": "str", # Optional. The status of the last operation. Known values are: "Succeeded", "Failed", "Canceled", "Provisioning", "Updating", "Deleting", and "Accepted". - "serviceName": "aio-mq-dmqtt-frontend", # Optional. Default value is - "aio-mq-dmqtt-frontend". 
Kubernetes Service name of this listener. + "serviceName": "str", # Optional. Kubernetes Service name of this + listener. "serviceType": "str" # Optional. Kubernetes Service type of this listener. Known values are: "ClusterIp", "LoadBalancer", and "NodePort". }, @@ -12155,28 +11727,10 @@ def list_by_resource_group(self, resource_group_name: str, instance_name: str, * "properties": { "diagnostics": { "logs": { - "level": "info", # Optional. Default value is - "info". The log level. Examples - 'debug', 'info', 'warn', 'error', - 'trace'. - "opentelemetryExportConfig": { - "otlpGrpcEndpoint": "str", # The open - telemetry collector endpoint to export to. Required. - "intervalSeconds": 30, # Optional. Default - value is 30. How often to export the metrics to the open - telemetry collector. - "level": "error" # Optional. Default value - is "error". The log level. Examples - 'debug', 'info', 'warn', - 'error', 'trace'. - } + "level": "info" # Optional. Default value is "info". + The log level. Examples - 'debug', 'info', 'warn', 'error', 'trace'. }, "metrics": { - "opentelemetryExportConfig": { - "otlpGrpcEndpoint": "str", # The open - telemetry collector endpoint to export to. Required. - "intervalSeconds": 30 # Optional. Default - value is 30. How often to export the metrics to the open - telemetry collector. - }, "prometheusPort": 9600 # Optional. Default value is 9600. The prometheus port to expose the metrics. } @@ -12305,28 +11859,10 @@ def get(self, resource_group_name: str, instance_name: str, dataflow_profile_nam "properties": { "diagnostics": { "logs": { - "level": "info", # Optional. Default value is - "info". The log level. Examples - 'debug', 'info', 'warn', 'error', - 'trace'. - "opentelemetryExportConfig": { - "otlpGrpcEndpoint": "str", # The open - telemetry collector endpoint to export to. Required. - "intervalSeconds": 30, # Optional. Default - value is 30. How often to export the metrics to the open - telemetry collector. - "level": "error" # Optional. Default value - is "error". The log level. Examples - 'debug', 'info', 'warn', - 'error', 'trace'. - } + "level": "info" # Optional. Default value is "info". + The log level. Examples - 'debug', 'info', 'warn', 'error', 'trace'. }, "metrics": { - "opentelemetryExportConfig": { - "otlpGrpcEndpoint": "str", # The open - telemetry collector endpoint to export to. Required. - "intervalSeconds": 30 # Optional. Default - value is 30. How often to export the metrics to the open - telemetry collector. - }, "prometheusPort": 9600 # Optional. Default value is 9600. The prometheus port to expose the metrics. } @@ -12532,28 +12068,10 @@ def begin_create_or_update( "properties": { "diagnostics": { "logs": { - "level": "info", # Optional. Default value is - "info". The log level. Examples - 'debug', 'info', 'warn', 'error', - 'trace'. - "opentelemetryExportConfig": { - "otlpGrpcEndpoint": "str", # The open - telemetry collector endpoint to export to. Required. - "intervalSeconds": 30, # Optional. Default - value is 30. How often to export the metrics to the open - telemetry collector. - "level": "error" # Optional. Default value - is "error". The log level. Examples - 'debug', 'info', 'warn', - 'error', 'trace'. - } + "level": "info" # Optional. Default value is "info". + The log level. Examples - 'debug', 'info', 'warn', 'error', 'trace'. }, "metrics": { - "opentelemetryExportConfig": { - "otlpGrpcEndpoint": "str", # The open - telemetry collector endpoint to export to. Required. - "intervalSeconds": 30 # Optional. Default - value is 30. 
How often to export the metrics to the open - telemetry collector. - }, "prometheusPort": 9600 # Optional. Default value is 9600. The prometheus port to expose the metrics. } @@ -12597,28 +12115,10 @@ def begin_create_or_update( "properties": { "diagnostics": { "logs": { - "level": "info", # Optional. Default value is - "info". The log level. Examples - 'debug', 'info', 'warn', 'error', - 'trace'. - "opentelemetryExportConfig": { - "otlpGrpcEndpoint": "str", # The open - telemetry collector endpoint to export to. Required. - "intervalSeconds": 30, # Optional. Default - value is 30. How often to export the metrics to the open - telemetry collector. - "level": "error" # Optional. Default value - is "error". The log level. Examples - 'debug', 'info', 'warn', - 'error', 'trace'. - } + "level": "info" # Optional. Default value is "info". + The log level. Examples - 'debug', 'info', 'warn', 'error', 'trace'. }, "metrics": { - "opentelemetryExportConfig": { - "otlpGrpcEndpoint": "str", # The open - telemetry collector endpoint to export to. Required. - "intervalSeconds": 30 # Optional. Default - value is 30. How often to export the metrics to the open - telemetry collector. - }, "prometheusPort": 9600 # Optional. Default value is 9600. The prometheus port to expose the metrics. } @@ -12702,28 +12202,10 @@ def begin_create_or_update( "properties": { "diagnostics": { "logs": { - "level": "info", # Optional. Default value is - "info". The log level. Examples - 'debug', 'info', 'warn', 'error', - 'trace'. - "opentelemetryExportConfig": { - "otlpGrpcEndpoint": "str", # The open - telemetry collector endpoint to export to. Required. - "intervalSeconds": 30, # Optional. Default - value is 30. How often to export the metrics to the open - telemetry collector. - "level": "error" # Optional. Default value - is "error". The log level. Examples - 'debug', 'info', 'warn', - 'error', 'trace'. - } + "level": "info" # Optional. Default value is "info". + The log level. Examples - 'debug', 'info', 'warn', 'error', 'trace'. }, "metrics": { - "opentelemetryExportConfig": { - "otlpGrpcEndpoint": "str", # The open - telemetry collector endpoint to export to. Required. - "intervalSeconds": 30 # Optional. Default - value is 30. How often to export the metrics to the open - telemetry collector. - }, "prometheusPort": 9600 # Optional. Default value is 9600. The prometheus port to expose the metrics. } @@ -12805,28 +12287,10 @@ def begin_create_or_update( "properties": { "diagnostics": { "logs": { - "level": "info", # Optional. Default value is - "info". The log level. Examples - 'debug', 'info', 'warn', 'error', - 'trace'. - "opentelemetryExportConfig": { - "otlpGrpcEndpoint": "str", # The open - telemetry collector endpoint to export to. Required. - "intervalSeconds": 30, # Optional. Default - value is 30. How often to export the metrics to the open - telemetry collector. - "level": "error" # Optional. Default value - is "error". The log level. Examples - 'debug', 'info', 'warn', - 'error', 'trace'. - } + "level": "info" # Optional. Default value is "info". + The log level. Examples - 'debug', 'info', 'warn', 'error', 'trace'. }, "metrics": { - "opentelemetryExportConfig": { - "otlpGrpcEndpoint": "str", # The open - telemetry collector endpoint to export to. Required. - "intervalSeconds": 30 # Optional. Default - value is 30. How often to export the metrics to the open - telemetry collector. - }, "prometheusPort": 9600 # Optional. Default value is 9600. The prometheus port to expose the metrics. 
                        }
@@ -12870,28 +12334,10 @@ def begin_create_or_update(
                 "properties": {
                     "diagnostics": {
                         "logs": {
-                            "level": "info",  # Optional. Default value is
-                              "info". The log level. Examples - 'debug', 'info', 'warn', 'error',
-                              'trace'.
-                            "opentelemetryExportConfig": {
-                                "otlpGrpcEndpoint": "str",  # The open
-                                  telemetry collector endpoint to export to. Required.
-                                "intervalSeconds": 30,  # Optional. Default
-                                  value is 30. How often to export the metrics to the open
-                                  telemetry collector.
-                                "level": "error"  # Optional. Default value
-                                  is "error". The log level. Examples - 'debug', 'info', 'warn',
-                                  'error', 'trace'.
-                            }
+                            "level": "info"  # Optional. Default value is "info".
+                              The log level. Examples - 'debug', 'info', 'warn', 'error', 'trace'.
                         },
                         "metrics": {
-                            "opentelemetryExportConfig": {
-                                "otlpGrpcEndpoint": "str",  # The open
-                                  telemetry collector endpoint to export to. Required.
-                                "intervalSeconds": 30  # Optional. Default
-                                  value is 30. How often to export the metrics to the open
-                                  telemetry collector.
-                            },
                             "prometheusPort": 9600  # Optional. Default value is
                               9600. The prometheus port to expose the metrics.
                         }

diff --git a/azext_edge/tests/edge/orchestration/resources/conftest.py b/azext_edge/tests/edge/orchestration/resources/conftest.py
index 98e5189d0..ecc4b9379 100644
--- a/azext_edge/tests/edge/orchestration/resources/conftest.py
+++ b/azext_edge/tests/edge/orchestration/resources/conftest.py
@@ -15,7 +15,7 @@
 BASE_URL = "https://management.azure.com"
 RESOURCE_PROVIDER = "Microsoft.IoTOperations"
 QUALIFIED_INSTANCE_TYPE = f"{RESOURCE_PROVIDER}/instances"
-INSTANCES_API_VERSION = "2024-08-15-preview"
+INSTANCES_API_VERSION = "2024-09-15-preview"

 CUSTOM_LOCATION_NAME = generate_random_string()

From 4aef15a6aa49071eeb6149b3b5661e93bcd78f96 Mon Sep 17 00:00:00 2001
From: Elsie4ever <3467996@gmail.com>
Date: Mon, 14 Oct 2024 18:41:27 -0700
Subject: [PATCH 02/26] feat: add strato(azuremonitor) resource in support
 bundle (#403)

---
 azext_edge/edge/common.py                     |   5 +
 .../edge/providers/edge_api/__init__.py       |   2 +
 .../edge/providers/edge_api/azuremonitor.py   |  10 ++
 .../edge/providers/support/azuremonitor.py    |  99 +++++++++++++++
 azext_edge/edge/providers/support/base.py     |  15 ++-
 azext_edge/edge/providers/support/shared.py   |  22 +++-
 azext_edge/edge/providers/support_bundle.py   |   7 ++
 azext_edge/tests/edge/support/conftest.py     |  28 ++++-
 .../edge/support/create_bundle_int/helpers.py |  24 +++-
 .../create_bundle_int/test_auto_int.py        |  18 ++-
 .../test_azuremonitor_int.py                  |  36 ++++++
 .../support/test_azuremonitor_support_unit.py | 113 ++++++++++++++++++
 .../tests/edge/support/test_support_unit.py   |  26 ++--
 13 files changed, 378 insertions(+), 27 deletions(-)
 create mode 100644 azext_edge/edge/providers/edge_api/azuremonitor.py
 create mode 100644 azext_edge/edge/providers/support/azuremonitor.py
 create mode 100644 azext_edge/tests/edge/support/create_bundle_int/test_azuremonitor_int.py
 create mode 100644 azext_edge/tests/edge/support/test_azuremonitor_support_unit.py

diff --git a/azext_edge/edge/common.py b/azext_edge/edge/common.py
index fc0bef710..59ca15a62 100644
--- a/azext_edge/edge/common.py
+++ b/azext_edge/edge/common.py
@@ -154,6 +154,7 @@ class OpsServiceType(ListableEnum):
     schemaregistry = "schemaregistry"
     arccontainerstorage = "acs"
     secretstore = "secretstore"
+    azuremonitor = "azuremonitor"

     @classmethod
     def list_check_services(cls):
@@ -216,18 +217,21 @@ class AEPAuthModes(Enum):

 class AEPTypes(Enum):
     """Asset Endpoint Profile (connector) Types"""
+
     # TODO: ensure this is the final enum
     opcua = "OpcUa"


 class TopicRetain(Enum):
     """Set the retain flag for messages published to an MQTT broker."""
+
     keep = "Keep"
     never = "Never"


 class SecurityModes(Enum):
     """Security modes for OPCUA connector."""
+
     none = "none"
     sign = "sign"
     sign_and_encrypt = "signAndEncrypt"


 class SecurityPolicies(Enum):
     """Security policies for the OPCUA connector."""
+
     # TODO: add in user friendly input mapping
     none = "none"
     basic128 = "Basic128Rsa15"

diff --git a/azext_edge/edge/providers/edge_api/__init__.py b/azext_edge/edge/providers/edge_api/__init__.py
index 44d1f9cb2..46220d9c5 100644
--- a/azext_edge/edge/providers/edge_api/__init__.py
+++ b/azext_edge/edge/providers/edge_api/__init__.py
@@ -14,9 +14,11 @@
 from .meta import META_API_V1B1, MetaResourceKinds
 from .arccontainerstorage import ARCCONTAINERSTORAGE_API_V1
 from .secretstore import SECRETSYNC_API_V1, SECRETSTORE_API_V1
+from .azuremonitor import AZUREMONITOR_API_V1

 __all__ = [
     "ARCCONTAINERSTORAGE_API_V1",
+    "AZUREMONITOR_API_V1",
     "CLUSTER_CONFIG_API_V1",
     "EdgeResourceApi",
     "EdgeApiManager",

diff --git a/azext_edge/edge/providers/edge_api/azuremonitor.py b/azext_edge/edge/providers/edge_api/azuremonitor.py
new file mode 100644
index 000000000..4c00d44bc
--- /dev/null
+++ b/azext_edge/edge/providers/edge_api/azuremonitor.py
@@ -0,0 +1,10 @@
+# coding=utf-8
+# ----------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License file in the project root for license information.
+# ----------------------------------------------------------------------------------------------
+
+from .base import EdgeResourceApi
+
+
+AZUREMONITOR_API_V1 = EdgeResourceApi(group="azuremonitor.microsoft.com", version="v1alpha1", moniker="azuremonitor")

diff --git a/azext_edge/edge/providers/support/azuremonitor.py b/azext_edge/edge/providers/support/azuremonitor.py
new file mode 100644
index 000000000..4d7dca7b8
--- /dev/null
+++ b/azext_edge/edge/providers/support/azuremonitor.py
@@ -0,0 +1,99 @@
+# coding=utf-8
+# ----------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License file in the project root for license information.
+# ----------------------------------------------------------------------------------------------
+
+from functools import partial
+from typing import Iterable, Optional
+
+from knack.log import get_logger
+
+from ..edge_api import AZUREMONITOR_API_V1, EdgeResourceApi
+from .base import (
+    DAY_IN_SECONDS,
+    assemble_crd_work,
+    process_config_maps,
+    process_deployments,
+    process_replicasets,
+    process_services,
+    process_statefulset,
+    process_v1_pods,
+)
+
+logger = get_logger(__name__)
+
+MONITOR_DIRECTORY_PATH = AZUREMONITOR_API_V1.moniker
+# No common label for azuremonitor
+MONITOR_NAMESPACE = "azure-arc"
+DIAGNOSTICS_OPERATOR_PREFIX = "diagnostics-operator"
+DIAGNOSTICS_V1_PREFIX = "diagnostics-v1"
+
+
+def fetch_deployments():
+    return process_deployments(
+        directory_path=MONITOR_DIRECTORY_PATH, namespace=MONITOR_NAMESPACE, prefix_names=[DIAGNOSTICS_OPERATOR_PREFIX]
+    )
+
+
+def fetch_replicasets():
+    return process_replicasets(
+        directory_path=MONITOR_DIRECTORY_PATH, namespace=MONITOR_NAMESPACE, prefix_names=[DIAGNOSTICS_OPERATOR_PREFIX]
+    )
+
+
+def fetch_pods(since_seconds: int = DAY_IN_SECONDS):
+    return process_v1_pods(
+        directory_path=MONITOR_DIRECTORY_PATH,
+        namespace=MONITOR_NAMESPACE,
+        since_seconds=since_seconds,
+        prefix_names=[DIAGNOSTICS_OPERATOR_PREFIX, DIAGNOSTICS_V1_PREFIX],
+    )
+
+
+def fetch_services():
+    return process_services(
+        directory_path=MONITOR_DIRECTORY_PATH,
+        namespace=MONITOR_NAMESPACE,
+        prefix_names=[DIAGNOSTICS_OPERATOR_PREFIX, DIAGNOSTICS_V1_PREFIX],
+    )
+
+
+def fetch_statefulsets():
+    return process_statefulset(
+        directory_path=MONITOR_DIRECTORY_PATH,
+        return_namespaces=False,
+        namespace=MONITOR_NAMESPACE,
+        prefix_names=[DIAGNOSTICS_V1_PREFIX],
+    )
+
+
+def fetch_configmaps():
+    return process_config_maps(
+        directory_path=MONITOR_DIRECTORY_PATH,
+        prefix_names=[DIAGNOSTICS_V1_PREFIX],
+    )
+
+
+support_runtime_elements = {
+    "configmaps": fetch_configmaps,
+    "deployments": fetch_deployments,
+    "statefulsets": fetch_statefulsets,
+    "replicasets": fetch_replicasets,
+    "services": fetch_services,
+}
+
+
+def prepare_bundle(
+    log_age_seconds: int = DAY_IN_SECONDS,
+    apis: Optional[Iterable[EdgeResourceApi]] = None,
+) -> dict:
+    monitor_to_run = {}
+
+    if apis:
+        monitor_to_run.update(assemble_crd_work(apis=apis, namespace=MONITOR_NAMESPACE))
+
+    support_runtime_elements["pods"] = partial(fetch_pods, since_seconds=log_age_seconds)
+    monitor_to_run.update(support_runtime_elements)
+
+    return monitor_to_run

diff --git a/azext_edge/edge/providers/support/base.py b/azext_edge/edge/providers/support/base.py
index 52692d5bd..a7d911648 100644
--- a/azext_edge/edge/providers/support/base.py
+++ b/azext_edge/edge/providers/support/base.py
@@ -219,17 +219,26 @@ def process_statefulset(
     return_namespaces: bool = False,
     field_selector: Optional[str] = None,
     label_selector: Optional[str] = None,
+    prefix_names: Optional[List[str]] = None,
+    namespace: Optional[str] = None,
 ) -> Union[Tuple[List[dict], dict], List[dict]]:
     v1_apps = client.AppsV1Api()
-    statefulsets: V1StatefulSetList = v1_apps.list_stateful_set_for_all_namespaces(
-        label_selector=label_selector, field_selector=field_selector
-    )
+
+    if namespace:
+        statefulsets: V1StatefulSetList = v1_apps.list_namespaced_stateful_set(
+            namespace=namespace, label_selector=label_selector, field_selector=field_selector
+        )
+    else:
+        statefulsets: V1StatefulSetList = v1_apps.list_stateful_set_for_all_namespaces(
+            label_selector=label_selector, field_selector=field_selector
+        )
     namespace_pods_work =
{} processed = _process_kubernetes_resources( directory_path=directory_path, resources=statefulsets, kind=BundleResourceKind.statefulset.value, + prefix_names=prefix_names, ) for statefulset in statefulsets.items: diff --git a/azext_edge/edge/providers/support/shared.py b/azext_edge/edge/providers/support/shared.py index 90c3e7afb..55b02dfbf 100644 --- a/azext_edge/edge/providers/support/shared.py +++ b/azext_edge/edge/providers/support/shared.py @@ -8,7 +8,7 @@ from ..k8s.config_map import get_config_map from ..orchestration.base import ARC_CONFIG_MAP, ARC_NAMESPACE -from .base import process_events, process_nodes, process_storage_classes +from .base import process_events, process_nodes, process_storage_classes, get_custom_objects logger = get_logger(__name__) @@ -27,9 +27,29 @@ def process_arc_kpis(): return result +def process_extension_configs(): + result = [] + extension_config = get_custom_objects( + group="clusterconfig.azure.com", + version="v1beta1", + plural="extensionconfigs", + ) + + if extension_config: + result.append( + { + "data": extension_config, + "zinfo": "extensionconfigs.yaml", + } + ) + + return result + + support_shared_elements = { "nodes": process_nodes, "events": process_events, + "extensionconfigs": process_extension_configs, "storageclasses": process_storage_classes, "arc": process_arc_kpis, } diff --git a/azext_edge/edge/providers/support_bundle.py b/azext_edge/edge/providers/support_bundle.py index 7f15d7113..8e130a14b 100644 --- a/azext_edge/edge/providers/support_bundle.py +++ b/azext_edge/edge/providers/support_bundle.py @@ -22,6 +22,7 @@ ARCCONTAINERSTORAGE_API_V1, SECRETSYNC_API_V1, SECRETSTORE_API_V1, + AZUREMONITOR_API_V1, EdgeApiManager, ) @@ -37,6 +38,7 @@ COMPAT_META_APIS = EdgeApiManager(resource_apis=[META_API_V1B1]) COMPAT_ARCCONTAINERSTORAGE_APIS = EdgeApiManager(resource_apis=[ARCCONTAINERSTORAGE_API_V1]) COMPAT_SECRETSTORE_APIS = EdgeApiManager(resource_apis=[SECRETSYNC_API_V1, SECRETSTORE_API_V1]) +COMPAT_AZUREMONITOR_APIS = EdgeApiManager(resource_apis=[AZUREMONITOR_API_V1]) def build_bundle( @@ -61,6 +63,7 @@ def build_bundle( from .support.schemaregistry import prepare_bundle as prepare_schema_registry_bundle from .support.arccontainerstorage import prepare_bundle as prepare_arccontainerstorage_bundle from .support.secretstore import prepare_bundle as prepare_secretstore_bundle + from .support.azuremonitor import prepare_bundle as prepare_azuremonitor_bundle def collect_default_works( pending_work: dict, @@ -109,6 +112,10 @@ def collect_default_works( "apis": COMPAT_SECRETSTORE_APIS, "prepare_bundle": prepare_secretstore_bundle, }, + OpsServiceType.azuremonitor.value: { + "apis": COMPAT_AZUREMONITOR_APIS, + "prepare_bundle": prepare_azuremonitor_bundle, + }, } if not ops_services: diff --git a/azext_edge/tests/edge/support/conftest.py b/azext_edge/tests/edge/support/conftest.py index 37bfe9aea..b06db93c6 100644 --- a/azext_edge/tests/edge/support/conftest.py +++ b/azext_edge/tests/edge/support/conftest.py @@ -278,6 +278,7 @@ def _handle_list_deployments(*args, **kwargs): "aio-opc-supervisor", "aio-opc-opc", "opcplc-0000000", + "diagnostics-operator-deployment", ] deployment_list = [] for name in names: @@ -297,7 +298,10 @@ def mocked_list_replicasets(mocked_client): from kubernetes.client.models import V1ReplicaSetList, V1ReplicaSet, V1ObjectMeta def _handle_list_replicasets(*args, **kwargs): - names = ["mock_replicaset"] + names = [ + "mock_replicaset", + "diagnostics-operator-deployment", + ] replicaset_list = [] for name in names: 
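Seen end to end, each ops service contributes a name-to-callable work map that build_bundle drains. A hypothetical driver loop under that reading of prepare_bundle (signature from the new azuremonitor module above; key names from its support_runtime_elements):

    from azext_edge.edge.providers.edge_api import AZUREMONITOR_API_V1
    from azext_edge.edge.providers.support.azuremonitor import prepare_bundle

    # Yields callables keyed by "configmaps", "deployments", "statefulsets",
    # "replicasets", "services" and "pods" (plus CRD work when apis is given).
    work = prepare_bundle(log_age_seconds=3600, apis=[AZUREMONITOR_API_V1])
    for name, fetch in work.items():
        records = fetch()  # most fetchers target the fixed azure-arc namespace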
replicaset_list.append(V1ReplicaSet(metadata=V1ObjectMeta(namespace="mock_namespace", name=name))) @@ -316,12 +320,20 @@ def mocked_list_statefulsets(mocked_client): from kubernetes.client.models import V1StatefulSetList, V1StatefulSet, V1ObjectMeta def _handle_list_statefulsets(*args, **kwargs): - statefulset = V1StatefulSet(metadata=V1ObjectMeta(namespace="mock_namespace", name="mock_statefulset")) - statefulset_list = V1StatefulSetList(items=[statefulset]) + names = [ + "mock_statefulset", + "diagnostics-v1-statefulset", + ] + + statefulset_list = [] + for name in names: + statefulset_list.append(V1StatefulSet(metadata=V1ObjectMeta(namespace="mock_namespace", name=name))) + statefulset_list = V1StatefulSetList(items=statefulset_list) return statefulset_list mocked_client.AppsV1Api().list_stateful_set_for_all_namespaces.side_effect = _handle_list_statefulsets + mocked_client.AppsV1Api().list_namespaced_stateful_set.side_effect = _handle_list_statefulsets yield mocked_client @@ -331,7 +343,7 @@ def mocked_list_services(mocked_client): from kubernetes.client.models import V1ServiceList, V1Service, V1ObjectMeta def _handle_list_services(*args, **kwargs): - service_names = ["mock_service", "opcplc-0000000", "aio-operator"] + service_names = ["mock_service", "opcplc-0000000", "aio-operator", "diagnostics-operator-service"] service_list = [] for name in service_names: service_list.append(V1Service(metadata=V1ObjectMeta(namespace="mock_namespace", name=name))) @@ -441,8 +453,12 @@ def mocked_list_config_maps(mocked_client): from kubernetes.client.models import V1ConfigMapList, V1ConfigMap, V1ObjectMeta def _handle_list_config_maps(*args, **kwargs): - config_map = V1ConfigMap(metadata=V1ObjectMeta(namespace="mock_namespace", name="mock_config_map")) - config_map_list = V1ConfigMapList(items=[config_map]) + names = ["mock_config_map", "diagnostics-v1-collector-config"] + + config_map_list = [] + for name in names: + config_map_list.append(V1ConfigMap(metadata=V1ObjectMeta(namespace="mock_namespace", name=name))) + config_map_list = V1ConfigMapList(items=config_map_list) return config_map_list diff --git a/azext_edge/tests/edge/support/create_bundle_int/helpers.py b/azext_edge/tests/edge/support/create_bundle_int/helpers.py index f7d5010d5..4e0f4f269 100644 --- a/azext_edge/tests/edge/support/create_bundle_int/helpers.py +++ b/azext_edge/tests/edge/support/create_bundle_int/helpers.py @@ -287,6 +287,14 @@ def get_file_map( assert len(walk_result) == 2 + expected_default_walk_result file_map[OpsServiceType.secretstore.value] = convert_file_names(walk_result[ssc_path]["files"]) file_map["__namespaces__"][OpsServiceType.secretstore.value] = ssc_namespace + elif ops_service == OpsServiceType.azuremonitor.value: + monitor_path = path.join(BASE_ZIP_PATH, arc_namespace, OpsServiceType.azuremonitor.value) + assert len(walk_result) == 1 + expected_default_walk_result + file_map[OpsServiceType.azuremonitor.value] = convert_file_names(walk_result[monitor_path]["files"]) + file_map["__namespaces__"][OpsServiceType.azuremonitor.value] = arc_namespace + + # no files for aio, skip the rest assertions + return file_map elif ops_service == "deviceregistry": if ops_path not in walk_result: assert len(walk_result) == expected_default_walk_result @@ -342,16 +350,20 @@ def _get_namespace_determinating_files(name: str, folder: str, file_prefix: str) else: namespace = name - for namespace_folder, service in [ - (clusterconfig_namespace, "clusterconfig"), - (arc_namespace, "arcagents"), - (acs_namespace, 
"arccontainerstorage"), - (ssc_namespace, OpsServiceType.secretstore.value), + monitor_path = path.join(BASE_ZIP_PATH, arc_namespace, OpsServiceType.azuremonitor.value) + for namespace_folder, services in [ + (clusterconfig_namespace, ["clusterconfig"]), + (arc_namespace, ["arcagents"]), + (acs_namespace, ["arccontainerstorage"]), + (ssc_namespace, [OpsServiceType.secretstore.value]), ]: if namespace_folder: # remove empty folders in level 1 level_1 = walk_result.pop(path.join(BASE_ZIP_PATH, namespace_folder)) - assert level_1["folders"] == [service] + + if namespace_folder == arc_namespace and monitor_path in walk_result: + services.append(OpsServiceType.azuremonitor.value) + assert set(level_1["folders"]) == set(services) assert not level_1["files"] # remove empty folders in level 2 diff --git a/azext_edge/tests/edge/support/create_bundle_int/test_auto_int.py b/azext_edge/tests/edge/support/create_bundle_int/test_auto_int.py index 6ed087457..855394935 100644 --- a/azext_edge/tests/edge/support/create_bundle_int/test_auto_int.py +++ b/azext_edge/tests/edge/support/create_bundle_int/test_auto_int.py @@ -35,9 +35,9 @@ def generate_bundle_test_cases() -> List[Tuple[str, bool, Optional[str]]]: def test_create_bundle(init_setup, bundle_dir, mq_traces, ops_service, tracked_files): """Test to focus on ops_service param.""" - # skip arccontainerstorage for aio namespace check - if ops_service == OpsServiceType.arccontainerstorage.value: - pytest.skip("arccontainerstorage is not generated in aio namespace") + # skip arccontainerstorage and azuremonitor for aio namespace check + if ops_service in [OpsServiceType.arccontainerstorage.value, OpsServiceType.azuremonitor.value]: + pytest.skip(f"{ops_service} is not generated in aio namespace") command = f"az iot ops support create-bundle --broker-traces {mq_traces} " if bundle_dir: @@ -62,6 +62,7 @@ def test_create_bundle(init_setup, bundle_dir, mq_traces, ops_service, tracked_f aio_namespace = namespaces.get("aio") acs_namespace = namespaces.get("acs") ssc_namespace = namespaces.get("ssc") + arc_namespace = namespaces.get("arc") # Level 1 level_1 = walk_result.pop(path.join(BASE_ZIP_PATH, aio_namespace)) @@ -89,6 +90,10 @@ def test_create_bundle(init_setup, bundle_dir, mq_traces, ops_service, tracked_f if ssc_namespace: walk_result.pop(path.join(BASE_ZIP_PATH, ssc_namespace, OpsServiceType.secretstore.value), {}) + # remove azuremonitor resources in arc namespace from walk_result from aio namespace assertion + if arc_namespace and path.join(BASE_ZIP_PATH, arc_namespace, OpsServiceType.azuremonitor.value) in walk_result: + walk_result.pop(path.join(BASE_ZIP_PATH, arc_namespace, OpsServiceType.azuremonitor.value), {}) + # Level 2 and 3 - bottom is_billing_included = OpsServiceType.billing.value in expected_services actual_walk_result = len(expected_services) + int(is_billing_included) + len(ARC_AGENTS) @@ -144,5 +149,12 @@ def _get_expected_services( ): expected_services.remove(OpsServiceType.secretstore.value) + # azuremonitor folder will not be created if there are no azuremonitor resources + if ( + not walk_result.get(path.join(BASE_ZIP_PATH, namespace, OpsServiceType.azuremonitor.value)) + and OpsServiceType.azuremonitor.value in expected_services + ): + expected_services.remove(OpsServiceType.azuremonitor.value) + expected_services.append("meta") return expected_services diff --git a/azext_edge/tests/edge/support/create_bundle_int/test_azuremonitor_int.py b/azext_edge/tests/edge/support/create_bundle_int/test_azuremonitor_int.py new file mode 100644 
index 000000000..fe02a2913
--- /dev/null
+++ b/azext_edge/tests/edge/support/create_bundle_int/test_azuremonitor_int.py
@@ -0,0 +1,36 @@
+# coding=utf-8
+# ----------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License file in the project root for license information.
+# ----------------------------------------------------------------------------------------------
+
+from knack.log import get_logger
+from azext_edge.edge.common import OpsServiceType
+from azext_edge.edge.providers.edge_api import AZUREMONITOR_API_V1
+from .helpers import check_custom_resource_files, check_workload_resource_files, get_file_map, run_bundle_command
+
+logger = get_logger(__name__)
+
+
+def test_create_bundle_azuremonitor(init_setup, tracked_files):
+    """Test for ensuring file names and content. ONLY CHECKS azuremonitor."""
+    ops_service = OpsServiceType.azuremonitor.value
+
+    command = f"az iot ops support create-bundle --ops-service {ops_service}"
+    walk_result, bundle_path = run_bundle_command(command=command, tracked_files=tracked_files)
+    file_map = get_file_map(walk_result, ops_service)
+
+    check_custom_resource_files(
+        file_objs=file_map[OpsServiceType.azuremonitor.value], resource_api=AZUREMONITOR_API_V1
+    )
+
+    # arc namespace
+    expected_workload_types = ["deployment", "pod", "replicaset", "service", "statefulset", "configmap"]
+    expected_types = set(expected_workload_types).union(AZUREMONITOR_API_V1.kinds)
+    assert set(file_map[OpsServiceType.azuremonitor.value].keys()).issubset(expected_types)
+    check_workload_resource_files(
+        file_objs=file_map[OpsServiceType.azuremonitor.value],
+        expected_workload_types=expected_workload_types,
+        prefixes=["diagnostics-operator", "diagnostics-v1"],
+        bundle_path=bundle_path,
+    )
diff --git a/azext_edge/tests/edge/support/test_azuremonitor_support_unit.py b/azext_edge/tests/edge/support/test_azuremonitor_support_unit.py
new file mode 100644
index 000000000..23a6144a8
--- /dev/null
+++ b/azext_edge/tests/edge/support/test_azuremonitor_support_unit.py
@@ -0,0 +1,113 @@
+# coding=utf-8
+# ----------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License file in the project root for license information.
+# ---------------------------------------------------------------------------------------------- + +import random + +from azext_edge.edge.commands_edge import support_bundle +from azext_edge.edge.common import OpsServiceType +from azext_edge.edge.providers.support.azuremonitor import ( + MONITOR_DIRECTORY_PATH, + MONITOR_NAMESPACE, +) +from azext_edge.tests.edge.support.conftest import add_pod_to_mocked_pods +from azext_edge.tests.edge.support.test_support_unit import ( + assert_list_config_maps, + assert_list_deployments, + assert_list_pods, + assert_list_replica_sets, + assert_list_services, + assert_list_stateful_sets, +) + +from ...generators import generate_random_string + +a_bundle_dir = f"support_test_{generate_random_string()}" + + +def test_create_bundle_azuremonitor( + mocked_client, + mocked_config, + mocked_os_makedirs, + mocked_zipfile, + mocked_list_deployments, + mocked_list_pods, + mocked_list_replicasets, + mocked_list_statefulsets, + mocked_list_config_maps, + mocked_list_services, + mocked_list_nodes, + mocked_list_cluster_events, + mocked_list_storage_classes, + mocked_root_logger, + mocked_get_config_map, +): + since_seconds = random.randint(86400, 172800) + + add_pod_to_mocked_pods( + mocked_client=mocked_client, + expected_pod_map=mocked_list_pods, + mock_names=["diagnostics-operator-deployment", "diagnostics-v1-statefulset"], + mock_init_containers=True, + ) + + result = support_bundle( + None, + ops_services=[OpsServiceType.azuremonitor.value], + bundle_dir=a_bundle_dir, + log_age_seconds=since_seconds, + ) + + assert "bundlePath" in result + assert a_bundle_dir in result["bundlePath"] + + assert_list_pods( + mocked_client, + mocked_zipfile, + mocked_list_pods, + label_selector=None, + directory_path=MONITOR_DIRECTORY_PATH, + namespace=MONITOR_NAMESPACE, + since_seconds=since_seconds, + ) + assert_list_deployments( + mocked_client, + mocked_zipfile, + label_selector=None, + directory_path=MONITOR_DIRECTORY_PATH, + namespace=MONITOR_NAMESPACE, + mock_names=["diagnostics-operator-deployment"], + ) + assert_list_replica_sets( + mocked_client, + mocked_zipfile, + label_selector=None, + directory_path=MONITOR_DIRECTORY_PATH, + namespace=MONITOR_NAMESPACE, + mock_names=["diagnostics-operator-deployment"], + ) + assert_list_services( + mocked_client, + mocked_zipfile, + label_selector=None, + directory_path=MONITOR_DIRECTORY_PATH, + namespace=MONITOR_NAMESPACE, + mock_names=["diagnostics-operator-service"], + ) + assert_list_stateful_sets( + mocked_client, + mocked_zipfile, + label_selector=None, + directory_path=MONITOR_DIRECTORY_PATH, + namespace=MONITOR_NAMESPACE, + mock_names=["diagnostics-v1-statefulset"], + ) + assert_list_config_maps( + mocked_client, + mocked_zipfile, + directory_path=MONITOR_DIRECTORY_PATH, + label_selector=None, + mock_names=["diagnostics-v1-collector-config"], + ) diff --git a/azext_edge/tests/edge/support/test_support_unit.py b/azext_edge/tests/edge/support/test_support_unit.py index d6e2e601d..cc0bf8056 100644 --- a/azext_edge/tests/edge/support/test_support_unit.py +++ b/azext_edge/tests/edge/support/test_support_unit.py @@ -680,16 +680,26 @@ def assert_list_stateful_sets( directory_path: str, label_selector: Optional[str] = None, field_selector: Optional[str] = None, + mock_names: Optional[List[str]] = None, + namespace: Optional[str] = None, ): - mocked_client.AppsV1Api().list_stateful_set_for_all_namespaces.assert_any_call( - label_selector=label_selector, field_selector=field_selector - ) + if namespace: + 
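This namespace branch in assert_list_stateful_sets mirrors the dual code path added to process_statefulset earlier in the patch; condensed, the dispatch being asserted is the following sketch (kubernetes client calls as used in that hunk):

    from typing import Optional
    from kubernetes import client

    def list_statefulsets(
        namespace: Optional[str] = None,
        label_selector: Optional[str] = None,
        field_selector: Optional[str] = None,
    ):
        v1_apps = client.AppsV1Api()
        if namespace:
            # Scoped listing, e.g. the fixed azure-arc namespace for azuremonitor.
            return v1_apps.list_namespaced_stateful_set(
                namespace=namespace, label_selector=label_selector, field_selector=field_selector
            )
        # Cluster-wide fallback, preserving the pre-existing behavior.
        return v1_apps.list_stateful_set_for_all_namespaces(
            label_selector=label_selector, field_selector=field_selector
        )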
mocked_client.AppsV1Api().list_namespaced_stateful_set.assert_any_call( + namespace=namespace, label_selector=label_selector, field_selector=field_selector + ) + else: + mocked_client.AppsV1Api().list_stateful_set_for_all_namespaces.assert_any_call( + label_selector=label_selector, field_selector=field_selector + ) - assert_zipfile_write( - mocked_zipfile, - zinfo=f"mock_namespace/{directory_path}/statefulset.mock_statefulset.yaml", - data="kind: Statefulset\nmetadata:\n name: mock_statefulset\n namespace: mock_namespace\n", - ) + mock_names = mock_names or ["mock_statefulset"] + + for name in mock_names: + assert_zipfile_write( + mocked_zipfile, + zinfo=f"mock_namespace/{directory_path}/statefulset.{name}.yaml", + data=f"kind: Statefulset\nmetadata:\n name: {name}\n namespace: mock_namespace\n", + ) def assert_list_services( From 8b2df4da74aaa3b084a8ce21928ab12dd0a093e5 Mon Sep 17 00:00:00 2001 From: Ryan K Date: Fri, 18 Oct 2024 11:52:01 -0700 Subject: [PATCH 03/26] feat: update init and create flows with M3 template (#411) --- azext_edge/constants.py | 2 +- azext_edge/edge/_help.py | 17 +- azext_edge/edge/commands_edge.py | 22 +- .../edge/providers/orchestration/deletion.py | 16 +- .../providers/orchestration/resource_map.py | 21 +- .../edge/providers/orchestration/targets.py | 60 ++--- .../edge/providers/orchestration/template.py | 219 +++++++++--------- .../edge/providers/orchestration/work.py | 66 +++--- .../tests/edge/init/int/test_init_int.py | 12 +- .../edge/orchestration/test_deletion_unit.py | 16 +- .../orchestration/test_resource_map_unit.py | 24 +- .../edge/orchestration/test_targets_unit.py | 42 ++-- .../edge/orchestration/test_template_unit.py | 36 +-- 13 files changed, 292 insertions(+), 261 deletions(-) diff --git a/azext_edge/constants.py b/azext_edge/constants.py index bd79a40aa..91b2532d5 100644 --- a/azext_edge/constants.py +++ b/azext_edge/constants.py @@ -7,7 +7,7 @@ import os -VERSION = "0.7.0b2" +VERSION = "0.8.0a1" EXTENSION_NAME = "azure-iot-ops" EXTENSION_ROOT = os.path.dirname(os.path.abspath(__file__)) USER_AGENT = "IotOperationsCliExtension/{}".format(VERSION) diff --git a/azext_edge/edge/_help.py b/azext_edge/edge/_help.py index a8849ac77..d6422e1fe 100644 --- a/azext_edge/edge/_help.py +++ b/azext_edge/edge/_help.py @@ -478,15 +478,14 @@ def load_iotops_help(): examples: - name: Usage with minimum input. This form will deploy the IoT Operations foundation layer. text: > - az iot ops init --cluster mycluster -g myresourcegroup --sr-resource-id $SCHEMA_REGISTRY_RESOURCE_ID + az iot ops init --cluster mycluster -g myresourcegroup - name: Similar to the prior example but with Arc Container Storage fault-tolerance enabled (requires at least 3 nodes). text: > - az iot ops init --cluster mycluster -g myresourcegroup --sr-resource-id $SCHEMA_REGISTRY_RESOURCE_ID - --enable-fault-tolerance + az iot ops init --cluster mycluster -g myresourcegroup --enable-fault-tolerance - name: This example highlights trust settings for a user provided cert manager config. 
        text: >
-           az iot ops init --cluster mycluster -g myresourcegroup --sr-resource-id $SCHEMA_REGISTRY_RESOURCE_ID
-           --trust-settings configMapName=example-bundle configMapKey=trust-bundle.pem issuerKind=ClusterIssuer
+           az iot ops init --cluster mycluster -g myresourcegroup --trust-settings
+           configMapName=example-bundle configMapKey=trust-bundle.pem issuerKind=ClusterIssuer
            issuerName=trust-manager-selfsigned-issuer
    """

@@ -505,23 +504,23 @@ def load_iotops_help():

      examples:
      - name: Create the target instance with minimum input.
        text: >
-           az iot ops create --cluster mycluster -g myresourcegroup --name myinstance
+           az iot ops create --cluster mycluster -g myresourcegroup --name myinstance --sr-resource-id $SCHEMA_REGISTRY_RESOURCE_ID
      - name: The following example adds customization to the default broker instance resource as well as an instance description and tags.
        text: >
-           az iot ops create --cluster mycluster -g myresourcegroup --name myinstance
+           az iot ops create --cluster mycluster -g myresourcegroup --name myinstance --sr-resource-id $SCHEMA_REGISTRY_RESOURCE_ID
            --broker-mem-profile High --broker-backend-workers 4 --description 'Contoso Factory' --tags tier=testX1
      - name: This example shows deploying an additional insecure (no authn or authz) broker listener configured for port 1883 of service type load balancer. Useful for testing and/or demos. Do not use the insecure option in production.
        text: >
-           az iot ops create --cluster mycluster -g myresourcegroup --name myinstance
+           az iot ops create --cluster mycluster -g myresourcegroup --name myinstance --sr-resource-id $SCHEMA_REGISTRY_RESOURCE_ID
            --add-insecure-listener
      - name: This form shows how to enable resource sync for the instance deployment. To enable resource sync, role assignment write permission is required on the target resource group.
text: > - az iot ops create --cluster mycluster -g myresourcegroup --name myinstance + az iot ops create --cluster mycluster -g myresourcegroup --name myinstance --sr-resource-id $SCHEMA_REGISTRY_RESOURCE_ID --enable-rsync """ diff --git a/azext_edge/edge/commands_edge.py b/azext_edge/edge/commands_edge.py index 156c19a6c..e79e20014 100644 --- a/azext_edge/edge/commands_edge.py +++ b/azext_edge/edge/commands_edge.py @@ -109,13 +109,8 @@ def init( cmd, cluster_name: str, resource_group_name: str, - schema_registry_resource_id: str, - container_runtime_socket: Optional[str] = None, - kubernetes_distro: str = KubernetesDistroType.k8s.value, trust_settings: Optional[List[str]] = None, enable_fault_tolerance: Optional[bool] = None, - ops_config: Optional[List[str]] = None, - ops_version: Optional[str] = None, no_progress: Optional[bool] = None, ensure_latest: Optional[bool] = None, **kwargs, @@ -134,13 +129,8 @@ def init( pre_flight=not no_pre_flight, cluster_name=cluster_name, resource_group_name=resource_group_name, - container_runtime_socket=container_runtime_socket, - kubernetes_distro=kubernetes_distro, enable_fault_tolerance=enable_fault_tolerance, - ops_config=ops_config, - ops_version=ops_version, trust_settings=trust_settings, - schema_registry_resource_id=schema_registry_resource_id, ) @@ -149,12 +139,18 @@ def create_instance( cluster_name: str, resource_group_name: str, instance_name: str, + schema_registry_resource_id: str, cluster_namespace: str = DEFAULT_NAMESPACE, location: Optional[str] = None, custom_location_name: Optional[str] = None, enable_rsync_rules: Optional[bool] = None, instance_description: Optional[str] = None, dataflow_profile_instances: int = 1, + # Ops extension + container_runtime_socket: Optional[str] = None, + kubernetes_distro: str = KubernetesDistroType.k8s.value, + ops_config: Optional[List[str]] = None, + ops_version: Optional[str] = None, # Broker custom_broker_config_file: Optional[str] = None, broker_memory_profile: str = MqMemoryProfile.medium.value, @@ -196,6 +192,7 @@ def create_instance( cluster_name=cluster_name, resource_group_name=resource_group_name, cluster_namespace=cluster_namespace, + schema_registry_resource_id=schema_registry_resource_id, location=location, custom_location_name=custom_location_name, enable_rsync_rules=enable_rsync_rules, @@ -203,6 +200,11 @@ def create_instance( instance_description=instance_description, add_insecure_listener=add_insecure_listener, dataflow_profile_instances=dataflow_profile_instances, + # Ops Extension + container_runtime_socket=container_runtime_socket, + kubernetes_distro=kubernetes_distro, + ops_config=ops_config, + ops_version=ops_version, # Broker custom_broker_config=custom_broker_config, broker_memory_profile=broker_memory_profile, diff --git a/azext_edge/edge/providers/orchestration/deletion.py b/azext_edge/edge/providers/orchestration/deletion.py index 35fb47411..8375fdb78 100644 --- a/azext_edge/edge/providers/orchestration/deletion.py +++ b/azext_edge/edge/providers/orchestration/deletion.py @@ -16,6 +16,8 @@ from rich.progress import Progress, SpinnerColumn, TimeElapsedColumn from rich.table import Table +from azext_edge.edge.providers.orchestration.work import IOT_OPS_EXTENSION_TYPE + from ...util.az_client import get_resource_client, wait_for_terminal_states from ...util.common import should_continue_prompt from .resource_map import IoTOperationsResource, IoTOperationsResourceMap @@ -111,7 +113,7 @@ def _get_resource_map(self) -> IoTOperationsResourceMap: def _display_resource_tree(self): if 
self._render_progress: - print(self.resource_map.build_tree(hide_extensions=not self.include_dependencies, category_color="red")) + print(self.resource_map.build_tree(include_dependencies=self.include_dependencies, category_color="red")) def _render_display(self, description: str): if self._render_progress: @@ -141,6 +143,18 @@ def _process(self, force: bool = False): todo_extensions = [] if self.include_dependencies: todo_extensions.extend(self.resource_map.extensions) + else: + # instance delete should delete AIO extension too + # TODO: @c-ryan-k hacky + aio_ext_obj = self.resource_map.connected_cluster.get_extensions_by_type(IOT_OPS_EXTENSION_TYPE).get( + IOT_OPS_EXTENSION_TYPE, {} + ) + aio_ext_id: str = aio_ext_obj.get("id", "") + aio_ext = next( + (_ for _ in self.resource_map.extensions if _.resource_id.lower() == aio_ext_id.lower()), None + ) + if aio_ext: + todo_extensions.append(aio_ext) todo_custom_locations = self.resource_map.custom_locations todo_resource_sync_rules = [] todo_resources = [] diff --git a/azext_edge/edge/providers/orchestration/resource_map.py b/azext_edge/edge/providers/orchestration/resource_map.py index 4af8b24c1..dbe4d9eb7 100644 --- a/azext_edge/edge/providers/orchestration/resource_map.py +++ b/azext_edge/edge/providers/orchestration/resource_map.py @@ -48,9 +48,10 @@ def __init__( cluster_name: str, resource_group_name: str, subscription_id: Optional[str] = None, - defer_refresh: bool = False + defer_refresh: bool = False, ): from azure.cli.core.commands.client_factory import get_subscription_id + self.subscription_id = subscription_id or get_subscription_id(cli_ctx=cmd.cli_ctx) self.connected_cluster = ConnectedCluster( cmd=cmd, @@ -133,11 +134,23 @@ def refresh_resource_state(self): self._cluster_container = refreshed_cluster_container - def build_tree(self, hide_extensions: bool = False, category_color: str = "cyan") -> Tree: + def build_tree(self, include_dependencies: bool = False, category_color: str = "cyan") -> Tree: + from .work import IOT_OPS_EXTENSION_TYPE + tree = Tree(f"[green]{self.connected_cluster.cluster_name}") - if not hide_extensions: - extensions_node = tree.add(label=f"[{category_color}]extensions") + extensions_node = tree.add(label=f"[{category_color}]extensions") + if not include_dependencies: + # only show aio extension + # TODO: @c-ryan-k hacky + aio_ext_obj = self.connected_cluster.get_extensions_by_type(IOT_OPS_EXTENSION_TYPE).get( + IOT_OPS_EXTENSION_TYPE, {} + ) + aio_ext_id: str = aio_ext_obj.get("id", "") + aio_ext = next((_ for _ in self.extensions if _.resource_id.lower() == aio_ext_id.lower()), None) + if aio_ext: + extensions_node.add(aio_ext.display_name) + else: [extensions_node.add(ext.display_name) for ext in self.extensions] custom_locations = self.custom_locations diff --git a/azext_edge/edge/providers/orchestration/targets.py b/azext_edge/edge/providers/orchestration/targets.py index 16239c531..9c4e1f505 100644 --- a/azext_edge/edge/providers/orchestration/targets.py +++ b/azext_edge/edge/providers/orchestration/targets.py @@ -16,7 +16,7 @@ DEFAULT_DATAFLOW_PROFILE, ) from ...util import assemble_nargs_to_dict -from ...util.az_client import parse_resource_id, REGISTRY_API_VERSION +from ...util.az_client import parse_resource_id from ..orchestration.common import ( TRUST_ISSUER_KIND_KEY, TRUST_SETTING_KEYS, @@ -24,8 +24,8 @@ from .common import KubernetesDistroType from .template import ( IOT_OPERATIONS_VERSION_MONIKER, - M2_ENABLEMENT_TEMPLATE, - M2_INSTANCE_TEMPLATE, + M3_ENABLEMENT_TEMPLATE, + 
M3_INSTANCE_TEMPLATE, TemplateBlueprint, get_insecure_listener, ) @@ -137,7 +137,7 @@ def iot_operations_version(self): def get_extension_versions(self) -> dict: # Don't need a deep copy here. - return M2_ENABLEMENT_TEMPLATE.content["variables"]["VERSIONS"].copy() + return M3_ENABLEMENT_TEMPLATE.content["variables"]["VERSIONS"].copy() def get_ops_enablement_template( self, @@ -145,43 +145,18 @@ def get_ops_enablement_template( template, parameters = self._handle_apply_targets( param_to_target={ "clusterName": self.cluster_name, - "kubernetesDistro": self.kubernetes_distro, - "containerRuntimeSocket": self.container_runtime_socket, "trustConfig": self.trust_config, - "schemaRegistryId": self.schema_registry_resource_id, "advancedConfig": self.advanced_config, }, - template_blueprint=M2_ENABLEMENT_TEMPLATE, + template_blueprint=M3_ENABLEMENT_TEMPLATE, ) - if self.ops_config: - aio_default_config: Dict[str, str] = template.content["variables"]["defaultAioConfigurationSettings"] - aio_default_config.update(self.ops_config) - - if self.ops_version: - template.content["variables"]["VERSIONS"]["aio"] = self.ops_version - # TODO - @digimaun - expand trustSource for self managed & trustBundleSettings return template.content, parameters def get_ops_instance_template( - self, cl_extension_ids: List[str], ops_extension_config: Dict[str, str] + self, cl_extension_ids: List[str], ) -> Tuple[dict, dict]: - # Set the schema registry resource Id from the extension config - self.schema_registry_resource_id = ops_extension_config.get("schemaRegistry.values.resourceId") - trust_source = ops_extension_config.get("trustSource") - - if trust_source == "CustomerManaged": - trust_issuer_name = ops_extension_config.get("trustBundleSettings.issuer.name") - trust_issuer_kind = ops_extension_config.get("trustBundleSettings.issuer.kind") - trust_configmap_name = ops_extension_config.get("trustBundleSettings.configMap.name") - trust_configmap_key = ops_extension_config.get("trustBundleSettings.configMap.key") - self.trust_settings = { - "issuerName": trust_issuer_name, - "issuerKind": trust_issuer_kind, - "configMapName": trust_configmap_name, - "configMapKey": trust_configmap_key, - } self.trust_config = self.get_trust_settings_target_map() template, parameters = self._handle_apply_targets( @@ -189,6 +164,8 @@ def get_ops_instance_template( "clusterName": self.cluster_name, "clusterNamespace": self.cluster_namespace, "clusterLocation": self.location, + "kubernetesDistro": self.kubernetes_distro, + "containerRuntimeSocket": self.container_runtime_socket, "customLocationName": self.custom_location_name, "clExtentionIds": cl_extension_ids, "deployResourceSyncRules": self.deploy_resource_sync_rules, @@ -197,15 +174,22 @@ def get_ops_instance_template( "brokerConfig": self.broker_config, "trustConfig": self.trust_config, }, - template_blueprint=M2_INSTANCE_TEMPLATE, + template_blueprint=M3_INSTANCE_TEMPLATE, ) + + if self.ops_config: + aio_default_config: Dict[str, str] = template.content["variables"]["defaultAioConfigurationSettings"] + aio_default_config.update(self.ops_config) + + if self.ops_version: + template.content["variables"]["VERSIONS"]["iotOperations"] = self.ops_version + instance = template.get_resource_by_key("aioInstance") instance["properties"]["description"] = self.instance_description - # TODO: this is temporary for this milestone. Next milestone it should change. 
- instance["properties"][ - "schemaRegistryNamespace" - ] = f"[reference(parameters('schemaRegistryId'), '{REGISTRY_API_VERSION}').namespace]" + instance["properties"]["schemaRegistryRef"] = { + "resourceId": "[parameters('schemaRegistryId')]" + } if self.tags: instance["tags"] = self.tags @@ -250,7 +234,7 @@ def get_broker_config_target_map(self): processed_config_map = {} validation_errors = [] - broker_config_def = M2_INSTANCE_TEMPLATE.get_type_definition("_1.BrokerConfig")["properties"] + broker_config_def = M3_INSTANCE_TEMPLATE.get_type_definition("_1.BrokerConfig")["properties"] for config in to_process_config_map: if to_process_config_map[config] is None: continue @@ -294,7 +278,7 @@ def get_trust_settings_target_map(self) -> dict: if self.trust_settings: target_settings: Dict[str, str] = {} result["source"] = "CustomerManaged" - trust_bundle_def = M2_ENABLEMENT_TEMPLATE.get_type_definition("_1.TrustBundleSettings")["properties"] + trust_bundle_def = M3_ENABLEMENT_TEMPLATE.get_type_definition("_1.TrustBundleSettings")["properties"] allowed_issuer_kinds: Optional[List[str]] = trust_bundle_def.get(TRUST_ISSUER_KIND_KEY, {}).get( "allowedValues" ) diff --git a/azext_edge/edge/providers/orchestration/template.py b/azext_edge/edge/providers/orchestration/template.py index d099df4f4..73c38cdd8 100644 --- a/azext_edge/edge/providers/orchestration/template.py +++ b/azext_edge/edge/providers/orchestration/template.py @@ -47,16 +47,16 @@ def copy(self) -> "TemplateBlueprint": ) -IOT_OPERATIONS_VERSION_MONIKER = "v0.7.0-preview" +IOT_OPERATIONS_VERSION_MONIKER = "v0.8.0-preview" -M2_ENABLEMENT_TEMPLATE = TemplateBlueprint( +M3_ENABLEMENT_TEMPLATE = TemplateBlueprint( commit_id="78864ae529f698cf1c9bf0be0a6957e6c9f3cf38", content={ "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", "languageVersion": "2.0", "contentVersion": "1.0.0.0", "metadata": { - "_generator": {"name": "bicep", "version": "0.30.23.60470", "templateHash": "7995831049546950052"} + "_generator": {"name": "bicep", "version": "0.30.23.60470", "templateHash": "17597461722386619555"} }, "definitions": { "_1.AdvancedConfig": { @@ -90,6 +90,7 @@ def copy(self) -> "TemplateBlueprint": "observability": { "type": "object", "properties": { + "enabled": {"type": "bool", "nullable": True}, "otelCollectorAddress": {"type": "string", "nullable": True}, "otelExportIntervalSeconds": {"type": "int", "nullable": True}, }, @@ -207,69 +208,25 @@ def copy(self) -> "TemplateBlueprint": }, "parameters": { "clusterName": {"type": "string"}, - "kubernetesDistro": {"type": "string", "defaultValue": "K8s", "allowedValues": ["K3s", "K8s", "MicroK8s"]}, - "containerRuntimeSocket": {"type": "string", "defaultValue": ""}, "trustConfig": {"$ref": "#/definitions/_1.TrustConfig", "defaultValue": {"source": "SelfSigned"}}, - "schemaRegistryId": {"type": "string"}, "advancedConfig": {"$ref": "#/definitions/_1.AdvancedConfig", "defaultValue": {}}, }, "variables": { - "AIO_EXTENSION_SCOPE": {"cluster": {"releaseNamespace": "azure-iot-operations"}}, - "AIO_EXTENSION_SUFFIX": "[take(uniqueString(resourceId('Microsoft.Kubernetes/connectedClusters', parameters('clusterName'))), 5)]", "VERSIONS": { "platform": "0.7.6", - "aio": "0.7.31", - "secretSyncController": "0.6.4", - "edgeStorageAccelerator": "2.1.1-preview", - "openServiceMesh": "1.2.9", + "secretStore": "0.6.4", + "containerStorage": "2.2.0", + "openServiceMesh": "1.2.10", }, "TRAINS": { "platform": "preview", - "aio": "preview", - "secretSyncController": "preview", + 
"secretStore": "preview", + "containerStorage": "preview", "openServiceMesh": "stable", - "edgeStorageAccelerator": "stable", - }, - "OBSERVABILITY_ENABLED": "[not(equals(tryGet(tryGet(parameters('advancedConfig'), 'observability'), 'otelCollectorAddress'), null()))]", - "MQTT_SETTINGS": { - "brokerListenerServiceName": "aio-broker", - "brokerListenerPort": 18883, - "serviceAccountAudience": "aio-internal", - "selfSignedIssuerName": "[format('{0}-aio-certificate-issuer', variables('AIO_EXTENSION_SCOPE').cluster.releaseNamespace)]", }, "faultTolerantStorageClass": "[coalesce(tryGet(tryGet(parameters('advancedConfig'), 'edgeStorageAccelerator'), 'diskStorageClass'), 'acstor-arccontainerstorage-storage-pool')]", "nonFaultTolerantStorageClass": "[coalesce(tryGet(tryGet(parameters('advancedConfig'), 'edgeStorageAccelerator'), 'diskStorageClass'), 'default,local-path')]", "kubernetesStorageClass": "[if(equals(tryGet(tryGet(parameters('advancedConfig'), 'edgeStorageAccelerator'), 'faultToleranceEnabled'), true()), variables('faultTolerantStorageClass'), variables('nonFaultTolerantStorageClass'))]", - "defaultAioConfigurationSettings": { - "AgentOperationTimeoutInMinutes": 120, - "connectors.values.mqttBroker.address": "[format('mqtts://{0}.{1}:{2}', variables('MQTT_SETTINGS').brokerListenerServiceName, variables('AIO_EXTENSION_SCOPE').cluster.releaseNamespace, variables('MQTT_SETTINGS').brokerListenerPort)]", - "connectors.values.mqttBroker.serviceAccountTokenAudience": "[variables('MQTT_SETTINGS').serviceAccountAudience]", - "connectors.values.opcPlcSimulation.deploy": "false", - "connectors.values.opcPlcSimulation.autoAcceptUntrustedCertificates": "false", - "connectors.values.discoveryHandler.enabled": "false", - "adr.values.Microsoft.CustomLocation.ServiceAccount": "default", - "akri.values.webhookConfiguration.enabled": "false", - "akri.values.certManagerWebhookCertificate.enabled": "false", - "akri.values.agent.extensionService.mqttBroker.hostName": "[format('{0}.{1}', variables('MQTT_SETTINGS').brokerListenerServiceName, variables('AIO_EXTENSION_SCOPE').cluster.releaseNamespace)]", - "akri.values.agent.extensionService.mqttBroker.port": "[variables('MQTT_SETTINGS').brokerListenerPort]", - "akri.values.agent.extensionService.mqttBroker.serviceAccountAudience": "[variables('MQTT_SETTINGS').serviceAccountAudience]", - "akri.values.agent.host.containerRuntimeSocket": "[parameters('containerRuntimeSocket')]", - "akri.values.kubernetesDistro": "[toLower(parameters('kubernetesDistro'))]", - "mqttBroker.values.global.quickstart": "false", - "mqttBroker.values.operator.firstPartyMetricsOn": "true", - "observability.metrics.enabled": "[format('{0}', variables('OBSERVABILITY_ENABLED'))]", - "observability.metrics.openTelemetryCollectorAddress": "[if(variables('OBSERVABILITY_ENABLED'), format('{0}', tryGet(tryGet(parameters('advancedConfig'), 'observability'), 'otelCollectorAddress')), '')]", - "observability.metrics.exportIntervalSeconds": "[format('{0}', coalesce(tryGet(tryGet(parameters('advancedConfig'), 'observability'), 'otelExportIntervalSeconds'), 60))]", - "trustSource": "[parameters('trustConfig').source]", - "trustBundleSettings.issuer.name": "[if(equals(parameters('trustConfig').source, 'CustomerManaged'), parameters('trustConfig').settings.issuerName, variables('MQTT_SETTINGS').selfSignedIssuerName)]", - "trustBundleSettings.issuer.kind": "[coalesce(tryGet(tryGet(parameters('trustConfig'), 'settings'), 'issuerKind'), '')]", - "trustBundleSettings.configMap.name": 
"[coalesce(tryGet(tryGet(parameters('trustConfig'), 'settings'), 'configMapName'), '')]", - "trustBundleSettings.configMap.key": "[coalesce(tryGet(tryGet(parameters('trustConfig'), 'settings'), 'configMapKey'), '')]", - "schemaRegistry.values.resourceId": "[parameters('schemaRegistryId')]", - "schemaRegistry.values.mqttBroker.host": "[format('mqtts://{0}.{1}:{2}', variables('MQTT_SETTINGS').brokerListenerServiceName, variables('AIO_EXTENSION_SCOPE').cluster.releaseNamespace, variables('MQTT_SETTINGS').brokerListenerPort)]", - "schemaRegistry.values.mqttBroker.tlsEnabled": True, - "schemaRegistry.values.mqttBroker.serviceAccountTokenAudience": "[variables('MQTT_SETTINGS').serviceAccountAudience]", - }, }, "resources": { "cluster": { @@ -282,7 +239,7 @@ def copy(self) -> "TemplateBlueprint": "type": "Microsoft.KubernetesConfiguration/extensions", "apiVersion": "2023-05-01", "scope": "[format('Microsoft.Kubernetes/connectedClusters/{0}', parameters('clusterName'))]", - "name": "[format('azure-iot-operations-platform-{0}', variables('AIO_EXTENSION_SUFFIX'))]", + "name": "azure-iot-operations-platform", "identity": {"type": "SystemAssigned"}, "properties": { "extensionType": "microsoft.iotoperations.platform", @@ -297,7 +254,7 @@ def copy(self) -> "TemplateBlueprint": }, "dependsOn": ["cluster"], }, - "secret_sync_controller_extension": { + "secret_store_extension": { "type": "Microsoft.KubernetesConfiguration/extensions", "apiVersion": "2023-05-01", "scope": "[format('Microsoft.Kubernetes/connectedClusters/{0}', parameters('clusterName'))]", @@ -305,8 +262,8 @@ def copy(self) -> "TemplateBlueprint": "identity": {"type": "SystemAssigned"}, "properties": { "extensionType": "microsoft.azure.secretstore", - "version": "[coalesce(tryGet(tryGet(parameters('advancedConfig'), 'secretSyncController'), 'version'), variables('VERSIONS').secretSyncController)]", - "releaseTrain": "[coalesce(tryGet(tryGet(parameters('advancedConfig'), 'secretSyncController'), 'train'), variables('TRAINS').secretSyncController)]", + "version": "[coalesce(tryGet(tryGet(parameters('advancedConfig'), 'secretSyncController'), 'version'), variables('VERSIONS').secretStore)]", + "releaseTrain": "[coalesce(tryGet(tryGet(parameters('advancedConfig'), 'secretSyncController'), 'train'), variables('TRAINS').secretStore)]", "autoUpgradeMinorVersion": False, "configurationSettings": { "rotationPollIntervalInSeconds": "120", @@ -319,7 +276,7 @@ def copy(self) -> "TemplateBlueprint": "type": "Microsoft.KubernetesConfiguration/extensions", "apiVersion": "2023-05-01", "scope": "[format('Microsoft.Kubernetes/connectedClusters/{0}', parameters('clusterName'))]", - "name": "[format('open-service-mesh-{0}', variables('AIO_EXTENSION_SUFFIX'))]", + "name": "open-service-mesh", "properties": { "extensionType": "microsoft.openservicemesh", "autoUpgradeMinorVersion": False, @@ -333,7 +290,7 @@ def copy(self) -> "TemplateBlueprint": }, "dependsOn": ["cluster"], }, - "edge_storage_accelerator_extension": { + "container_storage_extension": { "type": "Microsoft.KubernetesConfiguration/extensions", "apiVersion": "2023-05-01", "scope": "[format('Microsoft.Kubernetes/connectedClusters/{0}', parameters('clusterName'))]", @@ -342,76 +299,48 @@ def copy(self) -> "TemplateBlueprint": "properties": { "extensionType": "Microsoft.Arc.ContainerStorage", "autoUpgradeMinorVersion": False, - "version": "[coalesce(tryGet(tryGet(parameters('advancedConfig'), 'edgeStorageAccelerator'), 'version'), variables('VERSIONS').edgeStorageAccelerator)]", - "releaseTrain": 
"[coalesce(tryGet(tryGet(parameters('advancedConfig'), 'edgeStorageAccelerator'), 'train'), variables('TRAINS').edgeStorageAccelerator)]", + "version": "[coalesce(tryGet(tryGet(parameters('advancedConfig'), 'edgeStorageAccelerator'), 'version'), variables('VERSIONS').containerStorage)]", + "releaseTrain": "[coalesce(tryGet(tryGet(parameters('advancedConfig'), 'edgeStorageAccelerator'), 'train'), variables('TRAINS').containerStorage)]", "configurationSettings": "[union(createObject('edgeStorageConfiguration.create', 'true', 'feature.diskStorageClass', variables('kubernetesStorageClass')), if(equals(tryGet(tryGet(parameters('advancedConfig'), 'edgeStorageAccelerator'), 'faultToleranceEnabled'), true()), createObject('acstorConfiguration.create', 'true', 'acstorConfiguration.properties.diskMountPoint', '/mnt'), createObject()))]", }, "dependsOn": ["aio_platform_extension", "cluster", "open_service_mesh_extension"], }, - "aio_extension": { - "type": "Microsoft.KubernetesConfiguration/extensions", - "apiVersion": "2023-05-01", - "scope": "[format('Microsoft.Kubernetes/connectedClusters/{0}', parameters('clusterName'))]", - "name": "[format('azure-iot-operations-{0}', variables('AIO_EXTENSION_SUFFIX'))]", - "identity": {"type": "SystemAssigned"}, - "properties": { - "extensionType": "microsoft.iotoperations", - "version": "[coalesce(tryGet(tryGet(parameters('advancedConfig'), 'aio'), 'version'), variables('VERSIONS').aio)]", - "releaseTrain": "[coalesce(tryGet(tryGet(parameters('advancedConfig'), 'aio'), 'train'), variables('TRAINS').aio)]", - "autoUpgradeMinorVersion": False, - "scope": "[variables('AIO_EXTENSION_SCOPE')]", - "configurationSettings": "[union(variables('defaultAioConfigurationSettings'), coalesce(tryGet(tryGet(parameters('advancedConfig'), 'aio'), 'configurationSettingsOverride'), createObject()))]", - }, - "dependsOn": ["aio_platform_extension", "cluster"], - }, }, "outputs": { "clExtensionIds": { "type": "array", "items": {"type": "string"}, "value": [ - "[extensionResourceId(resourceId('Microsoft.Kubernetes/connectedClusters', parameters('clusterName')), 'Microsoft.KubernetesConfiguration/extensions', format('azure-iot-operations-platform-{0}', variables('AIO_EXTENSION_SUFFIX')))]", - "[extensionResourceId(resourceId('Microsoft.Kubernetes/connectedClusters', parameters('clusterName')), 'Microsoft.KubernetesConfiguration/extensions', format('azure-iot-operations-{0}', variables('AIO_EXTENSION_SUFFIX')))]", + "[extensionResourceId(resourceId('Microsoft.Kubernetes/connectedClusters', parameters('clusterName')), 'Microsoft.KubernetesConfiguration/extensions', 'azure-iot-operations-platform')]", "[extensionResourceId(resourceId('Microsoft.Kubernetes/connectedClusters', parameters('clusterName')), 'Microsoft.KubernetesConfiguration/extensions', 'azure-secret-store')]", ], }, "extensions": { "type": "object", "value": { - "aio": { - "name": "[format('azure-iot-operations-{0}', variables('AIO_EXTENSION_SUFFIX'))]", - "id": "[extensionResourceId(resourceId('Microsoft.Kubernetes/connectedClusters', parameters('clusterName')), 'Microsoft.KubernetesConfiguration/extensions', format('azure-iot-operations-{0}', variables('AIO_EXTENSION_SUFFIX')))]", - "version": "[reference('aio_extension').version]", - "releaseTrain": "[reference('aio_extension').releaseTrain]", - "config": { - "brokerListenerName": "[variables('MQTT_SETTINGS').brokerListenerServiceName]", - "brokerListenerPort": "[variables('MQTT_SETTINGS').brokerListenerPort]", - }, - "identityPrincipalId": "[reference('aio_extension', 
'2023-05-01', 'full').identity.principalId]", - }, "platform": { - "name": "[format('azure-iot-operations-platform-{0}', variables('AIO_EXTENSION_SUFFIX'))]", - "id": "[extensionResourceId(resourceId('Microsoft.Kubernetes/connectedClusters', parameters('clusterName')), 'Microsoft.KubernetesConfiguration/extensions', format('azure-iot-operations-platform-{0}', variables('AIO_EXTENSION_SUFFIX')))]", + "name": "azure-iot-operations-platform", + "id": "[extensionResourceId(resourceId('Microsoft.Kubernetes/connectedClusters', parameters('clusterName')), 'Microsoft.KubernetesConfiguration/extensions', 'azure-iot-operations-platform')]", "version": "[reference('aio_platform_extension').version]", "releaseTrain": "[reference('aio_platform_extension').releaseTrain]", }, - "secretSyncController": { + "secretStore": { "name": "azure-secret-store", "id": "[extensionResourceId(resourceId('Microsoft.Kubernetes/connectedClusters', parameters('clusterName')), 'Microsoft.KubernetesConfiguration/extensions', 'azure-secret-store')]", - "version": "[reference('secret_sync_controller_extension').version]", - "releaseTrain": "[reference('secret_sync_controller_extension').releaseTrain]", + "version": "[reference('secret_store_extension').version]", + "releaseTrain": "[reference('secret_store_extension').releaseTrain]", }, "openServiceMesh": { - "name": "[format('open-service-mesh-{0}', variables('AIO_EXTENSION_SUFFIX'))]", - "id": "[extensionResourceId(resourceId('Microsoft.Kubernetes/connectedClusters', parameters('clusterName')), 'Microsoft.KubernetesConfiguration/extensions', format('open-service-mesh-{0}', variables('AIO_EXTENSION_SUFFIX')))]", + "name": "open-service-mesh", + "id": "[extensionResourceId(resourceId('Microsoft.Kubernetes/connectedClusters', parameters('clusterName')), 'Microsoft.KubernetesConfiguration/extensions', 'open-service-mesh')]", "version": "[reference('open_service_mesh_extension').version]", "releaseTrain": "[reference('open_service_mesh_extension').releaseTrain]", }, - "edgeStorageAccelerator": { + "containerStorage": { "name": "azure-arc-containerstorage", "id": "[extensionResourceId(resourceId('Microsoft.Kubernetes/connectedClusters', parameters('clusterName')), 'Microsoft.KubernetesConfiguration/extensions', 'azure-arc-containerstorage')]", - "version": "[reference('edge_storage_accelerator_extension').version]", - "releaseTrain": "[reference('edge_storage_accelerator_extension').releaseTrain]", + "version": "[reference('container_storage_extension').version]", + "releaseTrain": "[reference('container_storage_extension').releaseTrain]", }, }, }, @@ -419,14 +348,14 @@ def copy(self) -> "TemplateBlueprint": }, ) -M2_INSTANCE_TEMPLATE = TemplateBlueprint( +M3_INSTANCE_TEMPLATE = TemplateBlueprint( commit_id="373335547851df70d512b7ec81aedfba0d660ae5", content={ "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", "languageVersion": "2.0", "contentVersion": "1.0.0.0", "metadata": { - "_generator": {"name": "bicep", "version": "0.30.23.60470", "templateHash": "6194687573320740755"} + "_generator": {"name": "bicep", "version": "0.30.23.60470", "templateHash": "8789011211011918491"} }, "definitions": { "_1.AdvancedConfig": { @@ -460,6 +389,7 @@ def copy(self) -> "TemplateBlueprint": "observability": { "type": "object", "properties": { + "enabled": {"type": "bool", "nullable": True}, "otelCollectorAddress": {"type": "string", "nullable": True}, "otelExportIntervalSeconds": {"type": "int", "nullable": True}, }, @@ -579,6 +509,8 @@ def copy(self) 
-> "TemplateBlueprint": "clusterName": {"type": "string"}, "clusterNamespace": {"type": "string", "defaultValue": "azure-iot-operations"}, "clusterLocation": {"type": "string", "defaultValue": "[resourceGroup().location]"}, + "kubernetesDistro": {"type": "string", "defaultValue": "K8s", "allowedValues": ["K3s", "K8s", "MicroK8s"]}, + "containerRuntimeSocket": {"type": "string", "defaultValue": ""}, "customLocationName": { "type": "string", "defaultValue": "[format('location-{0}', coalesce(tryGet(parameters('advancedConfig'), 'resourceSuffix'), take(uniqueString(resourceGroup().id, parameters('clusterName'), parameters('clusterNamespace')), 5)))]", @@ -593,6 +525,10 @@ def copy(self) -> "TemplateBlueprint": "advancedConfig": {"$ref": "#/definitions/_1.AdvancedConfig", "defaultValue": {}}, }, "variables": { + "AIO_EXTENSION_SUFFIX": "[take(uniqueString(resourceId('Microsoft.Kubernetes/connectedClusters', parameters('clusterName'))), 5)]", + "AIO_EXTENSION_SCOPE": {"cluster": {"releaseNamespace": "azure-iot-operations"}}, + "VERSIONS": {"iotOperations": "0.8.16"}, + "TRAINS": {"iotOperations": "integration"}, "MQTT_SETTINGS": { "brokerListenerServiceName": "aio-broker", "brokerListenerPort": 18883, @@ -609,6 +545,35 @@ def copy(self) -> "TemplateBlueprint": "memoryProfile": "[coalesce(tryGet(parameters('brokerConfig'), 'memoryProfile'), 'Medium')]", "serviceType": "[coalesce(tryGet(parameters('brokerConfig'), 'serviceType'), 'ClusterIp')]", }, + "defaultAioConfigurationSettings": { + "AgentOperationTimeoutInMinutes": 120, + "connectors.values.mqttBroker.address": "[format('mqtts://{0}.{1}:{2}', variables('MQTT_SETTINGS').brokerListenerServiceName, variables('AIO_EXTENSION_SCOPE').cluster.releaseNamespace, variables('MQTT_SETTINGS').brokerListenerPort)]", + "connectors.values.mqttBroker.serviceAccountTokenAudience": "[variables('MQTT_SETTINGS').serviceAccountAudience]", + "connectors.values.opcPlcSimulation.deploy": "false", + "connectors.values.opcPlcSimulation.autoAcceptUntrustedCertificates": "false", + "connectors.values.discoveryHandler.enabled": "false", + "adr.values.Microsoft.CustomLocation.ServiceAccount": "default", + "akri.values.webhookConfiguration.enabled": "false", + "akri.values.certManagerWebhookCertificate.enabled": "false", + "akri.values.agent.extensionService.mqttBroker.hostName": "[format('{0}.{1}', variables('MQTT_SETTINGS').brokerListenerServiceName, variables('AIO_EXTENSION_SCOPE').cluster.releaseNamespace)]", + "akri.values.agent.extensionService.mqttBroker.port": "[variables('MQTT_SETTINGS').brokerListenerPort]", + "akri.values.agent.extensionService.mqttBroker.serviceAccountAudience": "[variables('MQTT_SETTINGS').serviceAccountAudience]", + "akri.values.agent.host.containerRuntimeSocket": "[parameters('containerRuntimeSocket')]", + "akri.values.kubernetesDistro": "[toLower(parameters('kubernetesDistro'))]", + "mqttBroker.values.global.quickstart": "false", + "mqttBroker.values.operator.firstPartyMetricsOn": "true", + "observability.metrics.enabled": "[format('{0}', coalesce(tryGet(tryGet(parameters('advancedConfig'), 'observability'), 'enabled'), false()))]", + "observability.metrics.openTelemetryCollectorAddress": "[if(coalesce(tryGet(tryGet(parameters('advancedConfig'), 'observability'), 'enabled'), false()), format('{0}', tryGet(tryGet(parameters('advancedConfig'), 'observability'), 'otelCollectorAddress')), '')]", + "observability.metrics.exportIntervalSeconds": "[format('{0}', coalesce(tryGet(tryGet(parameters('advancedConfig'), 'observability'), 
'otelExportIntervalSeconds'), 60))]", + "trustSource": "[parameters('trustConfig').source]", + "trustBundleSettings.issuer.name": "[if(equals(parameters('trustConfig').source, 'CustomerManaged'), parameters('trustConfig').settings.issuerName, variables('MQTT_SETTINGS').selfSignedIssuerName)]", + "trustBundleSettings.issuer.kind": "[coalesce(tryGet(tryGet(parameters('trustConfig'), 'settings'), 'issuerKind'), '')]", + "trustBundleSettings.configMap.name": "[coalesce(tryGet(tryGet(parameters('trustConfig'), 'settings'), 'configMapName'), '')]", + "trustBundleSettings.configMap.key": "[coalesce(tryGet(tryGet(parameters('trustConfig'), 'settings'), 'configMapKey'), '')]", + "schemaRegistry.values.mqttBroker.host": "[format('mqtts://{0}.{1}:{2}', variables('MQTT_SETTINGS').brokerListenerServiceName, variables('AIO_EXTENSION_SCOPE').cluster.releaseNamespace, variables('MQTT_SETTINGS').brokerListenerPort)]", + "schemaRegistry.values.mqttBroker.tlsEnabled": True, + "schemaRegistry.values.mqttBroker.serviceAccountTokenAudience": "[variables('MQTT_SETTINGS').serviceAccountAudience]", + }, }, "resources": { "cluster": { @@ -617,6 +582,22 @@ def copy(self) -> "TemplateBlueprint": "apiVersion": "2021-03-01", "name": "[parameters('clusterName')]", }, + "aio_extension": { + "type": "Microsoft.KubernetesConfiguration/extensions", + "apiVersion": "2023-05-01", + "scope": "[format('Microsoft.Kubernetes/connectedClusters/{0}', parameters('clusterName'))]", + "name": "[format('azure-iot-operations-{0}', variables('AIO_EXTENSION_SUFFIX'))]", + "identity": {"type": "SystemAssigned"}, + "properties": { + "extensionType": "microsoft.iotoperations", + "version": "[coalesce(tryGet(tryGet(parameters('advancedConfig'), 'aio'), 'version'), variables('VERSIONS').iotOperations)]", + "releaseTrain": "[coalesce(tryGet(tryGet(parameters('advancedConfig'), 'aio'), 'train'), variables('TRAINS').iotOperations)]", + "autoUpgradeMinorVersion": False, + "scope": "[variables('AIO_EXTENSION_SCOPE')]", + "configurationSettings": "[union(variables('defaultAioConfigurationSettings'), coalesce(tryGet(tryGet(parameters('advancedConfig'), 'aio'), 'configurationSettingsOverride'), createObject()))]", + }, + "dependsOn": ["cluster"], + }, "customLocation": { "type": "Microsoft.ExtendedLocation/customLocations", "apiVersion": "2021-08-31-preview", @@ -626,11 +607,11 @@ def copy(self) -> "TemplateBlueprint": "hostResourceId": "[resourceId('Microsoft.Kubernetes/connectedClusters', parameters('clusterName'))]", "namespace": "[parameters('clusterNamespace')]", "displayName": "[parameters('customLocationName')]", - "clusterExtensionIds": "[parameters('clExtentionIds')]", + "clusterExtensionIds": "[flatten(createArray(parameters('clExtentionIds'), createArray(extensionResourceId(resourceId('Microsoft.Kubernetes/connectedClusters', parameters('clusterName')), 'Microsoft.KubernetesConfiguration/extensions', format('azure-iot-operations-{0}', variables('AIO_EXTENSION_SUFFIX'))))))]", }, - "dependsOn": ["cluster"], + "dependsOn": ["aio_extension", "cluster"], }, - "broker_syncRule": { + "aio_syncRule": { "condition": "[parameters('deployResourceSyncRules')]", "type": "Microsoft.ExtendedLocation/customLocations/resourceSyncRules", "apiVersion": "2021-08-31-preview", @@ -654,11 +635,11 @@ def copy(self) -> "TemplateBlueprint": "selector": {"matchLabels": {"management.azure.com/provider-name": "Microsoft.DeviceRegistry"}}, "targetResourceGroup": "[resourceGroup().id]", }, - "dependsOn": ["broker_syncRule", "customLocation"], + "dependsOn": 
["aio_syncRule", "customLocation"], }, "aioInstance": { "type": "Microsoft.IoTOperations/instances", - "apiVersion": "2024-08-15-preview", + "apiVersion": "2024-09-15-preview", "name": "[format('aio-{0}', coalesce(tryGet(parameters('advancedConfig'), 'resourceSuffix'), take(uniqueString(resourceGroup().id, parameters('clusterName'), parameters('clusterNamespace')), 5)))]", "location": "[parameters('clusterLocation')]", "extendedLocation": { @@ -668,13 +649,13 @@ def copy(self) -> "TemplateBlueprint": "identity": "[if(empty(parameters('userAssignedIdentity')), createObject('type', 'None'), createObject('type', 'UserAssigned', 'userAssignedIdentities', createObject(format('{0}', parameters('userAssignedIdentity')), createObject())))]", "properties": { "description": "An AIO instance.", - "schemaRegistryNamespace": "[reference(parameters('schemaRegistryId'), '2024-07-01-preview').namespace]", + "schemaRegistryRef": {"resourceId": "[parameters('schemaRegistryId')]"}, }, "dependsOn": ["customLocation"], }, "broker": { "type": "Microsoft.IoTOperations/instances/brokers", - "apiVersion": "2024-08-15-preview", + "apiVersion": "2024-09-15-preview", "name": "[format('{0}/{1}', format('aio-{0}', coalesce(tryGet(parameters('advancedConfig'), 'resourceSuffix'), take(uniqueString(resourceGroup().id, parameters('clusterName'), parameters('clusterNamespace')), 5))), 'default')]", "extendedLocation": { "name": "[resourceId('Microsoft.ExtendedLocation/customLocations', parameters('customLocationName'))]", @@ -699,7 +680,7 @@ def copy(self) -> "TemplateBlueprint": }, "broker_authn": { "type": "Microsoft.IoTOperations/instances/brokers/authentications", - "apiVersion": "2024-08-15-preview", + "apiVersion": "2024-09-15-preview", "name": "[format('{0}/{1}/{2}', format('aio-{0}', coalesce(tryGet(parameters('advancedConfig'), 'resourceSuffix'), take(uniqueString(resourceGroup().id, parameters('clusterName'), parameters('clusterNamespace')), 5))), 'default', 'default')]", "extendedLocation": { "name": "[resourceId('Microsoft.ExtendedLocation/customLocations', parameters('customLocationName'))]", @@ -719,7 +700,7 @@ def copy(self) -> "TemplateBlueprint": }, "broker_listener": { "type": "Microsoft.IoTOperations/instances/brokers/listeners", - "apiVersion": "2024-08-15-preview", + "apiVersion": "2024-09-15-preview", "name": "[format('{0}/{1}/{2}', format('aio-{0}', coalesce(tryGet(parameters('advancedConfig'), 'resourceSuffix'), take(uniqueString(resourceGroup().id, parameters('clusterName'), parameters('clusterNamespace')), 5))), 'default', 'default')]", "extendedLocation": { "name": "[resourceId('Microsoft.ExtendedLocation/customLocations', parameters('customLocationName'))]", @@ -749,7 +730,7 @@ def copy(self) -> "TemplateBlueprint": }, "dataflow_profile": { "type": "Microsoft.IoTOperations/instances/dataflowProfiles", - "apiVersion": "2024-08-15-preview", + "apiVersion": "2024-09-15-preview", "name": "[format('{0}/{1}', format('aio-{0}', coalesce(tryGet(parameters('advancedConfig'), 'resourceSuffix'), take(uniqueString(resourceGroup().id, parameters('clusterName'), parameters('clusterNamespace')), 5))), 'default')]", "extendedLocation": { "name": "[resourceId('Microsoft.ExtendedLocation/customLocations', parameters('customLocationName'))]", @@ -760,7 +741,7 @@ def copy(self) -> "TemplateBlueprint": }, "dataflow_endpoint": { "type": "Microsoft.IoTOperations/instances/dataflowEndpoints", - "apiVersion": "2024-08-15-preview", + "apiVersion": "2024-09-15-preview", "name": "[format('{0}/{1}', format('aio-{0}', 
coalesce(tryGet(parameters('advancedConfig'), 'resourceSuffix'), take(uniqueString(resourceGroup().id, parameters('clusterName'), parameters('clusterNamespace')), 5))), 'default')]", "extendedLocation": { "name": "[resourceId('Microsoft.ExtendedLocation/customLocations', parameters('customLocationName'))]", @@ -786,6 +767,20 @@ def copy(self) -> "TemplateBlueprint": }, }, "outputs": { + "aioExtension": { + "type": "object", + "value": { + "name": "[format('azure-iot-operations-{0}', variables('AIO_EXTENSION_SUFFIX'))]", + "id": "[extensionResourceId(resourceId('Microsoft.Kubernetes/connectedClusters', parameters('clusterName')), 'Microsoft.KubernetesConfiguration/extensions', format('azure-iot-operations-{0}', variables('AIO_EXTENSION_SUFFIX')))]", + "version": "[reference('aio_extension').version]", + "releaseTrain": "[reference('aio_extension').releaseTrain]", + "config": { + "brokerListenerName": "[variables('MQTT_SETTINGS').brokerListenerServiceName]", + "brokerListenerPort": "[variables('MQTT_SETTINGS').brokerListenerPort]", + }, + "identityPrincipalId": "[reference('aio_extension', '2023-05-01', 'full').identity.principalId]", + }, + }, "aio": { "type": "object", "value": { diff --git a/azext_edge/edge/providers/orchestration/work.py b/azext_edge/edge/providers/orchestration/work.py index e8940985e..c261d073f 100644 --- a/azext_edge/edge/providers/orchestration/work.py +++ b/azext_edge/edge/providers/orchestration/work.py @@ -5,6 +5,7 @@ # ---------------------------------------------------------------------------------------------- from enum import IntEnum +from functools import reduce from json import dumps from time import sleep from typing import TYPE_CHECKING, Dict, Optional, Tuple, Union @@ -24,6 +25,7 @@ from ...util.az_client import ( REGISTRY_API_VERSION, get_resource_client, + parse_resource_id, wait_for_terminal_state, ) from .permissions import ROLE_DEF_FORMAT_STR, PermissionManager @@ -286,11 +288,6 @@ def _do_work(self): # noqa: C901 # Enable IoT Ops workflow if self._apply_foundation: - # Ensure schema registry exists. - self.resource_client.resources.get_by_id( - resource_id=self._targets.schema_registry_resource_id, - api_version=REGISTRY_API_VERSION, - ) enablement_work_name = self._work_format_str.format(op="enablement") self.render_display( category=WorkCategoryKey.ENABLE_IOT_OPS, active_step=WorkStepKey.WHAT_IF_ENABLEMENT @@ -326,26 +323,9 @@ def _do_work(self): # noqa: C901 _ = wait_for_terminal_state(enablement_poller) self._extension_map = self._resource_map.connected_cluster.get_extensions_by_type( - IOT_OPS_EXTENSION_TYPE, IOT_OPS_PLAT_EXTENSION_TYPE, SECRET_SYNC_EXTENSION_TYPE + IOT_OPS_PLAT_EXTENSION_TYPE, SECRET_SYNC_EXTENSION_TYPE ) - role_assignment_error = None - try: - self.permission_manager.apply_role_assignment( - scope=self._targets.schema_registry_resource_id, - principal_id=self._extension_map[IOT_OPS_EXTENSION_TYPE]["identity"]["principalId"], - role_def_id=ROLE_DEF_FORMAT_STR.format( - subscription_id=self.subscription_id, - role_id=CONTRIBUTOR_ROLE_ID, # TODO - @digimaun use schema registry subscription. 
-                    ),
-                )
-            except Exception as e:
-                role_assignment_error = get_user_msg_warn_ra(
-                    prefix=f"Role assignment failed with:\n{str(e)}.",
-                    principal_id=self._extension_map[IOT_OPS_EXTENSION_TYPE]["identity"]["principalId"],
-                    scope=self._targets.schema_registry_resource_id,
-                )
-
             self.complete_step(
                 category=WorkCategoryKey.ENABLE_IOT_OPS, completed_step=WorkStepKey.DEPLOY_ENABLEMENT
             )
@@ -355,17 +335,20 @@ def _do_work(self):  # noqa: C901
                 resource_tree = self._resource_map.build_tree()
                 self.stop_display()
                 print(resource_tree)
-                if role_assignment_error:
-                    logger.warning(role_assignment_error)
                 return
             # TODO @digimaun - work_kpis
             return work_kpis
         # Deploy IoT Ops workflow
         if self._targets.instance_name:
+            # Ensure schema registry exists.
+            self.resource_client.resources.get_by_id(
+                resource_id=self._targets.schema_registry_resource_id,
+                api_version=REGISTRY_API_VERSION,
+            )
             if not self._extension_map:
                 self._extension_map = self._resource_map.connected_cluster.get_extensions_by_type(
-                    IOT_OPS_EXTENSION_TYPE, IOT_OPS_PLAT_EXTENSION_TYPE, SECRET_SYNC_EXTENSION_TYPE
+                    IOT_OPS_PLAT_EXTENSION_TYPE, SECRET_SYNC_EXTENSION_TYPE
                 )
             # TODO - @digimaun revisit
             if any(not v for v in self._extension_map.values()):
@@ -378,9 +361,6 @@ def _do_work(self):  # noqa: C901
             self.render_display(category=WorkCategoryKey.DEPLOY_IOT_OPS, active_step=WorkStepKey.WHAT_IF_INSTANCE)
             instance_content, instance_parameters = self._targets.get_ops_instance_template(
                 cl_extension_ids=[self._extension_map[ext]["id"] for ext in self._extension_map],
-                ops_extension_config=self._extension_map[IOT_OPS_EXTENSION_TYPE]["properties"][
-                    "configurationSettings"
-                ],
             )
             self._deploy_template(
                 content=instance_content,
@@ -409,13 +389,39 @@ def _do_work(self):  # noqa: C901
                     f"{self._display.categories[WorkCategoryKey.DEPLOY_IOT_OPS][0].title}[/link]"
                 )
                 self.render_display(category=WorkCategoryKey.DEPLOY_IOT_OPS)
-            _ = wait_for_terminal_state(instance_poller)
+            instance_output = wait_for_terminal_state(instance_poller)
+
+            # safely get nested property
+            keys = ['properties', 'outputs', 'aioExtension', 'value', 'identityPrincipalId']
+            extension_principal_id = reduce(lambda val, key: val.get(key) if val else None, keys, instance_output)
+            # TODO - @c-ryan-k consider setting role_assignment_error if extension_principal_id is None
+            role_assignment_error = None
+            try:
+                schema_registry_id_parts = parse_resource_id(self._targets.schema_registry_resource_id)
+                self.permission_manager.apply_role_assignment(
+                    scope=self._targets.schema_registry_resource_id,
+                    principal_id=extension_principal_id,
+                    role_def_id=ROLE_DEF_FORMAT_STR.format(
+                        subscription_id=schema_registry_id_parts.subscription_id,
+                        role_id=CONTRIBUTOR_ROLE_ID,
+                    ),
+                )
+            except Exception as e:
+                role_assignment_error = get_user_msg_warn_ra(
+                    prefix=f"Role assignment failed with:\n{str(e)}.",
+                    principal_id=extension_principal_id,
+                    scope=self._targets.schema_registry_resource_id,
+                )
+
             self.complete_step(
                 category=WorkCategoryKey.DEPLOY_IOT_OPS,
                 completed_step=WorkStepKey.DEPLOY_INSTANCE,
             )
+            if role_assignment_error:
+                logger.warning(role_assignment_error)
             if self._show_progress:
+                # refresh resource state so the newly deployed AIO extension shows up in the resource tree
                 self._resource_map.refresh_resource_state()
                 resource_tree = self._resource_map.build_tree()
                 self.stop_display()
diff --git a/azext_edge/tests/edge/init/int/test_init_int.py b/azext_edge/tests/edge/init/int/test_init_int.py
index 6600c8831..c0e2d37ea 100644
--- a/azext_edge/tests/edge/init/int/test_init_int.py
+++ 
b/azext_edge/tests/edge/init/int/test_init_int.py
@@ -61,7 +61,6 @@ def init_test_setup(settings, tracked_resources):
         "clusterName": settings.env.azext_edge_cluster,
         "resourceGroup": settings.env.azext_edge_rg,
         "schemaRegistryId": registry["id"],
-        "schemaRegistryNamespace": registry_namespace,
         "instanceName": instance_name,
         "additionalCreateArgs": _strip_quotes(settings.env.azext_edge_create_args),
         "additionalInitArgs": _strip_quotes(settings.env.azext_edge_init_args),
@@ -94,7 +93,7 @@ def test_init_scenario(
     registry_id = init_test_setup["schemaRegistryId"]
     instance_name = init_test_setup["instanceName"]
     command = f"az iot ops init -g {resource_group} --cluster {cluster_name} "\
-        f"--sr-resource-id {registry_id} --no-progress {additional_init_args} "
+        f"--no-progress {additional_init_args} "
     # TODO: assert the result once init returns one
     run(command)
@@ -103,7 +102,8 @@ def test_init_scenario(
     # create command
     create_command = f"az iot ops create -g {resource_group} --cluster {cluster_name} "\
-        f"-n {instance_name} --no-progress {additional_create_args} "
+        f"--sr-resource-id {registry_id} -n {instance_name} "\
+        f"--no-progress {additional_create_args} "
     # TODO: assert the result once create returns one
     run(create_command)
@@ -132,7 +132,7 @@ def test_init_scenario(
             instance_name=instance_name,
             cluster_name=cluster_name,
             resource_group=resource_group,
-            schema_registry_namespace=init_test_setup["schemaRegistryNamespace"],
+            schema_registry_id=registry_id,
             **create_arg_dict
         )
     except Exception as e:  # pylint: disable=broad-except
@@ -190,7 +190,7 @@ def assert_aio_init(
 def assert_aio_instance(
     instance_name: str,
     resource_group: str,
-    schema_registry_namespace: str,
+    schema_registry_id: str,
     custom_location: Optional[str] = None,
     description: Optional[str] = None,
     location: Optional[str] = None,
@@ -209,7 +209,7 @@ def assert_aio_instance(
     instance_props = instance_show["properties"]
     assert instance_props.get("description") == description
-    assert instance_props["schemaRegistryNamespace"] == schema_registry_namespace
+    assert instance_props["schemaRegistryRef"] == {"resource_id": schema_registry_id}
     expected_components = {"adr", "akri", "connectors", "dataflows", "schemaRegistry"}
     disabled_components = []
diff --git a/azext_edge/tests/edge/orchestration/test_deletion_unit.py b/azext_edge/tests/edge/orchestration/test_deletion_unit.py
index 6f975e860..2ca77ffc9 100644
--- a/azext_edge/tests/edge/orchestration/test_deletion_unit.py
+++ b/azext_edge/tests/edge/orchestration/test_deletion_unit.py
@@ -10,6 +10,7 @@
 import pytest
 from azext_edge.edge.providers.orchestration.deletion import IoTOperationsResource
+from azext_edge.edge.providers.orchestration.work import IOT_OPS_EXTENSION_TYPE
 from ...generators import generate_random_string
@@ -86,6 +87,16 @@ def _assemble_resource_map_mock(
     resource_map_mock().custom_locations = custom_locations
     resource_map_mock().get_resources.return_value = resources
     resource_map_mock().get_resource_sync_rules.return_value = sync_rules
+    resource_map_mock().connected_cluster.get_extensions_by_type.return_value = {
+        IOT_OPS_EXTENSION_TYPE: {"id": "aio-ext-id"}
+    }
+    resource_map_mock().extensions.append(
+        IoTOperationsResource(
+            resource_id="aio-ext-id",
+            display_name="aio-extension",
+            api_version="aio-ext-api"
+        )
+    )
@@ -95,7 +106,7 @@ def _assemble_resource_map_mock(
         "resources": None,
         "resource sync rules": None,
         "custom locations": None,
-        "extensions": None,
+        "extensions": [],
         "meta": {
             "expected_total": 0,
         },
@@ -183,6 +194,7 @@ def test_batch_resources( "extensions": [], "meta": { "expected_total": 0, + "expected_delete_calls": 1, # aio extension }, }, { @@ -260,8 +272,6 @@ def test_delete_lifecycle( delete_ops_resources(**kwargs) expected_delete_calls: int = expected_resources_map["meta"].get("expected_delete_calls", 0) - if not include_dependencies and expected_delete_calls > 0: - expected_delete_calls = expected_delete_calls - 1 spy_deletion_manager["_display_resource_tree"].assert_called_once() spy_deletion_manager["_process"].assert_called_once() diff --git a/azext_edge/tests/edge/orchestration/test_resource_map_unit.py b/azext_edge/tests/edge/orchestration/test_resource_map_unit.py index 7e3ecf43b..79068be4d 100644 --- a/azext_edge/tests/edge/orchestration/test_resource_map_unit.py +++ b/azext_edge/tests/edge/orchestration/test_resource_map_unit.py @@ -4,6 +4,7 @@ # Licensed under the MIT License. See License file in the project root for license information. # ---------------------------------------------------------------------------------------------- + from typing import List, Optional from unittest.mock import Mock from random import randint @@ -12,6 +13,7 @@ from rich.tree import Tree from azext_edge.edge.providers.orchestration.resource_map import IoTOperationsResource +from azext_edge.edge.providers.orchestration.work import IOT_OPS_EXTENSION_TYPE from ...generators import generate_random_string, get_zeroed_subscription @@ -50,6 +52,15 @@ def _assemble_connected_cluster_mock( resources: Optional[List[dict]], sync_rules: Optional[List[dict]], ): + # aio extension + aio_extension = { + "id": "aio_extension_id", + "name": "aio-extension-name", + "apiVersion": "aio-api-version", + } + if extensions: + extensions.append(aio_extension) + cluster_mock().subscription_id = sub cluster_mock().cluster_name = cluster_name cluster_mock().resource_group_name = rg_name @@ -57,6 +68,7 @@ def _assemble_connected_cluster_mock( cluster_mock().get_aio_custom_locations.return_value = custom_locations cluster_mock().get_aio_resources.return_value = resources cluster_mock().get_resource_sync_rules.return_value = sync_rules + cluster_mock().get_extensions_by_type.return_value = {IOT_OPS_EXTENSION_TYPE: aio_extension} @pytest.mark.parametrize( @@ -70,6 +82,7 @@ def _assemble_connected_cluster_mock( @pytest.mark.parametrize("expected_resources", [None, _generate_records(5)]) @pytest.mark.parametrize("expected_resource_sync_rules", [None, _generate_records()]) @pytest.mark.parametrize("category_color", [None, "red"]) +@pytest.mark.parametrize("include_dependencies", [True, False]) def test_resource_map( mocker, mocked_cmd: Mock, @@ -79,6 +92,7 @@ def test_resource_map( expected_resources: Optional[List[dict]], expected_resource_sync_rules: Optional[List[dict]], category_color: Optional[str], + include_dependencies: bool, ): from azext_edge.edge.providers.orchestration.resource_map import ( IoTOperationsResourceMap, @@ -114,12 +128,10 @@ def test_resource_map( if expected_custom_locations: for cl in expected_custom_locations: - _assert_ops_resource_eq( - resource_map.get_resources(cl["id"]), expected_resources, verify_segment_order=True - ) + _assert_ops_resource_eq(resource_map.get_resources(cl["id"]), expected_resources, verify_segment_order=True) _assert_ops_resource_eq(resource_map.get_resource_sync_rules(cl["id"]), expected_resource_sync_rules) - kwargs = {} + kwargs = {"include_dependencies": include_dependencies} if category_color: kwargs["category_color"] = category_color @@ -163,12 +175,14 @@ def 
_assert_tree( expected_aio_resources: Optional[List[dict]], expected_resource_sync_rules: Optional[List[dict]], category_color: str = "cyan", + include_dependencies: bool = False, ): assert tree.label == f"[green]{cluster_name}" assert tree.children[0].label == f"[{category_color}]extensions" if expected_aio_extensions: - for i in range(len(expected_aio_extensions)): + range_len = len(expected_aio_extensions) if include_dependencies else 1 # Only aio extension + for i in range(range_len): tree.children[0].children[i].label == expected_aio_extensions[i]["name"] if expected_aio_custom_locations: diff --git a/azext_edge/tests/edge/orchestration/test_targets_unit.py b/azext_edge/tests/edge/orchestration/test_targets_unit.py index f33fc1dd7..b6e0d2a2e 100644 --- a/azext_edge/tests/edge/orchestration/test_targets_unit.py +++ b/azext_edge/tests/edge/orchestration/test_targets_unit.py @@ -13,7 +13,6 @@ InitTargets, assemble_nargs_to_dict, get_insecure_listener, - REGISTRY_API_VERSION, ) from ...generators import generate_random_string @@ -40,8 +39,6 @@ def get_trust_settings(): KVP_KEYS = frozenset(["ops_config", "trust_settings"]) ENABLEMENT_PARAM_CONVERSION_MAP = { "clusterName": "cluster_name", - "kubernetesDistro": "kubernetes_distro", - "containerRuntimeSocket": "container_runtime_socket", "trustConfig": "trust_config", "schemaRegistryId": "schema_registry_resource_id", "advancedConfig": "advanced_config", @@ -50,6 +47,8 @@ def get_trust_settings(): "clusterName": "cluster_name", "clusterNamespace": "cluster_namespace", "clusterLocation": "location", + "kubernetesDistro": "kubernetes_distro", + "containerRuntimeSocket": "container_runtime_socket", "customLocationName": "custom_location_name", "deployResourceSyncRules": "deploy_resource_sync_rules", "schemaRegistryId": "schema_registry_resource_id", @@ -130,7 +129,8 @@ def test_init_targets(target_scenario: dict): verify_user_trust_settings(targets, target_scenario) - enablement_template, enablement_parameters = targets.get_ops_enablement_template() + _, enablement_parameters = targets.get_ops_enablement_template() + # test enablement_template for parameter in enablement_parameters: targets_key = parameter if parameter in ENABLEMENT_PARAM_CONVERSION_MAP: @@ -139,27 +139,22 @@ def test_init_targets(target_scenario: dict): targets, targets_key ), f"{parameter} value mismatch with targets {targets_key} value." 
- if targets.ops_config: - aio_config_settings = enablement_template["variables"]["defaultAioConfigurationSettings"] - for c in targets.ops_config: - assert c in aio_config_settings - assert aio_config_settings[c] == targets.ops_config[c] - - if targets.ops_version: - assert enablement_template["variables"]["VERSIONS"]["aio"] == targets.ops_version - extension_ids = [generate_random_string(), generate_random_string()] - extension_config = {"schemaRegistry.values.resourceId": target_scenario.get("schema_registry_resource_id")} target_scenario_has_user_trust = target_scenario.get("trust_settings") if target_scenario_has_user_trust: - extension_config["trustSource"] = "CustomerManaged" - extension_config["trustBundleSettings.issuer.name"] = target_scenario["trust_settings"]["issuerName"] - extension_config["trustBundleSettings.issuer.kind"] = target_scenario["trust_settings"]["issuerKind"] - extension_config["trustBundleSettings.configMap.name"] = target_scenario["trust_settings"]["configMapName"] - extension_config["trustBundleSettings.configMap.key"] = target_scenario["trust_settings"]["configMapKey"] targets.trust_config = None - instance_template, instance_parameters = targets.get_ops_instance_template(extension_ids, extension_config) + instance_template, instance_parameters = targets.get_ops_instance_template(extension_ids) + + if targets.ops_version: + assert instance_template["variables"]["VERSIONS"]["iotOperations"] == targets.ops_version + + if targets.ops_config: + aio_config_settings = instance_template["variables"]["defaultAioConfigurationSettings"] + for c in targets.ops_config: + assert c in aio_config_settings + assert aio_config_settings[c] == targets.ops_config[c] + for parameter in instance_parameters: if parameter == "clExtentionIds": assert instance_parameters[parameter]["value"] == extension_ids @@ -173,10 +168,9 @@ def test_init_targets(target_scenario: dict): assert instance_template["resources"]["aioInstance"]["properties"]["description"] == targets.instance_description - assert ( - instance_template["resources"]["aioInstance"]["properties"]["schemaRegistryNamespace"] - == f"[reference(parameters('schemaRegistryId'), '{REGISTRY_API_VERSION}').namespace]" - ) + assert instance_template["resources"]["aioInstance"]["properties"]["schemaRegistryRef"] == { + "resourceId": "[parameters('schemaRegistryId')]" + } if targets.tags: assert instance_template["resources"]["aioInstance"]["tags"] == targets.tags diff --git a/azext_edge/tests/edge/orchestration/test_template_unit.py b/azext_edge/tests/edge/orchestration/test_template_unit.py index 40c55ac7d..798058341 100644 --- a/azext_edge/tests/edge/orchestration/test_template_unit.py +++ b/azext_edge/tests/edge/orchestration/test_template_unit.py @@ -10,8 +10,8 @@ from azext_edge.edge.providers.orchestration.template import ( IOT_OPERATIONS_VERSION_MONIKER, - M2_ENABLEMENT_TEMPLATE, - M2_INSTANCE_TEMPLATE, + M3_ENABLEMENT_TEMPLATE, + M3_INSTANCE_TEMPLATE, TemplateBlueprint, get_insecure_listener, ) @@ -24,10 +24,9 @@ [ "cluster", "aio_platform_extension", - "secret_sync_controller_extension", + "secret_store_extension", "open_service_mesh_extension", - "edge_storage_accelerator_extension", - "aio_extension", + "container_storage_extension", ] ) @@ -35,8 +34,9 @@ EXPECTED_INSTANCE_RESOURCE_KEYS = frozenset( [ "cluster", + "aio_extension", "customLocation", - "broker_syncRule", + "aio_syncRule", "deviceRegistry_syncRule", "aioInstance", "broker", @@ -59,37 +59,37 @@ def test_enablement_template(): - assert 
M2_ENABLEMENT_TEMPLATE.commit_id - assert M2_ENABLEMENT_TEMPLATE.content + assert M3_ENABLEMENT_TEMPLATE.commit_id + assert M3_ENABLEMENT_TEMPLATE.content for resource in EXPECTED_EXTENSION_RESOURCE_KEYS: - assert M2_ENABLEMENT_TEMPLATE.get_resource_by_key(resource) + assert M3_ENABLEMENT_TEMPLATE.get_resource_by_key(resource) for definition in EXPECTED_SHARED_DEFINITION_KEYS: - assert M2_ENABLEMENT_TEMPLATE.get_type_definition(definition) + assert M3_ENABLEMENT_TEMPLATE.get_type_definition(definition) def test_instance_template(): - assert M2_INSTANCE_TEMPLATE.commit_id - assert M2_INSTANCE_TEMPLATE.content + assert M3_INSTANCE_TEMPLATE.commit_id + assert M3_INSTANCE_TEMPLATE.content for resource in EXPECTED_INSTANCE_RESOURCE_KEYS: - assert M2_INSTANCE_TEMPLATE.get_resource_by_key(resource) + assert M3_INSTANCE_TEMPLATE.get_resource_by_key(resource) for definition in EXPECTED_SHARED_DEFINITION_KEYS: - assert M2_INSTANCE_TEMPLATE.get_type_definition(definition) + assert M3_INSTANCE_TEMPLATE.get_type_definition(definition) - instance = M2_INSTANCE_TEMPLATE.get_resource_by_type("Microsoft.IoTOperations/instances") + instance = M3_INSTANCE_TEMPLATE.get_resource_by_type("Microsoft.IoTOperations/instances") assert instance and isinstance(instance, dict) # Copy test in other area - m2_template_copy = M2_INSTANCE_TEMPLATE.copy() + m3_template_copy = M3_INSTANCE_TEMPLATE.copy() instance_name = generate_random_string() broker_name = generate_random_string() - m2_template_copy.add_resource("insecure_listener", get_insecure_listener(instance_name, broker_name)) - listeners = m2_template_copy.get_resource_by_type( + m3_template_copy.add_resource("insecure_listener", get_insecure_listener(instance_name, broker_name)) + listeners = m3_template_copy.get_resource_by_type( "Microsoft.IoTOperations/instances/brokers/listeners", first=False ) assert listeners and isinstance(listeners, list) From 295a7eefc8d510c0b825ad80ba1f4c8703fd1a19 Mon Sep 17 00:00:00 2001 From: Ryan K Date: Fri, 18 Oct 2024 13:59:02 -0700 Subject: [PATCH 04/26] fix(test): fix prefix for esa -> acsa in support bundle tests (#412) --- .../support/create_bundle_int/test_arccontainerstorage_int.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/azext_edge/tests/edge/support/create_bundle_int/test_arccontainerstorage_int.py b/azext_edge/tests/edge/support/create_bundle_int/test_arccontainerstorage_int.py index dde80ac64..0c29343b1 100644 --- a/azext_edge/tests/edge/support/create_bundle_int/test_arccontainerstorage_int.py +++ b/azext_edge/tests/edge/support/create_bundle_int/test_arccontainerstorage_int.py @@ -33,7 +33,7 @@ def test_create_bundle_arccontainerstorage(init_setup, tracked_files): assert set(file_map.keys()).issubset(set(expected_types)) workload_resource_prefixes = [ - "esa-otel-collector", + "acsa-otel", "csi-wyvern-controller", "csi-wyvern-node", "config-operator", From 342a1c68fcda91fa3c61a3054240e994318e944d Mon Sep 17 00:00:00 2001 From: Ryan K Date: Fri, 18 Oct 2024 14:46:38 -0700 Subject: [PATCH 05/26] refactor: update broker stats to use new property names (#413) --- azext_edge/edge/common.py | 14 +- .../tests/edge/checks/test_mq_checks_unit.py | 6 +- azext_edge/tests/edge/mq/raw_stats.txt | 572 +++++++++--------- azext_edge/tests/edge/mq/traces_data.py | 68 +-- .../resources/test_broker_listeners_unit.py | 2 +- .../edge/support/create_bundle_int/helpers.py | 4 +- 6 files changed, 333 insertions(+), 333 deletions(-) diff --git a/azext_edge/edge/common.py b/azext_edge/edge/common.py index 
59ca15a62..05cf60278 100644 --- a/azext_edge/edge/common.py +++ b/azext_edge/edge/common.py @@ -131,13 +131,13 @@ class MqDiagnosticPropertyIndex(Enum): MQ Diagnostic Property Index Strings """ - publishes_received_per_second = "aio_mq_publishes_received_per_second" - publishes_sent_per_second = "aio_mq_publishes_sent_per_second" - publish_route_replication_correctness = "aio_mq_publish_route_replication_correctness" - publish_latency_mu_ms = "aio_mq_publish_latency_mu_ms" - publish_latency_sigma_ms = "aio_mq_publish_latency_sigma_ms" - connected_sessions = "aio_mq_connected_sessions" - total_subscriptions = "aio_mq_total_subscriptions" + publishes_received_per_second = "aio_broker_publishes_received_per_second" + publishes_sent_per_second = "aio_broker_publishes_sent_per_second" + publish_route_replication_correctness = "aio_broker_publish_route_replication_correctness" + publish_latency_mu_ms = "aio_broker_publish_latency_mu_ms" + publish_latency_sigma_ms = "aio_broker_publish_latency_sigma_ms" + connected_sessions = "aio_broker_connected_sessions" + total_subscriptions = "aio_broker_store_total_subscriptions" class OpsServiceType(ListableEnum): diff --git a/azext_edge/tests/edge/checks/test_mq_checks_unit.py b/azext_edge/tests/edge/checks/test_mq_checks_unit.py index a9f9a8b26..dc993f694 100644 --- a/azext_edge/tests/edge/checks/test_mq_checks_unit.py +++ b/azext_edge/tests/edge/checks/test_mq_checks_unit.py @@ -81,7 +81,7 @@ def test_check_mq_by_resource_types(ops_service, mocker, mock_resource_types, re "ports": [ {"name": "bincode-listener-service", "port": 9700, "protocol": "TCP", "targetPort": 9700}, {"name": "protobuf-listener-service", "port": 9800, "protocol": "TCP", "targetPort": 9800}, - {"name": "aio-mq-metrics-service", "port": 9600, "protocol": "TCP", "targetPort": 9600}, + {"name": "aio-broker-metrics-service", "port": 9600, "protocol": "TCP", "targetPort": 9600}, ], }, ), @@ -153,7 +153,7 @@ def test_check_mq_by_resource_types(ops_service, mocker, mock_resource_types, re "ports": [ {"name": "bincode-listener-service", "port": 9700, "protocol": "TCP", "targetPort": 9700}, {"name": "protobuf-listener-service", "port": 9800, "protocol": "TCP", "targetPort": 9800}, - {"name": "aio-mq-metrics-service", "port": 9600, "protocol": "TCP", "targetPort": 9600}, + {"name": "aio-broker-metrics-service", "port": 9600, "protocol": "TCP", "targetPort": 9600}, ], }, ), @@ -225,7 +225,7 @@ def test_check_mq_by_resource_types(ops_service, mocker, mock_resource_types, re "ports": [ {"name": "bincode-listener-service", "port": 9700, "protocol": "TCP", "targetPort": 9700}, {"name": "protobuf-listener-service", "port": 9800, "protocol": "TCP", "targetPort": 9800}, - {"name": "aio-mq-metrics-service", "port": 9600, "protocol": "TCP", "targetPort": 9600}, + {"name": "aio-broker-metrics-service", "port": 9600, "protocol": "TCP", "targetPort": 9600}, ], }, ), diff --git a/azext_edge/tests/edge/mq/raw_stats.txt b/azext_edge/tests/edge/mq/raw_stats.txt index 07a5411a7..545a58c0f 100644 --- a/azext_edge/tests/edge/mq/raw_stats.txt +++ b/azext_edge/tests/edge/mq/raw_stats.txt @@ -1,288 +1,288 @@ -# TYPE aio_mq_store_retained_messages gauge -aio_mq_store_retained_messages{hostname="aio-mq-dmqtt-backend-2-1",instance="028d15b1-625d-4256-ad0b-e534027e96da",namespace="azure-iot-operations",pod_type="BE"} 0 -aio_mq_store_retained_messages{hostname="aio-mq-dmqtt-backend-1-1",instance="a55e6597-6906-44b5-9678-2d6b2808998c",namespace="azure-iot-operations",pod_type="BE"} 1 
-aio_mq_store_retained_messages{hostname="aio-mq-dmqtt-backend-1-0",instance="37d8f0d9-7088-4db3-9c60-72157a5a47f8",namespace="azure-iot-operations",pod_type="BE"} 1 -aio_mq_store_retained_messages{hostname="aio-mq-dmqtt-backend-2-0",instance="aefa2f1f-7576-4ba4-9627-49d4ef0045e5",namespace="azure-iot-operations",pod_type="BE"} 0 -# TYPE aio_mq_authentication_deny counter -aio_mq_authentication_deny{category="uncategorized",hostname="aio-mq-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 0 -aio_mq_authentication_deny{category="broker_selftest",hostname="aio-mq-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 0 -aio_mq_authentication_deny{category="uncategorized",hostname="aio-mq-dmqtt-backend-2-0",instance="aefa2f1f-7576-4ba4-9627-49d4ef0045e5",namespace="azure-iot-operations",pod_type="BE"} 0 -aio_mq_authentication_deny{category="uncategorized",hostname="aio-mq-dmqtt-frontend-1",instance="694fdfcb-34a6-4364-a783-dc0e3af80131",namespace="azure-iot-operations",pod_type="FE"} 0 -aio_mq_authentication_deny{category="uncategorized",hostname="aio-mq-dmqtt-backend-1-0",instance="37d8f0d9-7088-4db3-9c60-72157a5a47f8",namespace="azure-iot-operations",pod_type="BE"} 0 -aio_mq_authentication_deny{category="uncategorized",hostname="aio-mq-dmqtt-backend-1-1",instance="a55e6597-6906-44b5-9678-2d6b2808998c",namespace="azure-iot-operations",pod_type="BE"} 0 -aio_mq_authentication_deny{category="broker_selftest",hostname="aio-mq-dmqtt-frontend-1",instance="694fdfcb-34a6-4364-a783-dc0e3af80131",namespace="azure-iot-operations",pod_type="FE"} 0 -aio_mq_authentication_deny{category="uncategorized",hostname="aio-mq-dmqtt-backend-2-1",instance="028d15b1-625d-4256-ad0b-e534027e96da",namespace="azure-iot-operations",pod_type="BE"} 0 -aio_mq_authentication_deny{category="aio-opc",hostname="aio-mq-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 0 -# TYPE aio_mq_ping_correctness gauge -aio_mq_ping_correctness{route="aio-mq-dmqtt-frontend-1"} 1 -aio_mq_ping_correctness{route="aio-mq-dmqtt-frontend-0"} 1 -# TYPE aio_mq_subscribe_route_replication_correctness gauge -aio_mq_subscribe_route_replication_correctness{route="aio-mq-diagnostics-probe-0 aio-mq-dmqtt-frontend-1 aio-mq-dmqtt-backend-1-0:1 aio-mq-dmqtt-backend-1-1:1 aio-mq-dmqtt-backend-2-0:1 aio-mq-dmqtt-backend-2-1:1"} 1 -aio_mq_subscribe_route_replication_correctness{route="aio-mq-diagnostics-probe-0 aio-mq-dmqtt-frontend-1 aio-mq-dmqtt-backend-1-0:1 aio-mq-dmqtt-backend-1-1:1"} 1 -aio_mq_subscribe_route_replication_correctness{route="aio-mq-diagnostics-probe-0 aio-mq-dmqtt-frontend-1 aio-mq-dmqtt-backend-1-0:0 aio-mq-dmqtt-backend-1-1:0 aio-mq-dmqtt-backend-1-0:1 aio-mq-dmqtt-backend-1-1:1"} 1 -aio_mq_subscribe_route_replication_correctness{route="aio-mq-diagnostics-probe-0 aio-mq-dmqtt-frontend-1 aio-mq-dmqtt-backend-1-0:1 aio-mq-dmqtt-backend-1-1:1 aio-mq-dmqtt-backend-1-0:0 aio-mq-dmqtt-backend-1-1:0"} 1 -aio_mq_subscribe_route_replication_correctness{route="aio-mq-diagnostics-probe-0 aio-mq-dmqtt-frontend-1 aio-mq-dmqtt-backend-1-0:0 aio-mq-dmqtt-backend-1-1:0 aio-mq-dmqtt-backend-2-0:1 aio-mq-dmqtt-backend-2-1:1"} 1 -aio_mq_subscribe_route_replication_correctness{route="aio-mq-diagnostics-probe-0 aio-mq-dmqtt-frontend-1 aio-mq-dmqtt-backend-1-0:0 aio-mq-dmqtt-backend-1-1:0 aio-mq-dmqtt-backend-2-0:0 aio-mq-dmqtt-backend-2-1:0"} 1 
-aio_mq_subscribe_route_replication_correctness{route="aio-mq-diagnostics-probe-0 aio-mq-dmqtt-frontend-1 aio-mq-dmqtt-backend-1-0:1 aio-mq-dmqtt-backend-1-1:1 aio-mq-dmqtt-backend-2-0:0 aio-mq-dmqtt-backend-2-1:0"} 1 -aio_mq_subscribe_route_replication_correctness{route="aio-mq-diagnostics-probe-0 aio-mq-dmqtt-frontend-1 aio-mq-dmqtt-backend-2-0:1 aio-mq-dmqtt-backend-2-1:1"} 1 -aio_mq_subscribe_route_replication_correctness{route="aio-mq-diagnostics-probe-0 aio-mq-dmqtt-frontend-1 aio-mq-dmqtt-backend-2-0:0 aio-mq-dmqtt-backend-2-1:0 aio-mq-dmqtt-backend-1-0:0 aio-mq-dmqtt-backend-1-1:0"} 1 -aio_mq_subscribe_route_replication_correctness{route="aio-mq-diagnostics-probe-0 aio-mq-dmqtt-frontend-1 aio-mq-dmqtt-backend-2-0:0 aio-mq-dmqtt-backend-2-1:0"} 1 -aio_mq_subscribe_route_replication_correctness{route="aio-mq-diagnostics-probe-0 aio-mq-dmqtt-frontend-1 aio-mq-dmqtt-backend-1-0:0 aio-mq-dmqtt-backend-1-1:0"} 1 -aio_mq_subscribe_route_replication_correctness{route="aio-mq-diagnostics-probe-0 aio-mq-dmqtt-frontend-1 aio-mq-dmqtt-backend-2-0:0 aio-mq-dmqtt-backend-2-1:0 aio-mq-dmqtt-backend-2-0:1 aio-mq-dmqtt-backend-2-1:1"} 1 -aio_mq_subscribe_route_replication_correctness{route="aio-mq-diagnostics-probe-0 aio-mq-dmqtt-frontend-1 aio-mq-dmqtt-backend-2-0:0 aio-mq-dmqtt-backend-2-1:0 aio-mq-dmqtt-backend-1-0:1 aio-mq-dmqtt-backend-1-1:1"} 1 -aio_mq_subscribe_route_replication_correctness{route="aio-mq-diagnostics-probe-0 aio-mq-dmqtt-frontend-1 aio-mq-dmqtt-backend-2-0:1 aio-mq-dmqtt-backend-2-1:1 aio-mq-dmqtt-backend-1-0:0 aio-mq-dmqtt-backend-1-1:0"} 1 -aio_mq_subscribe_route_replication_correctness{route="aio-mq-diagnostics-probe-0 aio-mq-dmqtt-frontend-1 aio-mq-dmqtt-backend-2-0:1 aio-mq-dmqtt-backend-2-1:1 aio-mq-dmqtt-backend-2-0:0 aio-mq-dmqtt-backend-2-1:0"} 1 -aio_mq_subscribe_route_replication_correctness{route="aio-mq-diagnostics-probe-0 aio-mq-dmqtt-frontend-1 aio-mq-dmqtt-backend-2-0:1 aio-mq-dmqtt-backend-2-1:1 aio-mq-dmqtt-backend-1-0:1 aio-mq-dmqtt-backend-1-1:1"} 1 -# TYPE aio_mq_publish_route_replication_correctness gauge -aio_mq_publish_route_replication_correctness{route="aio-mq-diagnostics-probe-0 aio-mq-dmqtt-frontend-1 aio-mq-dmqtt-backend-1-0:1 aio-mq-dmqtt-backend-1-1:1"} 1 -aio_mq_publish_route_replication_correctness{route="aio-mq-diagnostics-probe-0 aio-mq-dmqtt-frontend-1 aio-mq-dmqtt-backend-1-0:0 aio-mq-dmqtt-backend-1-1:0"} 1 -aio_mq_publish_route_replication_correctness{route="aio-mq-diagnostics-probe-0 aio-mq-dmqtt-frontend-1 aio-mq-dmqtt-backend-2-0:0 aio-mq-dmqtt-backend-2-1:0"} 1 -aio_mq_publish_route_replication_correctness{route="aio-mq-diagnostics-probe-0 aio-mq-dmqtt-frontend-1 aio-mq-dmqtt-backend-2-0:1 aio-mq-dmqtt-backend-2-1:1"} 1 -# TYPE aio_mq_store_total_subscriptions gauge -aio_mq_store_total_subscriptions{hostname="aio-mq-dmqtt-backend-2-0",instance="aefa2f1f-7576-4ba4-9627-49d4ef0045e5",namespace="azure-iot-operations",pod_type="BE"} 258 -aio_mq_store_total_subscriptions{hostname="aio-mq-dmqtt-backend-1-1",instance="a55e6597-6906-44b5-9678-2d6b2808998c",namespace="azure-iot-operations",pod_type="BE"} 258 -aio_mq_store_total_subscriptions{hostname="aio-mq-dmqtt-backend-2-1",instance="028d15b1-625d-4256-ad0b-e534027e96da",namespace="azure-iot-operations",pod_type="BE"} 258 -aio_mq_store_total_subscriptions{hostname="aio-mq-dmqtt-backend-1-0",instance="37d8f0d9-7088-4db3-9c60-72157a5a47f8",namespace="azure-iot-operations",pod_type="BE"} 258 -# TYPE aio_mq_publish_latency_sigma_ms gauge -aio_mq_publish_latency_sigma_ms 17.652887 -# TYPE 
aio_mq_ping_latency_route_ms gauge -aio_mq_ping_latency_route_ms{route=""} 111.50912 -# TYPE aio_mq_subscribe_latency_route_ms gauge -aio_mq_subscribe_latency_route_ms{route="aio-mq-diagnostics-probe-0 aio-mq-dmqtt-frontend-1 aio-mq-dmqtt-backend-2-0:1 aio-mq-dmqtt-backend-2-1:1"} 3.217114 -aio_mq_subscribe_latency_route_ms{route="aio-mq-diagnostics-probe-0 aio-mq-dmqtt-frontend-1 aio-mq-dmqtt-backend-1-0:1 aio-mq-dmqtt-backend-1-1:1 aio-mq-dmqtt-backend-1-0:0 aio-mq-dmqtt-backend-1-1:0"} 46.881306 -aio_mq_subscribe_latency_route_ms{route="aio-mq-diagnostics-probe-0 aio-mq-dmqtt-frontend-1 aio-mq-dmqtt-backend-1-0:1 aio-mq-dmqtt-backend-1-1:1"} 2.3446221 -aio_mq_subscribe_latency_route_ms{route="aio-mq-diagnostics-probe-0 aio-mq-dmqtt-frontend-1 aio-mq-dmqtt-backend-1-0:0 aio-mq-dmqtt-backend-1-1:0 aio-mq-dmqtt-backend-1-0:1 aio-mq-dmqtt-backend-1-1:1"} 77.88732 -aio_mq_subscribe_latency_route_ms{route="aio-mq-diagnostics-probe-0 aio-mq-dmqtt-frontend-1 aio-mq-dmqtt-backend-2-0:1 aio-mq-dmqtt-backend-2-1:1 aio-mq-dmqtt-backend-1-0:1 aio-mq-dmqtt-backend-1-1:1"} 44.94545 -aio_mq_subscribe_latency_route_ms{route="aio-mq-diagnostics-probe-0 aio-mq-dmqtt-frontend-1 aio-mq-dmqtt-backend-1-0:1 aio-mq-dmqtt-backend-1-1:1 aio-mq-dmqtt-backend-2-0:0 aio-mq-dmqtt-backend-2-1:0"} 7.618704 -aio_mq_subscribe_latency_route_ms{route="aio-mq-diagnostics-probe-0 aio-mq-dmqtt-frontend-1 aio-mq-dmqtt-backend-1-0:1 aio-mq-dmqtt-backend-1-1:1 aio-mq-dmqtt-backend-2-0:1 aio-mq-dmqtt-backend-2-1:1"} 47.346355 -aio_mq_subscribe_latency_route_ms{route="aio-mq-diagnostics-probe-0 aio-mq-dmqtt-frontend-1 aio-mq-dmqtt-backend-2-0:0 aio-mq-dmqtt-backend-2-1:0 aio-mq-dmqtt-backend-2-0:1 aio-mq-dmqtt-backend-2-1:1"} 48.460068 -aio_mq_subscribe_latency_route_ms{route="aio-mq-diagnostics-probe-0 aio-mq-dmqtt-frontend-1 aio-mq-dmqtt-backend-1-0:0 aio-mq-dmqtt-backend-1-1:0"} 3.41304 -aio_mq_subscribe_latency_route_ms{route="aio-mq-diagnostics-probe-0 aio-mq-dmqtt-frontend-1 aio-mq-dmqtt-backend-2-0:0 aio-mq-dmqtt-backend-2-1:0 aio-mq-dmqtt-backend-1-0:0 aio-mq-dmqtt-backend-1-1:0"} 49.23383 -aio_mq_subscribe_latency_route_ms{route="aio-mq-diagnostics-probe-0 aio-mq-dmqtt-frontend-1 aio-mq-dmqtt-backend-1-0:0 aio-mq-dmqtt-backend-1-1:0 aio-mq-dmqtt-backend-2-0:1 aio-mq-dmqtt-backend-2-1:1"} 49.087955 -aio_mq_subscribe_latency_route_ms{route="aio-mq-diagnostics-probe-0 aio-mq-dmqtt-frontend-1 aio-mq-dmqtt-backend-2-0:0 aio-mq-dmqtt-backend-2-1:0 aio-mq-dmqtt-backend-1-0:1 aio-mq-dmqtt-backend-1-1:1"} 46.974224 -aio_mq_subscribe_latency_route_ms{route="aio-mq-diagnostics-probe-0 aio-mq-dmqtt-frontend-1 aio-mq-dmqtt-backend-2-0:1 aio-mq-dmqtt-backend-2-1:1 aio-mq-dmqtt-backend-1-0:0 aio-mq-dmqtt-backend-1-1:0"} 47.013584 -aio_mq_subscribe_latency_route_ms{route="aio-mq-diagnostics-probe-0 aio-mq-dmqtt-frontend-1 aio-mq-dmqtt-backend-1-0:0 aio-mq-dmqtt-backend-1-1:0 aio-mq-dmqtt-backend-2-0:0 aio-mq-dmqtt-backend-2-1:0"} 51.384243 -aio_mq_subscribe_latency_route_ms{route="aio-mq-diagnostics-probe-0 aio-mq-dmqtt-frontend-1 aio-mq-dmqtt-backend-2-0:0 aio-mq-dmqtt-backend-2-1:0"} 3.541 -aio_mq_subscribe_latency_route_ms{route="aio-mq-diagnostics-probe-0 aio-mq-dmqtt-frontend-1 aio-mq-dmqtt-backend-2-0:1 aio-mq-dmqtt-backend-2-1:1 aio-mq-dmqtt-backend-2-0:0 aio-mq-dmqtt-backend-2-1:0"} 49.028214 -# TYPE aio_mq_ping_latency_mu_ms gauge -aio_mq_ping_latency_mu_ms 112.64005 -# TYPE aio_mq_store_will_bytes gauge 
-aio_mq_store_will_bytes{hostname="aio-mq-dmqtt-backend-1-0",instance="37d8f0d9-7088-4db3-9c60-72157a5a47f8",namespace="azure-iot-operations",pod_type="BE"} 0 -aio_mq_store_will_bytes{hostname="aio-mq-dmqtt-backend-2-0",instance="aefa2f1f-7576-4ba4-9627-49d4ef0045e5",namespace="azure-iot-operations",pod_type="BE"} 0 -aio_mq_store_will_bytes{hostname="aio-mq-dmqtt-backend-1-1",instance="a55e6597-6906-44b5-9678-2d6b2808998c",namespace="azure-iot-operations",pod_type="BE"} 0 -aio_mq_store_will_bytes{hostname="aio-mq-dmqtt-backend-2-1",instance="028d15b1-625d-4256-ad0b-e534027e96da",namespace="azure-iot-operations",pod_type="BE"} 0 -# TYPE aio_mq_connected_sessions gauge -aio_mq_connected_sessions{hostname="aio-mq-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 2 -aio_mq_connected_sessions{hostname="aio-mq-dmqtt-frontend-1",instance="694fdfcb-34a6-4364-a783-dc0e3af80131",namespace="azure-iot-operations",pod_type="FE"} 5 -# TYPE aio_mq_payload_bytes_received counter -aio_mq_payload_bytes_received{category="uncategorized",hostname="aio-mq-dmqtt-backend-2-1",instance="028d15b1-625d-4256-ad0b-e534027e96da",namespace="azure-iot-operations",pod_type="BE"} 0 -aio_mq_payload_bytes_received{category="uncategorized",hostname="aio-mq-dmqtt-frontend-1",instance="694fdfcb-34a6-4364-a783-dc0e3af80131",namespace="azure-iot-operations",pod_type="FE"} 0 -aio_mq_payload_bytes_received{category="broker_selftest",hostname="aio-mq-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 0 -aio_mq_payload_bytes_received{category="aio-opc",hostname="aio-mq-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 214 -aio_mq_payload_bytes_received{category="broker_selftest",hostname="aio-mq-dmqtt-frontend-1",instance="694fdfcb-34a6-4364-a783-dc0e3af80131",namespace="azure-iot-operations",pod_type="FE"} 1984 -aio_mq_payload_bytes_received{category="uncategorized",hostname="aio-mq-dmqtt-backend-1-0",instance="37d8f0d9-7088-4db3-9c60-72157a5a47f8",namespace="azure-iot-operations",pod_type="BE"} 0 -aio_mq_payload_bytes_received{category="uncategorized",hostname="aio-mq-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 0 -aio_mq_payload_bytes_received{category="uncategorized",hostname="aio-mq-dmqtt-backend-2-0",instance="aefa2f1f-7576-4ba4-9627-49d4ef0045e5",namespace="azure-iot-operations",pod_type="BE"} 0 -aio_mq_payload_bytes_received{category="uncategorized",hostname="aio-mq-dmqtt-backend-1-1",instance="a55e6597-6906-44b5-9678-2d6b2808998c",namespace="azure-iot-operations",pod_type="BE"} 0 -# TYPE aio_mq_publish_latency_mu_ms gauge -aio_mq_publish_latency_mu_ms 60.079346 -# TYPE aio_mq_store_total_sessions gauge -aio_mq_store_total_sessions{hostname="aio-mq-dmqtt-backend-1-0",instance="37d8f0d9-7088-4db3-9c60-72157a5a47f8",is_persistent="false",namespace="azure-iot-operations",pod_type="BE"} 3 -aio_mq_store_total_sessions{hostname="aio-mq-dmqtt-backend-2-0",instance="aefa2f1f-7576-4ba4-9627-49d4ef0045e5",is_persistent="false",namespace="azure-iot-operations",pod_type="BE"} 4 -aio_mq_store_total_sessions{hostname="aio-mq-dmqtt-backend-2-1",instance="028d15b1-625d-4256-ad0b-e534027e96da",is_persistent="false",namespace="azure-iot-operations",pod_type="BE"} 4 
-aio_mq_store_total_sessions{hostname="aio-mq-dmqtt-backend-2-1",instance="028d15b1-625d-4256-ad0b-e534027e96da",is_persistent="true",namespace="azure-iot-operations",pod_type="BE"} 0 -aio_mq_store_total_sessions{hostname="aio-mq-dmqtt-backend-1-1",instance="a55e6597-6906-44b5-9678-2d6b2808998c",is_persistent="true",namespace="azure-iot-operations",pod_type="BE"} 0 -aio_mq_store_total_sessions{hostname="aio-mq-dmqtt-backend-1-0",instance="37d8f0d9-7088-4db3-9c60-72157a5a47f8",is_persistent="true",namespace="azure-iot-operations",pod_type="BE"} 0 -aio_mq_store_total_sessions{hostname="aio-mq-dmqtt-backend-2-0",instance="aefa2f1f-7576-4ba4-9627-49d4ef0045e5",is_persistent="true",namespace="azure-iot-operations",pod_type="BE"} 0 -aio_mq_store_total_sessions{hostname="aio-mq-dmqtt-backend-1-1",instance="a55e6597-6906-44b5-9678-2d6b2808998c",is_persistent="false",namespace="azure-iot-operations",pod_type="BE"} 3 -# TYPE aio_mq_authorization_allow counter -aio_mq_authorization_allow{category="uncategorized",hostname="aio-mq-dmqtt-frontend-1",instance="694fdfcb-34a6-4364-a783-dc0e3af80131",namespace="azure-iot-operations",pod_type="FE"} 0 -aio_mq_authorization_allow{category="uncategorized",hostname="aio-mq-dmqtt-backend-1-0",instance="37d8f0d9-7088-4db3-9c60-72157a5a47f8",namespace="azure-iot-operations",pod_type="BE"} 0 -aio_mq_authorization_allow{category="uncategorized",hostname="aio-mq-dmqtt-backend-2-0",instance="aefa2f1f-7576-4ba4-9627-49d4ef0045e5",namespace="azure-iot-operations",pod_type="BE"} 0 -aio_mq_authorization_allow{category="broker_selftest",hostname="aio-mq-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 311 -aio_mq_authorization_allow{category="uncategorized",hostname="aio-mq-dmqtt-backend-1-1",instance="a55e6597-6906-44b5-9678-2d6b2808998c",namespace="azure-iot-operations",pod_type="BE"} 0 -aio_mq_authorization_allow{category="broker_selftest",hostname="aio-mq-dmqtt-frontend-1",instance="694fdfcb-34a6-4364-a783-dc0e3af80131",namespace="azure-iot-operations",pod_type="FE"} 315 -aio_mq_authorization_allow{category="uncategorized",hostname="aio-mq-dmqtt-backend-2-1",instance="028d15b1-625d-4256-ad0b-e534027e96da",namespace="azure-iot-operations",pod_type="BE"} 0 -aio_mq_authorization_allow{category="uncategorized",hostname="aio-mq-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 0 -aio_mq_authorization_allow{category="aio-opc",hostname="aio-mq-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 1 -# TYPE aio_mq_store_retained_bytes gauge -aio_mq_store_retained_bytes{hostname="aio-mq-dmqtt-backend-2-1",instance="028d15b1-625d-4256-ad0b-e534027e96da",namespace="azure-iot-operations",pod_type="BE"} 0 -aio_mq_store_retained_bytes{hostname="aio-mq-dmqtt-backend-2-0",instance="aefa2f1f-7576-4ba4-9627-49d4ef0045e5",namespace="azure-iot-operations",pod_type="BE"} 0 -aio_mq_store_retained_bytes{hostname="aio-mq-dmqtt-backend-1-0",instance="37d8f0d9-7088-4db3-9c60-72157a5a47f8",namespace="azure-iot-operations",pod_type="BE"} 24 -aio_mq_store_retained_bytes{hostname="aio-mq-dmqtt-backend-1-1",instance="a55e6597-6906-44b5-9678-2d6b2808998c",namespace="azure-iot-operations",pod_type="BE"} 24 -# TYPE aio_mq_unsubscribe_latency_sigma_ms gauge -aio_mq_unsubscribe_latency_sigma_ms 21.741936 +# TYPE aio_broker_store_retained_messages gauge 
+aio_broker_store_retained_messages{hostname="aio-broker-dmqtt-backend-2-1",instance="028d15b1-625d-4256-ad0b-e534027e96da",namespace="azure-iot-operations",pod_type="BE"} 0 +aio_broker_store_retained_messages{hostname="aio-broker-dmqtt-backend-1-1",instance="a55e6597-6906-44b5-9678-2d6b2808998c",namespace="azure-iot-operations",pod_type="BE"} 1 +aio_broker_store_retained_messages{hostname="aio-broker-dmqtt-backend-1-0",instance="37d8f0d9-7088-4db3-9c60-72157a5a47f8",namespace="azure-iot-operations",pod_type="BE"} 1 +aio_broker_store_retained_messages{hostname="aio-broker-dmqtt-backend-2-0",instance="aefa2f1f-7576-4ba4-9627-49d4ef0045e5",namespace="azure-iot-operations",pod_type="BE"} 0 +# TYPE aio_broker_authentication_deny counter +aio_broker_authentication_deny{category="uncategorized",hostname="aio-broker-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 0 +aio_broker_authentication_deny{category="broker_selftest",hostname="aio-broker-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 0 +aio_broker_authentication_deny{category="uncategorized",hostname="aio-broker-dmqtt-backend-2-0",instance="aefa2f1f-7576-4ba4-9627-49d4ef0045e5",namespace="azure-iot-operations",pod_type="BE"} 0 +aio_broker_authentication_deny{category="uncategorized",hostname="aio-broker-dmqtt-frontend-1",instance="694fdfcb-34a6-4364-a783-dc0e3af80131",namespace="azure-iot-operations",pod_type="FE"} 0 +aio_broker_authentication_deny{category="uncategorized",hostname="aio-broker-dmqtt-backend-1-0",instance="37d8f0d9-7088-4db3-9c60-72157a5a47f8",namespace="azure-iot-operations",pod_type="BE"} 0 +aio_broker_authentication_deny{category="uncategorized",hostname="aio-broker-dmqtt-backend-1-1",instance="a55e6597-6906-44b5-9678-2d6b2808998c",namespace="azure-iot-operations",pod_type="BE"} 0 +aio_broker_authentication_deny{category="broker_selftest",hostname="aio-broker-dmqtt-frontend-1",instance="694fdfcb-34a6-4364-a783-dc0e3af80131",namespace="azure-iot-operations",pod_type="FE"} 0 +aio_broker_authentication_deny{category="uncategorized",hostname="aio-broker-dmqtt-backend-2-1",instance="028d15b1-625d-4256-ad0b-e534027e96da",namespace="azure-iot-operations",pod_type="BE"} 0 +aio_broker_authentication_deny{category="aio-opc",hostname="aio-broker-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 0 +# TYPE aio_broker_ping_correctness gauge +aio_broker_ping_correctness{route="aio-broker-dmqtt-frontend-1"} 1 +aio_broker_ping_correctness{route="aio-broker-dmqtt-frontend-0"} 1 +# TYPE aio_broker_subscribe_route_replication_correctness gauge +aio_broker_subscribe_route_replication_correctness{route="aio-broker-diagnostics-probe-0 aio-broker-dmqtt-frontend-1 aio-broker-dmqtt-backend-1-0:1 aio-broker-dmqtt-backend-1-1:1 aio-broker-dmqtt-backend-2-0:1 aio-broker-dmqtt-backend-2-1:1"} 1 +aio_broker_subscribe_route_replication_correctness{route="aio-broker-diagnostics-probe-0 aio-broker-dmqtt-frontend-1 aio-broker-dmqtt-backend-1-0:1 aio-broker-dmqtt-backend-1-1:1"} 1 +aio_broker_subscribe_route_replication_correctness{route="aio-broker-diagnostics-probe-0 aio-broker-dmqtt-frontend-1 aio-broker-dmqtt-backend-1-0:0 aio-broker-dmqtt-backend-1-1:0 aio-broker-dmqtt-backend-1-0:1 aio-broker-dmqtt-backend-1-1:1"} 1 +aio_broker_subscribe_route_replication_correctness{route="aio-broker-diagnostics-probe-0 aio-broker-dmqtt-frontend-1 
aio-broker-dmqtt-backend-1-0:1 aio-broker-dmqtt-backend-1-1:1 aio-broker-dmqtt-backend-1-0:0 aio-broker-dmqtt-backend-1-1:0"} 1 +aio_broker_subscribe_route_replication_correctness{route="aio-broker-diagnostics-probe-0 aio-broker-dmqtt-frontend-1 aio-broker-dmqtt-backend-1-0:0 aio-broker-dmqtt-backend-1-1:0 aio-broker-dmqtt-backend-2-0:1 aio-broker-dmqtt-backend-2-1:1"} 1 +aio_broker_subscribe_route_replication_correctness{route="aio-broker-diagnostics-probe-0 aio-broker-dmqtt-frontend-1 aio-broker-dmqtt-backend-1-0:0 aio-broker-dmqtt-backend-1-1:0 aio-broker-dmqtt-backend-2-0:0 aio-broker-dmqtt-backend-2-1:0"} 1 +aio_broker_subscribe_route_replication_correctness{route="aio-broker-diagnostics-probe-0 aio-broker-dmqtt-frontend-1 aio-broker-dmqtt-backend-1-0:1 aio-broker-dmqtt-backend-1-1:1 aio-broker-dmqtt-backend-2-0:0 aio-broker-dmqtt-backend-2-1:0"} 1 +aio_broker_subscribe_route_replication_correctness{route="aio-broker-diagnostics-probe-0 aio-broker-dmqtt-frontend-1 aio-broker-dmqtt-backend-2-0:1 aio-broker-dmqtt-backend-2-1:1"} 1 +aio_broker_subscribe_route_replication_correctness{route="aio-broker-diagnostics-probe-0 aio-broker-dmqtt-frontend-1 aio-broker-dmqtt-backend-2-0:0 aio-broker-dmqtt-backend-2-1:0 aio-broker-dmqtt-backend-1-0:0 aio-broker-dmqtt-backend-1-1:0"} 1 +aio_broker_subscribe_route_replication_correctness{route="aio-broker-diagnostics-probe-0 aio-broker-dmqtt-frontend-1 aio-broker-dmqtt-backend-2-0:0 aio-broker-dmqtt-backend-2-1:0"} 1 +aio_broker_subscribe_route_replication_correctness{route="aio-broker-diagnostics-probe-0 aio-broker-dmqtt-frontend-1 aio-broker-dmqtt-backend-1-0:0 aio-broker-dmqtt-backend-1-1:0"} 1 +aio_broker_subscribe_route_replication_correctness{route="aio-broker-diagnostics-probe-0 aio-broker-dmqtt-frontend-1 aio-broker-dmqtt-backend-2-0:0 aio-broker-dmqtt-backend-2-1:0 aio-broker-dmqtt-backend-2-0:1 aio-broker-dmqtt-backend-2-1:1"} 1 +aio_broker_subscribe_route_replication_correctness{route="aio-broker-diagnostics-probe-0 aio-broker-dmqtt-frontend-1 aio-broker-dmqtt-backend-2-0:0 aio-broker-dmqtt-backend-2-1:0 aio-broker-dmqtt-backend-1-0:1 aio-broker-dmqtt-backend-1-1:1"} 1 +aio_broker_subscribe_route_replication_correctness{route="aio-broker-diagnostics-probe-0 aio-broker-dmqtt-frontend-1 aio-broker-dmqtt-backend-2-0:1 aio-broker-dmqtt-backend-2-1:1 aio-broker-dmqtt-backend-1-0:0 aio-broker-dmqtt-backend-1-1:0"} 1 +aio_broker_subscribe_route_replication_correctness{route="aio-broker-diagnostics-probe-0 aio-broker-dmqtt-frontend-1 aio-broker-dmqtt-backend-2-0:1 aio-broker-dmqtt-backend-2-1:1 aio-broker-dmqtt-backend-2-0:0 aio-broker-dmqtt-backend-2-1:0"} 1 +aio_broker_subscribe_route_replication_correctness{route="aio-broker-diagnostics-probe-0 aio-broker-dmqtt-frontend-1 aio-broker-dmqtt-backend-2-0:1 aio-broker-dmqtt-backend-2-1:1 aio-broker-dmqtt-backend-1-0:1 aio-broker-dmqtt-backend-1-1:1"} 1 +# TYPE aio_broker_publish_route_replication_correctness gauge +aio_broker_publish_route_replication_correctness{route="aio-broker-diagnostics-probe-0 aio-broker-dmqtt-frontend-1 aio-broker-dmqtt-backend-1-0:1 aio-broker-dmqtt-backend-1-1:1"} 1 +aio_broker_publish_route_replication_correctness{route="aio-broker-diagnostics-probe-0 aio-broker-dmqtt-frontend-1 aio-broker-dmqtt-backend-1-0:0 aio-broker-dmqtt-backend-1-1:0"} 1 +aio_broker_publish_route_replication_correctness{route="aio-broker-diagnostics-probe-0 aio-broker-dmqtt-frontend-1 aio-broker-dmqtt-backend-2-0:0 aio-broker-dmqtt-backend-2-1:0"} 1 
+aio_broker_publish_route_replication_correctness{route="aio-broker-diagnostics-probe-0 aio-broker-dmqtt-frontend-1 aio-broker-dmqtt-backend-2-0:1 aio-broker-dmqtt-backend-2-1:1"} 1 +# TYPE aio_broker_store_total_subscriptions gauge +aio_broker_store_total_subscriptions{hostname="aio-broker-dmqtt-backend-2-0",instance="aefa2f1f-7576-4ba4-9627-49d4ef0045e5",namespace="azure-iot-operations",pod_type="BE"} 258 +aio_broker_store_total_subscriptions{hostname="aio-broker-dmqtt-backend-1-1",instance="a55e6597-6906-44b5-9678-2d6b2808998c",namespace="azure-iot-operations",pod_type="BE"} 258 +aio_broker_store_total_subscriptions{hostname="aio-broker-dmqtt-backend-2-1",instance="028d15b1-625d-4256-ad0b-e534027e96da",namespace="azure-iot-operations",pod_type="BE"} 258 +aio_broker_store_total_subscriptions{hostname="aio-broker-dmqtt-backend-1-0",instance="37d8f0d9-7088-4db3-9c60-72157a5a47f8",namespace="azure-iot-operations",pod_type="BE"} 258 +# TYPE aio_broker_publish_latency_sigma_ms gauge +aio_broker_publish_latency_sigma_ms 17.652887 +# TYPE aio_broker_ping_latency_route_ms gauge +aio_broker_ping_latency_route_ms{route=""} 111.50912 +# TYPE aio_broker_subscribe_latency_route_ms gauge +aio_broker_subscribe_latency_route_ms{route="aio-broker-diagnostics-probe-0 aio-broker-dmqtt-frontend-1 aio-broker-dmqtt-backend-2-0:1 aio-broker-dmqtt-backend-2-1:1"} 3.217114 +aio_broker_subscribe_latency_route_ms{route="aio-broker-diagnostics-probe-0 aio-broker-dmqtt-frontend-1 aio-broker-dmqtt-backend-1-0:1 aio-broker-dmqtt-backend-1-1:1 aio-broker-dmqtt-backend-1-0:0 aio-broker-dmqtt-backend-1-1:0"} 46.881306 +aio_broker_subscribe_latency_route_ms{route="aio-broker-diagnostics-probe-0 aio-broker-dmqtt-frontend-1 aio-broker-dmqtt-backend-1-0:1 aio-broker-dmqtt-backend-1-1:1"} 2.3446221 +aio_broker_subscribe_latency_route_ms{route="aio-broker-diagnostics-probe-0 aio-broker-dmqtt-frontend-1 aio-broker-dmqtt-backend-1-0:0 aio-broker-dmqtt-backend-1-1:0 aio-broker-dmqtt-backend-1-0:1 aio-broker-dmqtt-backend-1-1:1"} 77.88732 +aio_broker_subscribe_latency_route_ms{route="aio-broker-diagnostics-probe-0 aio-broker-dmqtt-frontend-1 aio-broker-dmqtt-backend-2-0:1 aio-broker-dmqtt-backend-2-1:1 aio-broker-dmqtt-backend-1-0:1 aio-broker-dmqtt-backend-1-1:1"} 44.94545 +aio_broker_subscribe_latency_route_ms{route="aio-broker-diagnostics-probe-0 aio-broker-dmqtt-frontend-1 aio-broker-dmqtt-backend-1-0:1 aio-broker-dmqtt-backend-1-1:1 aio-broker-dmqtt-backend-2-0:0 aio-broker-dmqtt-backend-2-1:0"} 7.618704 +aio_broker_subscribe_latency_route_ms{route="aio-broker-diagnostics-probe-0 aio-broker-dmqtt-frontend-1 aio-broker-dmqtt-backend-1-0:1 aio-broker-dmqtt-backend-1-1:1 aio-broker-dmqtt-backend-2-0:1 aio-broker-dmqtt-backend-2-1:1"} 47.346355 +aio_broker_subscribe_latency_route_ms{route="aio-broker-diagnostics-probe-0 aio-broker-dmqtt-frontend-1 aio-broker-dmqtt-backend-2-0:0 aio-broker-dmqtt-backend-2-1:0 aio-broker-dmqtt-backend-2-0:1 aio-broker-dmqtt-backend-2-1:1"} 48.460068 +aio_broker_subscribe_latency_route_ms{route="aio-broker-diagnostics-probe-0 aio-broker-dmqtt-frontend-1 aio-broker-dmqtt-backend-1-0:0 aio-broker-dmqtt-backend-1-1:0"} 3.41304 +aio_broker_subscribe_latency_route_ms{route="aio-broker-diagnostics-probe-0 aio-broker-dmqtt-frontend-1 aio-broker-dmqtt-backend-2-0:0 aio-broker-dmqtt-backend-2-1:0 aio-broker-dmqtt-backend-1-0:0 aio-broker-dmqtt-backend-1-1:0"} 49.23383 +aio_broker_subscribe_latency_route_ms{route="aio-broker-diagnostics-probe-0 aio-broker-dmqtt-frontend-1 aio-broker-dmqtt-backend-1-0:0 
aio-broker-dmqtt-backend-1-1:0 aio-broker-dmqtt-backend-2-0:1 aio-broker-dmqtt-backend-2-1:1"} 49.087955 +aio_broker_subscribe_latency_route_ms{route="aio-broker-diagnostics-probe-0 aio-broker-dmqtt-frontend-1 aio-broker-dmqtt-backend-2-0:0 aio-broker-dmqtt-backend-2-1:0 aio-broker-dmqtt-backend-1-0:1 aio-broker-dmqtt-backend-1-1:1"} 46.974224 +aio_broker_subscribe_latency_route_ms{route="aio-broker-diagnostics-probe-0 aio-broker-dmqtt-frontend-1 aio-broker-dmqtt-backend-2-0:1 aio-broker-dmqtt-backend-2-1:1 aio-broker-dmqtt-backend-1-0:0 aio-broker-dmqtt-backend-1-1:0"} 47.013584 +aio_broker_subscribe_latency_route_ms{route="aio-broker-diagnostics-probe-0 aio-broker-dmqtt-frontend-1 aio-broker-dmqtt-backend-1-0:0 aio-broker-dmqtt-backend-1-1:0 aio-broker-dmqtt-backend-2-0:0 aio-broker-dmqtt-backend-2-1:0"} 51.384243 +aio_broker_subscribe_latency_route_ms{route="aio-broker-diagnostics-probe-0 aio-broker-dmqtt-frontend-1 aio-broker-dmqtt-backend-2-0:0 aio-broker-dmqtt-backend-2-1:0"} 3.541 +aio_broker_subscribe_latency_route_ms{route="aio-broker-diagnostics-probe-0 aio-broker-dmqtt-frontend-1 aio-broker-dmqtt-backend-2-0:1 aio-broker-dmqtt-backend-2-1:1 aio-broker-dmqtt-backend-2-0:0 aio-broker-dmqtt-backend-2-1:0"} 49.028214 +# TYPE aio_broker_ping_latency_mu_ms gauge +aio_broker_ping_latency_mu_ms 112.64005 +# TYPE aio_broker_store_will_bytes gauge +aio_broker_store_will_bytes{hostname="aio-broker-dmqtt-backend-1-0",instance="37d8f0d9-7088-4db3-9c60-72157a5a47f8",namespace="azure-iot-operations",pod_type="BE"} 0 +aio_broker_store_will_bytes{hostname="aio-broker-dmqtt-backend-2-0",instance="aefa2f1f-7576-4ba4-9627-49d4ef0045e5",namespace="azure-iot-operations",pod_type="BE"} 0 +aio_broker_store_will_bytes{hostname="aio-broker-dmqtt-backend-1-1",instance="a55e6597-6906-44b5-9678-2d6b2808998c",namespace="azure-iot-operations",pod_type="BE"} 0 +aio_broker_store_will_bytes{hostname="aio-broker-dmqtt-backend-2-1",instance="028d15b1-625d-4256-ad0b-e534027e96da",namespace="azure-iot-operations",pod_type="BE"} 0 +# TYPE aio_broker_connected_sessions gauge +aio_broker_connected_sessions{hostname="aio-broker-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 2 +aio_broker_connected_sessions{hostname="aio-broker-dmqtt-frontend-1",instance="694fdfcb-34a6-4364-a783-dc0e3af80131",namespace="azure-iot-operations",pod_type="FE"} 5 +# TYPE aio_broker_payload_bytes_received counter +aio_broker_payload_bytes_received{category="uncategorized",hostname="aio-broker-dmqtt-backend-2-1",instance="028d15b1-625d-4256-ad0b-e534027e96da",namespace="azure-iot-operations",pod_type="BE"} 0 +aio_broker_payload_bytes_received{category="uncategorized",hostname="aio-broker-dmqtt-frontend-1",instance="694fdfcb-34a6-4364-a783-dc0e3af80131",namespace="azure-iot-operations",pod_type="FE"} 0 +aio_broker_payload_bytes_received{category="broker_selftest",hostname="aio-broker-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 0 +aio_broker_payload_bytes_received{category="aio-opc",hostname="aio-broker-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 214 +aio_broker_payload_bytes_received{category="broker_selftest",hostname="aio-broker-dmqtt-frontend-1",instance="694fdfcb-34a6-4364-a783-dc0e3af80131",namespace="azure-iot-operations",pod_type="FE"} 1984 
+aio_broker_payload_bytes_received{category="uncategorized",hostname="aio-broker-dmqtt-backend-1-0",instance="37d8f0d9-7088-4db3-9c60-72157a5a47f8",namespace="azure-iot-operations",pod_type="BE"} 0 +aio_broker_payload_bytes_received{category="uncategorized",hostname="aio-broker-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 0 +aio_broker_payload_bytes_received{category="uncategorized",hostname="aio-broker-dmqtt-backend-2-0",instance="aefa2f1f-7576-4ba4-9627-49d4ef0045e5",namespace="azure-iot-operations",pod_type="BE"} 0 +aio_broker_payload_bytes_received{category="uncategorized",hostname="aio-broker-dmqtt-backend-1-1",instance="a55e6597-6906-44b5-9678-2d6b2808998c",namespace="azure-iot-operations",pod_type="BE"} 0 +# TYPE aio_broker_publish_latency_mu_ms gauge +aio_broker_publish_latency_mu_ms 60.079346 +# TYPE aio_broker_store_total_sessions gauge +aio_broker_store_total_sessions{hostname="aio-broker-dmqtt-backend-1-0",instance="37d8f0d9-7088-4db3-9c60-72157a5a47f8",is_persistent="false",namespace="azure-iot-operations",pod_type="BE"} 3 +aio_broker_store_total_sessions{hostname="aio-broker-dmqtt-backend-2-0",instance="aefa2f1f-7576-4ba4-9627-49d4ef0045e5",is_persistent="false",namespace="azure-iot-operations",pod_type="BE"} 4 +aio_broker_store_total_sessions{hostname="aio-broker-dmqtt-backend-2-1",instance="028d15b1-625d-4256-ad0b-e534027e96da",is_persistent="false",namespace="azure-iot-operations",pod_type="BE"} 4 +aio_broker_store_total_sessions{hostname="aio-broker-dmqtt-backend-2-1",instance="028d15b1-625d-4256-ad0b-e534027e96da",is_persistent="true",namespace="azure-iot-operations",pod_type="BE"} 0 +aio_broker_store_total_sessions{hostname="aio-broker-dmqtt-backend-1-1",instance="a55e6597-6906-44b5-9678-2d6b2808998c",is_persistent="true",namespace="azure-iot-operations",pod_type="BE"} 0 +aio_broker_store_total_sessions{hostname="aio-broker-dmqtt-backend-1-0",instance="37d8f0d9-7088-4db3-9c60-72157a5a47f8",is_persistent="true",namespace="azure-iot-operations",pod_type="BE"} 0 +aio_broker_store_total_sessions{hostname="aio-broker-dmqtt-backend-2-0",instance="aefa2f1f-7576-4ba4-9627-49d4ef0045e5",is_persistent="true",namespace="azure-iot-operations",pod_type="BE"} 0 +aio_broker_store_total_sessions{hostname="aio-broker-dmqtt-backend-1-1",instance="a55e6597-6906-44b5-9678-2d6b2808998c",is_persistent="false",namespace="azure-iot-operations",pod_type="BE"} 3 +# TYPE aio_broker_authorization_allow counter +aio_broker_authorization_allow{category="uncategorized",hostname="aio-broker-dmqtt-frontend-1",instance="694fdfcb-34a6-4364-a783-dc0e3af80131",namespace="azure-iot-operations",pod_type="FE"} 0 +aio_broker_authorization_allow{category="uncategorized",hostname="aio-broker-dmqtt-backend-1-0",instance="37d8f0d9-7088-4db3-9c60-72157a5a47f8",namespace="azure-iot-operations",pod_type="BE"} 0 +aio_broker_authorization_allow{category="uncategorized",hostname="aio-broker-dmqtt-backend-2-0",instance="aefa2f1f-7576-4ba4-9627-49d4ef0045e5",namespace="azure-iot-operations",pod_type="BE"} 0 +aio_broker_authorization_allow{category="broker_selftest",hostname="aio-broker-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 311 +aio_broker_authorization_allow{category="uncategorized",hostname="aio-broker-dmqtt-backend-1-1",instance="a55e6597-6906-44b5-9678-2d6b2808998c",namespace="azure-iot-operations",pod_type="BE"} 0 
+aio_broker_authorization_allow{category="broker_selftest",hostname="aio-broker-dmqtt-frontend-1",instance="694fdfcb-34a6-4364-a783-dc0e3af80131",namespace="azure-iot-operations",pod_type="FE"} 315 +aio_broker_authorization_allow{category="uncategorized",hostname="aio-broker-dmqtt-backend-2-1",instance="028d15b1-625d-4256-ad0b-e534027e96da",namespace="azure-iot-operations",pod_type="BE"} 0 +aio_broker_authorization_allow{category="uncategorized",hostname="aio-broker-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 0 +aio_broker_authorization_allow{category="aio-opc",hostname="aio-broker-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 1 +# TYPE aio_broker_store_retained_bytes gauge +aio_broker_store_retained_bytes{hostname="aio-broker-dmqtt-backend-2-1",instance="028d15b1-625d-4256-ad0b-e534027e96da",namespace="azure-iot-operations",pod_type="BE"} 0 +aio_broker_store_retained_bytes{hostname="aio-broker-dmqtt-backend-2-0",instance="aefa2f1f-7576-4ba4-9627-49d4ef0045e5",namespace="azure-iot-operations",pod_type="BE"} 0 +aio_broker_store_retained_bytes{hostname="aio-broker-dmqtt-backend-1-0",instance="37d8f0d9-7088-4db3-9c60-72157a5a47f8",namespace="azure-iot-operations",pod_type="BE"} 24 +aio_broker_store_retained_bytes{hostname="aio-broker-dmqtt-backend-1-1",instance="a55e6597-6906-44b5-9678-2d6b2808998c",namespace="azure-iot-operations",pod_type="BE"} 24 +# TYPE aio_broker_unsubscribe_latency_sigma_ms gauge +aio_broker_unsubscribe_latency_sigma_ms 21.741936 # TYPE qos0_messages_dropped counter -qos0_messages_dropped{category="broker_selftest",direction="outgoing",hostname="aio-mq-dmqtt-frontend-1",instance="694fdfcb-34a6-4364-a783-dc0e3af80131",namespace="azure-iot-operations",pod_type="FE"} 0 -qos0_messages_dropped{category="uncategorized",direction="outgoing",hostname="aio-mq-dmqtt-backend-1-0",instance="37d8f0d9-7088-4db3-9c60-72157a5a47f8",namespace="azure-iot-operations",pod_type="BE"} 0 -qos0_messages_dropped{category="uncategorized",direction="incoming",hostname="aio-mq-dmqtt-backend-2-1",instance="028d15b1-625d-4256-ad0b-e534027e96da",namespace="azure-iot-operations",pod_type="BE"} 0 -qos0_messages_dropped{category="uncategorized",direction="incoming",hostname="aio-mq-dmqtt-backend-2-0",instance="aefa2f1f-7576-4ba4-9627-49d4ef0045e5",namespace="azure-iot-operations",pod_type="BE"} 0 -qos0_messages_dropped{category="uncategorized",direction="incoming",hostname="aio-mq-dmqtt-backend-1-0",instance="37d8f0d9-7088-4db3-9c60-72157a5a47f8",namespace="azure-iot-operations",pod_type="BE"} 0 -qos0_messages_dropped{category="aio-opc",direction="incoming",hostname="aio-mq-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 0 -qos0_messages_dropped{category="uncategorized",direction="incoming",hostname="aio-mq-dmqtt-frontend-1",instance="694fdfcb-34a6-4364-a783-dc0e3af80131",namespace="azure-iot-operations",pod_type="FE"} 0 -qos0_messages_dropped{category="uncategorized",direction="outgoing",hostname="aio-mq-dmqtt-backend-2-0",instance="aefa2f1f-7576-4ba4-9627-49d4ef0045e5",namespace="azure-iot-operations",pod_type="BE"} 0 -qos0_messages_dropped{category="uncategorized",direction="outgoing",hostname="aio-mq-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 0 
-qos0_messages_dropped{category="aio-opc",direction="outgoing",hostname="aio-mq-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 0 -qos0_messages_dropped{category="uncategorized",direction="outgoing",hostname="aio-mq-dmqtt-backend-2-1",instance="028d15b1-625d-4256-ad0b-e534027e96da",namespace="azure-iot-operations",pod_type="BE"} 0 -qos0_messages_dropped{category="broker_selftest",direction="incoming",hostname="aio-mq-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 0 -qos0_messages_dropped{category="uncategorized",direction="incoming",hostname="aio-mq-dmqtt-backend-1-1",instance="a55e6597-6906-44b5-9678-2d6b2808998c",namespace="azure-iot-operations",pod_type="BE"} 0 -qos0_messages_dropped{category="broker_selftest",direction="incoming",hostname="aio-mq-dmqtt-frontend-1",instance="694fdfcb-34a6-4364-a783-dc0e3af80131",namespace="azure-iot-operations",pod_type="FE"} 0 -qos0_messages_dropped{category="broker_selftest",direction="outgoing",hostname="aio-mq-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 0 -qos0_messages_dropped{category="uncategorized",direction="outgoing",hostname="aio-mq-dmqtt-frontend-1",instance="694fdfcb-34a6-4364-a783-dc0e3af80131",namespace="azure-iot-operations",pod_type="FE"} 0 -qos0_messages_dropped{category="uncategorized",direction="outgoing",hostname="aio-mq-dmqtt-backend-1-1",instance="a55e6597-6906-44b5-9678-2d6b2808998c",namespace="azure-iot-operations",pod_type="BE"} 0 -qos0_messages_dropped{category="uncategorized",direction="incoming",hostname="aio-mq-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 0 -# TYPE aio_mq_store_will_messages gauge -aio_mq_store_will_messages{hostname="aio-mq-dmqtt-backend-2-1",instance="028d15b1-625d-4256-ad0b-e534027e96da",namespace="azure-iot-operations",pod_type="BE"} 0 -aio_mq_store_will_messages{hostname="aio-mq-dmqtt-backend-1-0",instance="37d8f0d9-7088-4db3-9c60-72157a5a47f8",namespace="azure-iot-operations",pod_type="BE"} 0 -aio_mq_store_will_messages{hostname="aio-mq-dmqtt-backend-2-0",instance="aefa2f1f-7576-4ba4-9627-49d4ef0045e5",namespace="azure-iot-operations",pod_type="BE"} 0 -aio_mq_store_will_messages{hostname="aio-mq-dmqtt-backend-1-1",instance="a55e6597-6906-44b5-9678-2d6b2808998c",namespace="azure-iot-operations",pod_type="BE"} 0 -# TYPE aio_mq_connect_route_replication_correctness gauge -aio_mq_connect_route_replication_correctness{route="aio-mq-diagnostics-probe-0 aio-mq-dmqtt-frontend-0 aio-mq-dmqtt-backend-1-0:0 aio-mq-dmqtt-backend-1-1:0"} 1 -aio_mq_connect_route_replication_correctness{route="aio-mq-diagnostics-probe-0 aio-mq-dmqtt-frontend-1 aio-mq-dmqtt-backend-1-0:1 aio-mq-dmqtt-backend-1-1:1"} 1 -aio_mq_connect_route_replication_correctness{route="aio-mq-diagnostics-probe-0 aio-mq-dmqtt-frontend-0 aio-mq-dmqtt-backend-2-0:0 aio-mq-dmqtt-backend-2-1:0"} 1 -aio_mq_connect_route_replication_correctness{route="aio-mq-diagnostics-probe-0 aio-mq-dmqtt-frontend-0 aio-mq-dmqtt-backend-1-0:1 aio-mq-dmqtt-backend-1-1:1"} 1 -aio_mq_connect_route_replication_correctness{route="aio-mq-diagnostics-probe-0 aio-mq-dmqtt-frontend-1 aio-mq-dmqtt-backend-2-0:0 aio-mq-dmqtt-backend-2-1:0"} 1 -aio_mq_connect_route_replication_correctness{route="aio-mq-diagnostics-probe-0 aio-mq-dmqtt-frontend-0 aio-mq-dmqtt-backend-2-0:1 aio-mq-dmqtt-backend-2-1:1"} 1 -# TYPE 
aio_mq_publish_latency_route_ms gauge -aio_mq_publish_latency_route_ms{route="aio-mq-diagnostics-probe-0 aio-mq-dmqtt-frontend-1 aio-mq-dmqtt-backend-1-0:1 aio-mq-dmqtt-backend-1-1:1"} 9.32115 -aio_mq_publish_latency_route_ms{route="aio-mq-diagnostics-probe-0 aio-mq-dmqtt-frontend-1 aio-mq-dmqtt-backend-2-0:1 aio-mq-dmqtt-backend-2-1:1"} 9.317781 -aio_mq_publish_latency_route_ms{route="aio-mq-diagnostics-probe-0 aio-mq-dmqtt-frontend-1 aio-mq-dmqtt-backend-1-0:0 aio-mq-dmqtt-backend-1-1:0"} 8.493393 -aio_mq_publish_latency_route_ms{route="aio-mq-diagnostics-probe-0 aio-mq-dmqtt-frontend-1 aio-mq-dmqtt-backend-2-0:0 aio-mq-dmqtt-backend-2-1:0"} 9.31679 -# TYPE aio_mq_authentication_successes counter -aio_mq_authentication_successes{category="broker_selftest",hostname="aio-mq-dmqtt-frontend-1",instance="694fdfcb-34a6-4364-a783-dc0e3af80131",namespace="azure-iot-operations",pod_type="FE"} 315 -aio_mq_authentication_successes{category="uncategorized",hostname="aio-mq-dmqtt-backend-2-0",instance="aefa2f1f-7576-4ba4-9627-49d4ef0045e5",namespace="azure-iot-operations",pod_type="BE"} 0 -aio_mq_authentication_successes{category="uncategorized",hostname="aio-mq-dmqtt-backend-1-1",instance="a55e6597-6906-44b5-9678-2d6b2808998c",namespace="azure-iot-operations",pod_type="BE"} 0 -aio_mq_authentication_successes{category="uncategorized",hostname="aio-mq-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 0 -aio_mq_authentication_successes{category="aio-opc",hostname="aio-mq-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 1 -aio_mq_authentication_successes{category="uncategorized",hostname="aio-mq-dmqtt-backend-1-0",instance="37d8f0d9-7088-4db3-9c60-72157a5a47f8",namespace="azure-iot-operations",pod_type="BE"} 0 -aio_mq_authentication_successes{category="uncategorized",hostname="aio-mq-dmqtt-backend-2-1",instance="028d15b1-625d-4256-ad0b-e534027e96da",namespace="azure-iot-operations",pod_type="BE"} 0 -aio_mq_authentication_successes{category="uncategorized",hostname="aio-mq-dmqtt-frontend-1",instance="694fdfcb-34a6-4364-a783-dc0e3af80131",namespace="azure-iot-operations",pod_type="FE"} 0 -aio_mq_authentication_successes{category="broker_selftest",hostname="aio-mq-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 311 -# TYPE aio_mq_publish_latency_last_value_ms gauge -aio_mq_publish_latency_last_value_ms 84.76163 -# TYPE aio_mq_authorization_deny counter -aio_mq_authorization_deny{category="uncategorized",hostname="aio-mq-dmqtt-backend-2-0",instance="aefa2f1f-7576-4ba4-9627-49d4ef0045e5",namespace="azure-iot-operations",pod_type="BE"} 0 -aio_mq_authorization_deny{category="uncategorized",hostname="aio-mq-dmqtt-backend-1-1",instance="a55e6597-6906-44b5-9678-2d6b2808998c",namespace="azure-iot-operations",pod_type="BE"} 0 -aio_mq_authorization_deny{category="uncategorized",hostname="aio-mq-dmqtt-frontend-1",instance="694fdfcb-34a6-4364-a783-dc0e3af80131",namespace="azure-iot-operations",pod_type="FE"} 0 -aio_mq_authorization_deny{category="uncategorized",hostname="aio-mq-dmqtt-backend-2-1",instance="028d15b1-625d-4256-ad0b-e534027e96da",namespace="azure-iot-operations",pod_type="BE"} 0 -aio_mq_authorization_deny{category="uncategorized",hostname="aio-mq-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 0 
-aio_mq_authorization_deny{category="broker_selftest",hostname="aio-mq-dmqtt-frontend-1",instance="694fdfcb-34a6-4364-a783-dc0e3af80131",namespace="azure-iot-operations",pod_type="FE"} 0 -aio_mq_authorization_deny{category="aio-opc",hostname="aio-mq-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 0 -aio_mq_authorization_deny{category="broker_selftest",hostname="aio-mq-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 0 -aio_mq_authorization_deny{category="uncategorized",hostname="aio-mq-dmqtt-backend-1-0",instance="37d8f0d9-7088-4db3-9c60-72157a5a47f8",namespace="azure-iot-operations",pod_type="BE"} 0 -# TYPE aio_mq_publishes_received_per_second gauge -aio_mq_publishes_received_per_second{category="uncategorized",hostname="aio-mq-dmqtt-frontend-1",instance="694fdfcb-34a6-4364-a783-dc0e3af80131",namespace="azure-iot-operations",pod_type="FE"} 0 -aio_mq_publishes_received_per_second{category="broker_selftest",hostname="aio-mq-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 0 -aio_mq_publishes_received_per_second{category="uncategorized",hostname="aio-mq-dmqtt-backend-1-0",instance="37d8f0d9-7088-4db3-9c60-72157a5a47f8",namespace="azure-iot-operations",pod_type="BE"} 0 -aio_mq_publishes_received_per_second{category="uncategorized",hostname="aio-mq-dmqtt-backend-1-1",instance="a55e6597-6906-44b5-9678-2d6b2808998c",namespace="azure-iot-operations",pod_type="BE"} 0 -aio_mq_publishes_received_per_second{category="uncategorized",hostname="aio-mq-dmqtt-backend-2-1",instance="028d15b1-625d-4256-ad0b-e534027e96da",namespace="azure-iot-operations",pod_type="BE"} 0 -aio_mq_publishes_received_per_second{category="aio-opc",hostname="aio-mq-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 0.033333335 -aio_mq_publishes_received_per_second{category="broker_selftest",hostname="aio-mq-dmqtt-frontend-1",instance="694fdfcb-34a6-4364-a783-dc0e3af80131",namespace="azure-iot-operations",pod_type="FE"} 0.53333336 -aio_mq_publishes_received_per_second{category="uncategorized",hostname="aio-mq-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 0 -aio_mq_publishes_received_per_second{category="uncategorized",hostname="aio-mq-dmqtt-backend-2-0",instance="aefa2f1f-7576-4ba4-9627-49d4ef0045e5",namespace="azure-iot-operations",pod_type="BE"} 0 -# TYPE aio_mq_store_connected_sessions gauge -aio_mq_store_connected_sessions{hostname="aio-mq-dmqtt-backend-2-0",instance="aefa2f1f-7576-4ba4-9627-49d4ef0045e5",is_persistent="true",namespace="azure-iot-operations",pod_type="BE"} 0 -aio_mq_store_connected_sessions{hostname="aio-mq-dmqtt-backend-2-1",instance="028d15b1-625d-4256-ad0b-e534027e96da",is_persistent="true",namespace="azure-iot-operations",pod_type="BE"} 0 -aio_mq_store_connected_sessions{hostname="aio-mq-dmqtt-backend-1-0",instance="37d8f0d9-7088-4db3-9c60-72157a5a47f8",is_persistent="true",namespace="azure-iot-operations",pod_type="BE"} 0 -aio_mq_store_connected_sessions{hostname="aio-mq-dmqtt-backend-1-1",instance="a55e6597-6906-44b5-9678-2d6b2808998c",is_persistent="false",namespace="azure-iot-operations",pod_type="BE"} 3 
-aio_mq_store_connected_sessions{hostname="aio-mq-dmqtt-backend-1-1",instance="a55e6597-6906-44b5-9678-2d6b2808998c",is_persistent="true",namespace="azure-iot-operations",pod_type="BE"} 0 -aio_mq_store_connected_sessions{hostname="aio-mq-dmqtt-backend-1-0",instance="37d8f0d9-7088-4db3-9c60-72157a5a47f8",is_persistent="false",namespace="azure-iot-operations",pod_type="BE"} 3 -aio_mq_store_connected_sessions{hostname="aio-mq-dmqtt-backend-2-0",instance="aefa2f1f-7576-4ba4-9627-49d4ef0045e5",is_persistent="false",namespace="azure-iot-operations",pod_type="BE"} 4 -aio_mq_store_connected_sessions{hostname="aio-mq-dmqtt-backend-2-1",instance="028d15b1-625d-4256-ad0b-e534027e96da",is_persistent="false",namespace="azure-iot-operations",pod_type="BE"} 4 -# TYPE aio_mq_state_store_insertions counter -aio_mq_state_store_insertions{hostname="aio-mq-dmqtt-backend-1-0",instance="37d8f0d9-7088-4db3-9c60-72157a5a47f8",namespace="azure-iot-operations",pod_type="BE"} 0 -aio_mq_state_store_insertions{hostname="aio-mq-dmqtt-backend-2-0",instance="aefa2f1f-7576-4ba4-9627-49d4ef0045e5",namespace="azure-iot-operations",pod_type="BE"} 0 -aio_mq_state_store_insertions{hostname="aio-mq-dmqtt-backend-1-1",instance="a55e6597-6906-44b5-9678-2d6b2808998c",namespace="azure-iot-operations",pod_type="BE"} 0 -aio_mq_state_store_insertions{hostname="aio-mq-dmqtt-backend-2-1",instance="028d15b1-625d-4256-ad0b-e534027e96da",namespace="azure-iot-operations",pod_type="BE"} 0 -# TYPE aio_mq_state_store_deletions counter -aio_mq_state_store_deletions{hostname="aio-mq-dmqtt-backend-1-0",instance="37d8f0d9-7088-4db3-9c60-72157a5a47f8",namespace="azure-iot-operations",pod_type="BE"} 0 -aio_mq_state_store_deletions{hostname="aio-mq-dmqtt-backend-2-0",instance="aefa2f1f-7576-4ba4-9627-49d4ef0045e5",namespace="azure-iot-operations",pod_type="BE"} 0 -aio_mq_state_store_deletions{hostname="aio-mq-dmqtt-backend-1-1",instance="a55e6597-6906-44b5-9678-2d6b2808998c",namespace="azure-iot-operations",pod_type="BE"} 0 -aio_mq_state_store_deletions{hostname="aio-mq-dmqtt-backend-2-1",instance="028d15b1-625d-4256-ad0b-e534027e96da",namespace="azure-iot-operations",pod_type="BE"} 0 -# TYPE aio_mq_backpressure_packets_rejected counter -aio_mq_backpressure_packets_rejected{hostname="aio-mq-dmqtt-backend-1-0",instance="37d8f0d9-7088-4db3-9c60-72157a5a47f8",namespace="azure-iot-operations",pod_type="BE"} 0 -aio_mq_backpressure_packets_rejected{hostname="aio-mq-dmqtt-backend-2-0",instance="aefa2f1f-7576-4ba4-9627-49d4ef0045e5",namespace="azure-iot-operations",pod_type="BE"} 0 -aio_mq_backpressure_packets_rejected{hostname="aio-mq-dmqtt-backend-2-1",instance="028d15b1-625d-4256-ad0b-e534027e96da",namespace="azure-iot-operations",pod_type="BE"} 0 -aio_mq_backpressure_packets_rejected{hostname="aio-mq-dmqtt-backend-1-1",instance="a55e6597-6906-44b5-9678-2d6b2808998c",namespace="azure-iot-operations",pod_type="BE"} 0 -# TYPE aio_mq_publishes_sent_per_second gauge -aio_mq_publishes_sent_per_second{category="uncategorized",hostname="aio-mq-dmqtt-frontend-1",instance="694fdfcb-34a6-4364-a783-dc0e3af80131",namespace="azure-iot-operations",pod_type="FE"} 0 -aio_mq_publishes_sent_per_second{category="uncategorized",hostname="aio-mq-dmqtt-backend-1-1",instance="a55e6597-6906-44b5-9678-2d6b2808998c",namespace="azure-iot-operations",pod_type="BE"} 0.9 -aio_mq_publishes_sent_per_second{category="broker_selftest",hostname="aio-mq-dmqtt-frontend-1",instance="694fdfcb-34a6-4364-a783-dc0e3af80131",namespace="azure-iot-operations",pod_type="FE"} 1.0666667 
-aio_mq_publishes_sent_per_second{category="uncategorized",hostname="aio-mq-dmqtt-backend-1-0",instance="37d8f0d9-7088-4db3-9c60-72157a5a47f8",namespace="azure-iot-operations",pod_type="BE"} 0 -aio_mq_publishes_sent_per_second{category="uncategorized",hostname="aio-mq-dmqtt-backend-2-1",instance="028d15b1-625d-4256-ad0b-e534027e96da",namespace="azure-iot-operations",pod_type="BE"} 0.8666667 -aio_mq_publishes_sent_per_second{category="aio-opc",hostname="aio-mq-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 0 -aio_mq_publishes_sent_per_second{category="uncategorized",hostname="aio-mq-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 0 -aio_mq_publishes_sent_per_second{category="broker_selftest",hostname="aio-mq-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 0 -aio_mq_publishes_sent_per_second{category="uncategorized",hostname="aio-mq-dmqtt-backend-2-0",instance="aefa2f1f-7576-4ba4-9627-49d4ef0045e5",namespace="azure-iot-operations",pod_type="BE"} 0 -# TYPE aio_mq_ping_latency_sigma_ms gauge -aio_mq_ping_latency_sigma_ms 5.326966 -# TYPE aio_mq_state_store_modifications counter -aio_mq_state_store_modifications{hostname="aio-mq-dmqtt-backend-2-1",instance="028d15b1-625d-4256-ad0b-e534027e96da",namespace="azure-iot-operations",pod_type="BE"} 0 -aio_mq_state_store_modifications{hostname="aio-mq-dmqtt-backend-1-1",instance="a55e6597-6906-44b5-9678-2d6b2808998c",namespace="azure-iot-operations",pod_type="BE"} 0 -aio_mq_state_store_modifications{hostname="aio-mq-dmqtt-backend-1-0",instance="37d8f0d9-7088-4db3-9c60-72157a5a47f8",namespace="azure-iot-operations",pod_type="BE"} 0 -aio_mq_state_store_modifications{hostname="aio-mq-dmqtt-backend-2-0",instance="aefa2f1f-7576-4ba4-9627-49d4ef0045e5",namespace="azure-iot-operations",pod_type="BE"} 0 -# TYPE aio_mq_state_store_retrievals counter -aio_mq_state_store_retrievals{hostname="aio-mq-dmqtt-backend-1-0",instance="37d8f0d9-7088-4db3-9c60-72157a5a47f8",namespace="azure-iot-operations",pod_type="BE"} 0 -aio_mq_state_store_retrievals{hostname="aio-mq-dmqtt-backend-1-1",instance="a55e6597-6906-44b5-9678-2d6b2808998c",namespace="azure-iot-operations",pod_type="BE"} 0 -aio_mq_state_store_retrievals{hostname="aio-mq-dmqtt-backend-2-0",instance="aefa2f1f-7576-4ba4-9627-49d4ef0045e5",namespace="azure-iot-operations",pod_type="BE"} 0 -aio_mq_state_store_retrievals{hostname="aio-mq-dmqtt-backend-2-1",instance="028d15b1-625d-4256-ad0b-e534027e96da",namespace="azure-iot-operations",pod_type="BE"} 0 -# TYPE aio_mq_unsubscribe_latency_mu_ms gauge -aio_mq_unsubscribe_latency_mu_ms 32087.498 -# TYPE aio_mq_total_subscriptions gauge -aio_mq_total_subscriptions{hostname="aio-mq-dmqtt-frontend-1",instance="694fdfcb-34a6-4364-a783-dc0e3af80131",namespace="azure-iot-operations",pod_type="FE"} 8 -aio_mq_total_subscriptions{hostname="aio-mq-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 2 -# TYPE aio_mq_unsubscribe_latency_last_value_ms gauge -aio_mq_unsubscribe_latency_last_value_ms 32076.668 -# TYPE aio_mq_ping_latency_last_value_ms gauge -aio_mq_ping_latency_last_value_ms 111.44446 -# TYPE aio_mq_total_sessions gauge -aio_mq_total_sessions{hostname="aio-mq-dmqtt-frontend-1",instance="694fdfcb-34a6-4364-a783-dc0e3af80131",namespace="azure-iot-operations",pod_type="FE"} 5 
-aio_mq_total_sessions{hostname="aio-mq-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 2 -# TYPE aio_mq_unsubscribe_route_replication_correctness gauge -aio_mq_unsubscribe_route_replication_correctness{route="aio-mq-diagnostics-probe-0 aio-mq-dmqtt-frontend-1 aio-mq-dmqtt-backend-1-0:1 aio-mq-dmqtt-backend-1-1:1 aio-mq-dmqtt-backend-2-0:1 aio-mq-dmqtt-backend-2-1:1"} 0 -aio_mq_unsubscribe_route_replication_correctness{route="aio-mq-diagnostics-probe-0 aio-mq-dmqtt-frontend-1 aio-mq-dmqtt-backend-2-0:1 aio-mq-dmqtt-backend-2-1:1 aio-mq-dmqtt-backend-1-0:1 aio-mq-dmqtt-backend-1-1:1"} 0 -aio_mq_unsubscribe_route_replication_correctness{route="aio-mq-diagnostics-probe-0 aio-mq-dmqtt-frontend-1 aio-mq-dmqtt-backend-2-0:0 aio-mq-dmqtt-backend-2-1:0 aio-mq-dmqtt-backend-1-0:1 aio-mq-dmqtt-backend-1-1:1"} 0 -aio_mq_unsubscribe_route_replication_correctness{route="aio-mq-diagnostics-probe-0 aio-mq-dmqtt-frontend-1 aio-mq-dmqtt-backend-1-0:1 aio-mq-dmqtt-backend-1-1:1 aio-mq-dmqtt-backend-2-0:0 aio-mq-dmqtt-backend-2-1:0"} 0 -aio_mq_unsubscribe_route_replication_correctness{route="aio-mq-diagnostics-probe-0 aio-mq-dmqtt-frontend-1 aio-mq-dmqtt-backend-1-0:1 aio-mq-dmqtt-backend-1-1:1 aio-mq-dmqtt-backend-1-0:0 aio-mq-dmqtt-backend-1-1:0"} 0 -aio_mq_unsubscribe_route_replication_correctness{route="aio-mq-diagnostics-probe-0 aio-mq-dmqtt-frontend-1 aio-mq-dmqtt-backend-1-0:0 aio-mq-dmqtt-backend-1-1:0 aio-mq-dmqtt-backend-2-0:0 aio-mq-dmqtt-backend-2-1:0"} 0 -# TYPE aio_mq_authentication_failures counter -aio_mq_authentication_failures{category="aio-opc",hostname="aio-mq-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 0 -aio_mq_authentication_failures{category="uncategorized",hostname="aio-mq-dmqtt-backend-1-0",instance="37d8f0d9-7088-4db3-9c60-72157a5a47f8",namespace="azure-iot-operations",pod_type="BE"} 0 -aio_mq_authentication_failures{category="broker_selftest",hostname="aio-mq-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 0 -aio_mq_authentication_failures{category="uncategorized",hostname="aio-mq-dmqtt-backend-2-0",instance="aefa2f1f-7576-4ba4-9627-49d4ef0045e5",namespace="azure-iot-operations",pod_type="BE"} 0 -aio_mq_authentication_failures{category="uncategorized",hostname="aio-mq-dmqtt-backend-1-1",instance="a55e6597-6906-44b5-9678-2d6b2808998c",namespace="azure-iot-operations",pod_type="BE"} 0 -aio_mq_authentication_failures{category="uncategorized",hostname="aio-mq-dmqtt-frontend-1",instance="694fdfcb-34a6-4364-a783-dc0e3af80131",namespace="azure-iot-operations",pod_type="FE"} 0 -aio_mq_authentication_failures{category="uncategorized",hostname="aio-mq-dmqtt-backend-2-1",instance="028d15b1-625d-4256-ad0b-e534027e96da",namespace="azure-iot-operations",pod_type="BE"} 0 -aio_mq_authentication_failures{category="uncategorized",hostname="aio-mq-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 0 -aio_mq_authentication_failures{category="broker_selftest",hostname="aio-mq-dmqtt-frontend-1",instance="694fdfcb-34a6-4364-a783-dc0e3af80131",namespace="azure-iot-operations",pod_type="FE"} 0 -# TYPE aio_mq_publishes_sent counter -aio_mq_publishes_sent{category="uncategorized",hostname="aio-mq-dmqtt-backend-1-1",instance="a55e6597-6906-44b5-9678-2d6b2808998c",namespace="azure-iot-operations",pod_type="BE"} 4878 
-aio_mq_publishes_sent{category="uncategorized",hostname="aio-mq-dmqtt-backend-2-0",instance="aefa2f1f-7576-4ba4-9627-49d4ef0045e5",namespace="azure-iot-operations",pod_type="BE"} 0 -aio_mq_publishes_sent{category="aio-opc",hostname="aio-mq-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 0 -aio_mq_publishes_sent{category="broker_selftest",hostname="aio-mq-dmqtt-frontend-1",instance="694fdfcb-34a6-4364-a783-dc0e3af80131",namespace="azure-iot-operations",pod_type="FE"} 3967 -aio_mq_publishes_sent{category="uncategorized",hostname="aio-mq-dmqtt-backend-1-0",instance="37d8f0d9-7088-4db3-9c60-72157a5a47f8",namespace="azure-iot-operations",pod_type="BE"} 0 -aio_mq_publishes_sent{category="uncategorized",hostname="aio-mq-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 0 -aio_mq_publishes_sent{category="uncategorized",hostname="aio-mq-dmqtt-backend-2-1",instance="028d15b1-625d-4256-ad0b-e534027e96da",namespace="azure-iot-operations",pod_type="BE"} 4784 -aio_mq_publishes_sent{category="uncategorized",hostname="aio-mq-dmqtt-frontend-1",instance="694fdfcb-34a6-4364-a783-dc0e3af80131",namespace="azure-iot-operations",pod_type="FE"} 0 -aio_mq_publishes_sent{category="broker_selftest",hostname="aio-mq-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 0 -# TYPE aio_mq_publishes_received counter -aio_mq_publishes_received{category="aio-opc",hostname="aio-mq-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 96 -aio_mq_publishes_received{category="uncategorized",hostname="aio-mq-dmqtt-backend-2-0",instance="aefa2f1f-7576-4ba4-9627-49d4ef0045e5",namespace="azure-iot-operations",pod_type="BE"} 0 -aio_mq_publishes_received{category="uncategorized",hostname="aio-mq-dmqtt-backend-2-1",instance="028d15b1-625d-4256-ad0b-e534027e96da",namespace="azure-iot-operations",pod_type="BE"} 0 -aio_mq_publishes_received{category="uncategorized",hostname="aio-mq-dmqtt-frontend-1",instance="694fdfcb-34a6-4364-a783-dc0e3af80131",namespace="azure-iot-operations",pod_type="FE"} 0 -aio_mq_publishes_received{category="uncategorized",hostname="aio-mq-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 0 -aio_mq_publishes_received{category="uncategorized",hostname="aio-mq-dmqtt-backend-1-1",instance="a55e6597-6906-44b5-9678-2d6b2808998c",namespace="azure-iot-operations",pod_type="BE"} 0 -aio_mq_publishes_received{category="uncategorized",hostname="aio-mq-dmqtt-backend-1-0",instance="37d8f0d9-7088-4db3-9c60-72157a5a47f8",namespace="azure-iot-operations",pod_type="BE"} 0 -aio_mq_publishes_received{category="broker_selftest",hostname="aio-mq-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 0 -aio_mq_publishes_received{category="broker_selftest",hostname="aio-mq-dmqtt-frontend-1",instance="694fdfcb-34a6-4364-a783-dc0e3af80131",namespace="azure-iot-operations",pod_type="FE"} 1984 -# TYPE aio_mq_payload_bytes_sent counter -aio_mq_payload_bytes_sent{category="uncategorized",hostname="aio-mq-dmqtt-frontend-1",instance="694fdfcb-34a6-4364-a783-dc0e3af80131",namespace="azure-iot-operations",pod_type="FE"} 0 
-aio_mq_payload_bytes_sent{category="uncategorized",hostname="aio-mq-dmqtt-backend-2-0",instance="aefa2f1f-7576-4ba4-9627-49d4ef0045e5",namespace="azure-iot-operations",pod_type="BE"} 0 -aio_mq_payload_bytes_sent{category="aio-opc",hostname="aio-mq-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 0 -aio_mq_payload_bytes_sent{category="broker_selftest",hostname="aio-mq-dmqtt-frontend-1",instance="694fdfcb-34a6-4364-a783-dc0e3af80131",namespace="azure-iot-operations",pod_type="FE"} 3967 -aio_mq_payload_bytes_sent{category="uncategorized",hostname="aio-mq-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 0 -aio_mq_payload_bytes_sent{category="broker_selftest",hostname="aio-mq-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 0 -aio_mq_payload_bytes_sent{category="uncategorized",hostname="aio-mq-dmqtt-backend-1-0",instance="37d8f0d9-7088-4db3-9c60-72157a5a47f8",namespace="azure-iot-operations",pod_type="BE"} 0 -aio_mq_payload_bytes_sent{category="uncategorized",hostname="aio-mq-dmqtt-backend-1-1",instance="a55e6597-6906-44b5-9678-2d6b2808998c",namespace="azure-iot-operations",pod_type="BE"} 0 -aio_mq_payload_bytes_sent{category="uncategorized",hostname="aio-mq-dmqtt-backend-2-1",instance="028d15b1-625d-4256-ad0b-e534027e96da",namespace="azure-iot-operations",pod_type="BE"} 0 +qos0_messages_dropped{category="broker_selftest",direction="outgoing",hostname="aio-broker-dmqtt-frontend-1",instance="694fdfcb-34a6-4364-a783-dc0e3af80131",namespace="azure-iot-operations",pod_type="FE"} 0 +qos0_messages_dropped{category="uncategorized",direction="outgoing",hostname="aio-broker-dmqtt-backend-1-0",instance="37d8f0d9-7088-4db3-9c60-72157a5a47f8",namespace="azure-iot-operations",pod_type="BE"} 0 +qos0_messages_dropped{category="uncategorized",direction="incoming",hostname="aio-broker-dmqtt-backend-2-1",instance="028d15b1-625d-4256-ad0b-e534027e96da",namespace="azure-iot-operations",pod_type="BE"} 0 +qos0_messages_dropped{category="uncategorized",direction="incoming",hostname="aio-broker-dmqtt-backend-2-0",instance="aefa2f1f-7576-4ba4-9627-49d4ef0045e5",namespace="azure-iot-operations",pod_type="BE"} 0 +qos0_messages_dropped{category="uncategorized",direction="incoming",hostname="aio-broker-dmqtt-backend-1-0",instance="37d8f0d9-7088-4db3-9c60-72157a5a47f8",namespace="azure-iot-operations",pod_type="BE"} 0 +qos0_messages_dropped{category="aio-opc",direction="incoming",hostname="aio-broker-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 0 +qos0_messages_dropped{category="uncategorized",direction="incoming",hostname="aio-broker-dmqtt-frontend-1",instance="694fdfcb-34a6-4364-a783-dc0e3af80131",namespace="azure-iot-operations",pod_type="FE"} 0 +qos0_messages_dropped{category="uncategorized",direction="outgoing",hostname="aio-broker-dmqtt-backend-2-0",instance="aefa2f1f-7576-4ba4-9627-49d4ef0045e5",namespace="azure-iot-operations",pod_type="BE"} 0 +qos0_messages_dropped{category="uncategorized",direction="outgoing",hostname="aio-broker-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 0 +qos0_messages_dropped{category="aio-opc",direction="outgoing",hostname="aio-broker-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 0 
+qos0_messages_dropped{category="uncategorized",direction="outgoing",hostname="aio-broker-dmqtt-backend-2-1",instance="028d15b1-625d-4256-ad0b-e534027e96da",namespace="azure-iot-operations",pod_type="BE"} 0 +qos0_messages_dropped{category="broker_selftest",direction="incoming",hostname="aio-broker-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 0 +qos0_messages_dropped{category="uncategorized",direction="incoming",hostname="aio-broker-dmqtt-backend-1-1",instance="a55e6597-6906-44b5-9678-2d6b2808998c",namespace="azure-iot-operations",pod_type="BE"} 0 +qos0_messages_dropped{category="broker_selftest",direction="incoming",hostname="aio-broker-dmqtt-frontend-1",instance="694fdfcb-34a6-4364-a783-dc0e3af80131",namespace="azure-iot-operations",pod_type="FE"} 0 +qos0_messages_dropped{category="broker_selftest",direction="outgoing",hostname="aio-broker-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 0 +qos0_messages_dropped{category="uncategorized",direction="outgoing",hostname="aio-broker-dmqtt-frontend-1",instance="694fdfcb-34a6-4364-a783-dc0e3af80131",namespace="azure-iot-operations",pod_type="FE"} 0 +qos0_messages_dropped{category="uncategorized",direction="outgoing",hostname="aio-broker-dmqtt-backend-1-1",instance="a55e6597-6906-44b5-9678-2d6b2808998c",namespace="azure-iot-operations",pod_type="BE"} 0 +qos0_messages_dropped{category="uncategorized",direction="incoming",hostname="aio-broker-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 0 +# TYPE aio_broker_store_will_messages gauge +aio_broker_store_will_messages{hostname="aio-broker-dmqtt-backend-2-1",instance="028d15b1-625d-4256-ad0b-e534027e96da",namespace="azure-iot-operations",pod_type="BE"} 0 +aio_broker_store_will_messages{hostname="aio-broker-dmqtt-backend-1-0",instance="37d8f0d9-7088-4db3-9c60-72157a5a47f8",namespace="azure-iot-operations",pod_type="BE"} 0 +aio_broker_store_will_messages{hostname="aio-broker-dmqtt-backend-2-0",instance="aefa2f1f-7576-4ba4-9627-49d4ef0045e5",namespace="azure-iot-operations",pod_type="BE"} 0 +aio_broker_store_will_messages{hostname="aio-broker-dmqtt-backend-1-1",instance="a55e6597-6906-44b5-9678-2d6b2808998c",namespace="azure-iot-operations",pod_type="BE"} 0 +# TYPE aio_broker_connect_route_replication_correctness gauge +aio_broker_connect_route_replication_correctness{route="aio-broker-diagnostics-probe-0 aio-broker-dmqtt-frontend-0 aio-broker-dmqtt-backend-1-0:0 aio-broker-dmqtt-backend-1-1:0"} 1 +aio_broker_connect_route_replication_correctness{route="aio-broker-diagnostics-probe-0 aio-broker-dmqtt-frontend-1 aio-broker-dmqtt-backend-1-0:1 aio-broker-dmqtt-backend-1-1:1"} 1 +aio_broker_connect_route_replication_correctness{route="aio-broker-diagnostics-probe-0 aio-broker-dmqtt-frontend-0 aio-broker-dmqtt-backend-2-0:0 aio-broker-dmqtt-backend-2-1:0"} 1 +aio_broker_connect_route_replication_correctness{route="aio-broker-diagnostics-probe-0 aio-broker-dmqtt-frontend-0 aio-broker-dmqtt-backend-1-0:1 aio-broker-dmqtt-backend-1-1:1"} 1 +aio_broker_connect_route_replication_correctness{route="aio-broker-diagnostics-probe-0 aio-broker-dmqtt-frontend-1 aio-broker-dmqtt-backend-2-0:0 aio-broker-dmqtt-backend-2-1:0"} 1 +aio_broker_connect_route_replication_correctness{route="aio-broker-diagnostics-probe-0 aio-broker-dmqtt-frontend-0 aio-broker-dmqtt-backend-2-0:1 aio-broker-dmqtt-backend-2-1:1"} 1 +# TYPE 
aio_broker_publish_latency_route_ms gauge +aio_broker_publish_latency_route_ms{route="aio-broker-diagnostics-probe-0 aio-broker-dmqtt-frontend-1 aio-broker-dmqtt-backend-1-0:1 aio-broker-dmqtt-backend-1-1:1"} 9.32115 +aio_broker_publish_latency_route_ms{route="aio-broker-diagnostics-probe-0 aio-broker-dmqtt-frontend-1 aio-broker-dmqtt-backend-2-0:1 aio-broker-dmqtt-backend-2-1:1"} 9.317781 +aio_broker_publish_latency_route_ms{route="aio-broker-diagnostics-probe-0 aio-broker-dmqtt-frontend-1 aio-broker-dmqtt-backend-1-0:0 aio-broker-dmqtt-backend-1-1:0"} 8.493393 +aio_broker_publish_latency_route_ms{route="aio-broker-diagnostics-probe-0 aio-broker-dmqtt-frontend-1 aio-broker-dmqtt-backend-2-0:0 aio-broker-dmqtt-backend-2-1:0"} 9.31679 +# TYPE aio_broker_authentication_successes counter +aio_broker_authentication_successes{category="broker_selftest",hostname="aio-broker-dmqtt-frontend-1",instance="694fdfcb-34a6-4364-a783-dc0e3af80131",namespace="azure-iot-operations",pod_type="FE"} 315 +aio_broker_authentication_successes{category="uncategorized",hostname="aio-broker-dmqtt-backend-2-0",instance="aefa2f1f-7576-4ba4-9627-49d4ef0045e5",namespace="azure-iot-operations",pod_type="BE"} 0 +aio_broker_authentication_successes{category="uncategorized",hostname="aio-broker-dmqtt-backend-1-1",instance="a55e6597-6906-44b5-9678-2d6b2808998c",namespace="azure-iot-operations",pod_type="BE"} 0 +aio_broker_authentication_successes{category="uncategorized",hostname="aio-broker-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 0 +aio_broker_authentication_successes{category="aio-opc",hostname="aio-broker-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 1 +aio_broker_authentication_successes{category="uncategorized",hostname="aio-broker-dmqtt-backend-1-0",instance="37d8f0d9-7088-4db3-9c60-72157a5a47f8",namespace="azure-iot-operations",pod_type="BE"} 0 +aio_broker_authentication_successes{category="uncategorized",hostname="aio-broker-dmqtt-backend-2-1",instance="028d15b1-625d-4256-ad0b-e534027e96da",namespace="azure-iot-operations",pod_type="BE"} 0 +aio_broker_authentication_successes{category="uncategorized",hostname="aio-broker-dmqtt-frontend-1",instance="694fdfcb-34a6-4364-a783-dc0e3af80131",namespace="azure-iot-operations",pod_type="FE"} 0 +aio_broker_authentication_successes{category="broker_selftest",hostname="aio-broker-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 311 +# TYPE aio_broker_publish_latency_last_value_ms gauge +aio_broker_publish_latency_last_value_ms 84.76163 +# TYPE aio_broker_authorization_deny counter +aio_broker_authorization_deny{category="uncategorized",hostname="aio-broker-dmqtt-backend-2-0",instance="aefa2f1f-7576-4ba4-9627-49d4ef0045e5",namespace="azure-iot-operations",pod_type="BE"} 0 +aio_broker_authorization_deny{category="uncategorized",hostname="aio-broker-dmqtt-backend-1-1",instance="a55e6597-6906-44b5-9678-2d6b2808998c",namespace="azure-iot-operations",pod_type="BE"} 0 +aio_broker_authorization_deny{category="uncategorized",hostname="aio-broker-dmqtt-frontend-1",instance="694fdfcb-34a6-4364-a783-dc0e3af80131",namespace="azure-iot-operations",pod_type="FE"} 0 +aio_broker_authorization_deny{category="uncategorized",hostname="aio-broker-dmqtt-backend-2-1",instance="028d15b1-625d-4256-ad0b-e534027e96da",namespace="azure-iot-operations",pod_type="BE"} 0 
+aio_broker_authorization_deny{category="uncategorized",hostname="aio-broker-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 0 +aio_broker_authorization_deny{category="broker_selftest",hostname="aio-broker-dmqtt-frontend-1",instance="694fdfcb-34a6-4364-a783-dc0e3af80131",namespace="azure-iot-operations",pod_type="FE"} 0 +aio_broker_authorization_deny{category="aio-opc",hostname="aio-broker-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 0 +aio_broker_authorization_deny{category="broker_selftest",hostname="aio-broker-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 0 +aio_broker_authorization_deny{category="uncategorized",hostname="aio-broker-dmqtt-backend-1-0",instance="37d8f0d9-7088-4db3-9c60-72157a5a47f8",namespace="azure-iot-operations",pod_type="BE"} 0 +# TYPE aio_broker_publishes_received_per_second gauge +aio_broker_publishes_received_per_second{category="uncategorized",hostname="aio-broker-dmqtt-frontend-1",instance="694fdfcb-34a6-4364-a783-dc0e3af80131",namespace="azure-iot-operations",pod_type="FE"} 0 +aio_broker_publishes_received_per_second{category="broker_selftest",hostname="aio-broker-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 0 +aio_broker_publishes_received_per_second{category="uncategorized",hostname="aio-broker-dmqtt-backend-1-0",instance="37d8f0d9-7088-4db3-9c60-72157a5a47f8",namespace="azure-iot-operations",pod_type="BE"} 0 +aio_broker_publishes_received_per_second{category="uncategorized",hostname="aio-broker-dmqtt-backend-1-1",instance="a55e6597-6906-44b5-9678-2d6b2808998c",namespace="azure-iot-operations",pod_type="BE"} 0 +aio_broker_publishes_received_per_second{category="uncategorized",hostname="aio-broker-dmqtt-backend-2-1",instance="028d15b1-625d-4256-ad0b-e534027e96da",namespace="azure-iot-operations",pod_type="BE"} 0 +aio_broker_publishes_received_per_second{category="aio-opc",hostname="aio-broker-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 0.033333335 +aio_broker_publishes_received_per_second{category="broker_selftest",hostname="aio-broker-dmqtt-frontend-1",instance="694fdfcb-34a6-4364-a783-dc0e3af80131",namespace="azure-iot-operations",pod_type="FE"} 0.53333336 +aio_broker_publishes_received_per_second{category="uncategorized",hostname="aio-broker-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 0 +aio_broker_publishes_received_per_second{category="uncategorized",hostname="aio-broker-dmqtt-backend-2-0",instance="aefa2f1f-7576-4ba4-9627-49d4ef0045e5",namespace="azure-iot-operations",pod_type="BE"} 0 +# TYPE aio_broker_store_connected_sessions gauge +aio_broker_store_connected_sessions{hostname="aio-broker-dmqtt-backend-2-0",instance="aefa2f1f-7576-4ba4-9627-49d4ef0045e5",is_persistent="true",namespace="azure-iot-operations",pod_type="BE"} 0 +aio_broker_store_connected_sessions{hostname="aio-broker-dmqtt-backend-2-1",instance="028d15b1-625d-4256-ad0b-e534027e96da",is_persistent="true",namespace="azure-iot-operations",pod_type="BE"} 0 +aio_broker_store_connected_sessions{hostname="aio-broker-dmqtt-backend-1-0",instance="37d8f0d9-7088-4db3-9c60-72157a5a47f8",is_persistent="true",namespace="azure-iot-operations",pod_type="BE"} 0 
+aio_broker_store_connected_sessions{hostname="aio-broker-dmqtt-backend-1-1",instance="a55e6597-6906-44b5-9678-2d6b2808998c",is_persistent="false",namespace="azure-iot-operations",pod_type="BE"} 3 +aio_broker_store_connected_sessions{hostname="aio-broker-dmqtt-backend-1-1",instance="a55e6597-6906-44b5-9678-2d6b2808998c",is_persistent="true",namespace="azure-iot-operations",pod_type="BE"} 0 +aio_broker_store_connected_sessions{hostname="aio-broker-dmqtt-backend-1-0",instance="37d8f0d9-7088-4db3-9c60-72157a5a47f8",is_persistent="false",namespace="azure-iot-operations",pod_type="BE"} 3 +aio_broker_store_connected_sessions{hostname="aio-broker-dmqtt-backend-2-0",instance="aefa2f1f-7576-4ba4-9627-49d4ef0045e5",is_persistent="false",namespace="azure-iot-operations",pod_type="BE"} 4 +aio_broker_store_connected_sessions{hostname="aio-broker-dmqtt-backend-2-1",instance="028d15b1-625d-4256-ad0b-e534027e96da",is_persistent="false",namespace="azure-iot-operations",pod_type="BE"} 4 +# TYPE aio_broker_state_store_insertions counter +aio_broker_state_store_insertions{hostname="aio-broker-dmqtt-backend-1-0",instance="37d8f0d9-7088-4db3-9c60-72157a5a47f8",namespace="azure-iot-operations",pod_type="BE"} 0 +aio_broker_state_store_insertions{hostname="aio-broker-dmqtt-backend-2-0",instance="aefa2f1f-7576-4ba4-9627-49d4ef0045e5",namespace="azure-iot-operations",pod_type="BE"} 0 +aio_broker_state_store_insertions{hostname="aio-broker-dmqtt-backend-1-1",instance="a55e6597-6906-44b5-9678-2d6b2808998c",namespace="azure-iot-operations",pod_type="BE"} 0 +aio_broker_state_store_insertions{hostname="aio-broker-dmqtt-backend-2-1",instance="028d15b1-625d-4256-ad0b-e534027e96da",namespace="azure-iot-operations",pod_type="BE"} 0 +# TYPE aio_broker_state_store_deletions counter +aio_broker_state_store_deletions{hostname="aio-broker-dmqtt-backend-1-0",instance="37d8f0d9-7088-4db3-9c60-72157a5a47f8",namespace="azure-iot-operations",pod_type="BE"} 0 +aio_broker_state_store_deletions{hostname="aio-broker-dmqtt-backend-2-0",instance="aefa2f1f-7576-4ba4-9627-49d4ef0045e5",namespace="azure-iot-operations",pod_type="BE"} 0 +aio_broker_state_store_deletions{hostname="aio-broker-dmqtt-backend-1-1",instance="a55e6597-6906-44b5-9678-2d6b2808998c",namespace="azure-iot-operations",pod_type="BE"} 0 +aio_broker_state_store_deletions{hostname="aio-broker-dmqtt-backend-2-1",instance="028d15b1-625d-4256-ad0b-e534027e96da",namespace="azure-iot-operations",pod_type="BE"} 0 +# TYPE aio_broker_backpressure_packets_rejected counter +aio_broker_backpressure_packets_rejected{hostname="aio-broker-dmqtt-backend-1-0",instance="37d8f0d9-7088-4db3-9c60-72157a5a47f8",namespace="azure-iot-operations",pod_type="BE"} 0 +aio_broker_backpressure_packets_rejected{hostname="aio-broker-dmqtt-backend-2-0",instance="aefa2f1f-7576-4ba4-9627-49d4ef0045e5",namespace="azure-iot-operations",pod_type="BE"} 0 +aio_broker_backpressure_packets_rejected{hostname="aio-broker-dmqtt-backend-2-1",instance="028d15b1-625d-4256-ad0b-e534027e96da",namespace="azure-iot-operations",pod_type="BE"} 0 +aio_broker_backpressure_packets_rejected{hostname="aio-broker-dmqtt-backend-1-1",instance="a55e6597-6906-44b5-9678-2d6b2808998c",namespace="azure-iot-operations",pod_type="BE"} 0 +# TYPE aio_broker_publishes_sent_per_second gauge +aio_broker_publishes_sent_per_second{category="uncategorized",hostname="aio-broker-dmqtt-frontend-1",instance="694fdfcb-34a6-4364-a783-dc0e3af80131",namespace="azure-iot-operations",pod_type="FE"} 0 
+aio_broker_publishes_sent_per_second{category="uncategorized",hostname="aio-broker-dmqtt-backend-1-1",instance="a55e6597-6906-44b5-9678-2d6b2808998c",namespace="azure-iot-operations",pod_type="BE"} 0.9 +aio_broker_publishes_sent_per_second{category="broker_selftest",hostname="aio-broker-dmqtt-frontend-1",instance="694fdfcb-34a6-4364-a783-dc0e3af80131",namespace="azure-iot-operations",pod_type="FE"} 1.0666667 +aio_broker_publishes_sent_per_second{category="uncategorized",hostname="aio-broker-dmqtt-backend-1-0",instance="37d8f0d9-7088-4db3-9c60-72157a5a47f8",namespace="azure-iot-operations",pod_type="BE"} 0 +aio_broker_publishes_sent_per_second{category="uncategorized",hostname="aio-broker-dmqtt-backend-2-1",instance="028d15b1-625d-4256-ad0b-e534027e96da",namespace="azure-iot-operations",pod_type="BE"} 0.8666667 +aio_broker_publishes_sent_per_second{category="aio-opc",hostname="aio-broker-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 0 +aio_broker_publishes_sent_per_second{category="uncategorized",hostname="aio-broker-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 0 +aio_broker_publishes_sent_per_second{category="broker_selftest",hostname="aio-broker-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 0 +aio_broker_publishes_sent_per_second{category="uncategorized",hostname="aio-broker-dmqtt-backend-2-0",instance="aefa2f1f-7576-4ba4-9627-49d4ef0045e5",namespace="azure-iot-operations",pod_type="BE"} 0 +# TYPE aio_broker_ping_latency_sigma_ms gauge +aio_broker_ping_latency_sigma_ms 5.326966 +# TYPE aio_broker_state_store_modifications counter +aio_broker_state_store_modifications{hostname="aio-broker-dmqtt-backend-2-1",instance="028d15b1-625d-4256-ad0b-e534027e96da",namespace="azure-iot-operations",pod_type="BE"} 0 +aio_broker_state_store_modifications{hostname="aio-broker-dmqtt-backend-1-1",instance="a55e6597-6906-44b5-9678-2d6b2808998c",namespace="azure-iot-operations",pod_type="BE"} 0 +aio_broker_state_store_modifications{hostname="aio-broker-dmqtt-backend-1-0",instance="37d8f0d9-7088-4db3-9c60-72157a5a47f8",namespace="azure-iot-operations",pod_type="BE"} 0 +aio_broker_state_store_modifications{hostname="aio-broker-dmqtt-backend-2-0",instance="aefa2f1f-7576-4ba4-9627-49d4ef0045e5",namespace="azure-iot-operations",pod_type="BE"} 0 +# TYPE aio_broker_state_store_retrievals counter +aio_broker_state_store_retrievals{hostname="aio-broker-dmqtt-backend-1-0",instance="37d8f0d9-7088-4db3-9c60-72157a5a47f8",namespace="azure-iot-operations",pod_type="BE"} 0 +aio_broker_state_store_retrievals{hostname="aio-broker-dmqtt-backend-1-1",instance="a55e6597-6906-44b5-9678-2d6b2808998c",namespace="azure-iot-operations",pod_type="BE"} 0 +aio_broker_state_store_retrievals{hostname="aio-broker-dmqtt-backend-2-0",instance="aefa2f1f-7576-4ba4-9627-49d4ef0045e5",namespace="azure-iot-operations",pod_type="BE"} 0 +aio_broker_state_store_retrievals{hostname="aio-broker-dmqtt-backend-2-1",instance="028d15b1-625d-4256-ad0b-e534027e96da",namespace="azure-iot-operations",pod_type="BE"} 0 +# TYPE aio_broker_unsubscribe_latency_mu_ms gauge +aio_broker_unsubscribe_latency_mu_ms 32087.498 +# TYPE aio_broker_store_total_subscriptions gauge +aio_broker_store_total_subscriptions{hostname="aio-broker-dmqtt-frontend-1",instance="694fdfcb-34a6-4364-a783-dc0e3af80131",namespace="azure-iot-operations",pod_type="FE"} 8 
+aio_broker_store_total_subscriptions{hostname="aio-broker-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 2 +# TYPE aio_broker_unsubscribe_latency_last_value_ms gauge +aio_broker_unsubscribe_latency_last_value_ms 32076.668 +# TYPE aio_broker_ping_latency_last_value_ms gauge +aio_broker_ping_latency_last_value_ms 111.44446 +# TYPE aio_broker_total_sessions gauge +aio_broker_total_sessions{hostname="aio-broker-dmqtt-frontend-1",instance="694fdfcb-34a6-4364-a783-dc0e3af80131",namespace="azure-iot-operations",pod_type="FE"} 5 +aio_broker_total_sessions{hostname="aio-broker-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 2 +# TYPE aio_broker_unsubscribe_route_replication_correctness gauge +aio_broker_unsubscribe_route_replication_correctness{route="aio-broker-diagnostics-probe-0 aio-broker-dmqtt-frontend-1 aio-broker-dmqtt-backend-1-0:1 aio-broker-dmqtt-backend-1-1:1 aio-broker-dmqtt-backend-2-0:1 aio-broker-dmqtt-backend-2-1:1"} 0 +aio_broker_unsubscribe_route_replication_correctness{route="aio-broker-diagnostics-probe-0 aio-broker-dmqtt-frontend-1 aio-broker-dmqtt-backend-2-0:1 aio-broker-dmqtt-backend-2-1:1 aio-broker-dmqtt-backend-1-0:1 aio-broker-dmqtt-backend-1-1:1"} 0 +aio_broker_unsubscribe_route_replication_correctness{route="aio-broker-diagnostics-probe-0 aio-broker-dmqtt-frontend-1 aio-broker-dmqtt-backend-2-0:0 aio-broker-dmqtt-backend-2-1:0 aio-broker-dmqtt-backend-1-0:1 aio-broker-dmqtt-backend-1-1:1"} 0 +aio_broker_unsubscribe_route_replication_correctness{route="aio-broker-diagnostics-probe-0 aio-broker-dmqtt-frontend-1 aio-broker-dmqtt-backend-1-0:1 aio-broker-dmqtt-backend-1-1:1 aio-broker-dmqtt-backend-2-0:0 aio-broker-dmqtt-backend-2-1:0"} 0 +aio_broker_unsubscribe_route_replication_correctness{route="aio-broker-diagnostics-probe-0 aio-broker-dmqtt-frontend-1 aio-broker-dmqtt-backend-1-0:1 aio-broker-dmqtt-backend-1-1:1 aio-broker-dmqtt-backend-1-0:0 aio-broker-dmqtt-backend-1-1:0"} 0 +aio_broker_unsubscribe_route_replication_correctness{route="aio-broker-diagnostics-probe-0 aio-broker-dmqtt-frontend-1 aio-broker-dmqtt-backend-1-0:0 aio-broker-dmqtt-backend-1-1:0 aio-broker-dmqtt-backend-2-0:0 aio-broker-dmqtt-backend-2-1:0"} 0 +# TYPE aio_broker_authentication_failures counter +aio_broker_authentication_failures{category="aio-opc",hostname="aio-broker-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 0 +aio_broker_authentication_failures{category="uncategorized",hostname="aio-broker-dmqtt-backend-1-0",instance="37d8f0d9-7088-4db3-9c60-72157a5a47f8",namespace="azure-iot-operations",pod_type="BE"} 0 +aio_broker_authentication_failures{category="broker_selftest",hostname="aio-broker-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 0 +aio_broker_authentication_failures{category="uncategorized",hostname="aio-broker-dmqtt-backend-2-0",instance="aefa2f1f-7576-4ba4-9627-49d4ef0045e5",namespace="azure-iot-operations",pod_type="BE"} 0 +aio_broker_authentication_failures{category="uncategorized",hostname="aio-broker-dmqtt-backend-1-1",instance="a55e6597-6906-44b5-9678-2d6b2808998c",namespace="azure-iot-operations",pod_type="BE"} 0 +aio_broker_authentication_failures{category="uncategorized",hostname="aio-broker-dmqtt-frontend-1",instance="694fdfcb-34a6-4364-a783-dc0e3af80131",namespace="azure-iot-operations",pod_type="FE"} 0 
+aio_broker_authentication_failures{category="uncategorized",hostname="aio-broker-dmqtt-backend-2-1",instance="028d15b1-625d-4256-ad0b-e534027e96da",namespace="azure-iot-operations",pod_type="BE"} 0 +aio_broker_authentication_failures{category="uncategorized",hostname="aio-broker-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 0 +aio_broker_authentication_failures{category="broker_selftest",hostname="aio-broker-dmqtt-frontend-1",instance="694fdfcb-34a6-4364-a783-dc0e3af80131",namespace="azure-iot-operations",pod_type="FE"} 0 +# TYPE aio_broker_publishes_sent counter +aio_broker_publishes_sent{category="uncategorized",hostname="aio-broker-dmqtt-backend-1-1",instance="a55e6597-6906-44b5-9678-2d6b2808998c",namespace="azure-iot-operations",pod_type="BE"} 4878 +aio_broker_publishes_sent{category="uncategorized",hostname="aio-broker-dmqtt-backend-2-0",instance="aefa2f1f-7576-4ba4-9627-49d4ef0045e5",namespace="azure-iot-operations",pod_type="BE"} 0 +aio_broker_publishes_sent{category="aio-opc",hostname="aio-broker-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 0 +aio_broker_publishes_sent{category="broker_selftest",hostname="aio-broker-dmqtt-frontend-1",instance="694fdfcb-34a6-4364-a783-dc0e3af80131",namespace="azure-iot-operations",pod_type="FE"} 3967 +aio_broker_publishes_sent{category="uncategorized",hostname="aio-broker-dmqtt-backend-1-0",instance="37d8f0d9-7088-4db3-9c60-72157a5a47f8",namespace="azure-iot-operations",pod_type="BE"} 0 +aio_broker_publishes_sent{category="uncategorized",hostname="aio-broker-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 0 +aio_broker_publishes_sent{category="uncategorized",hostname="aio-broker-dmqtt-backend-2-1",instance="028d15b1-625d-4256-ad0b-e534027e96da",namespace="azure-iot-operations",pod_type="BE"} 4784 +aio_broker_publishes_sent{category="uncategorized",hostname="aio-broker-dmqtt-frontend-1",instance="694fdfcb-34a6-4364-a783-dc0e3af80131",namespace="azure-iot-operations",pod_type="FE"} 0 +aio_broker_publishes_sent{category="broker_selftest",hostname="aio-broker-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 0 +# TYPE aio_broker_publishes_received counter +aio_broker_publishes_received{category="aio-opc",hostname="aio-broker-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 96 +aio_broker_publishes_received{category="uncategorized",hostname="aio-broker-dmqtt-backend-2-0",instance="aefa2f1f-7576-4ba4-9627-49d4ef0045e5",namespace="azure-iot-operations",pod_type="BE"} 0 +aio_broker_publishes_received{category="uncategorized",hostname="aio-broker-dmqtt-backend-2-1",instance="028d15b1-625d-4256-ad0b-e534027e96da",namespace="azure-iot-operations",pod_type="BE"} 0 +aio_broker_publishes_received{category="uncategorized",hostname="aio-broker-dmqtt-frontend-1",instance="694fdfcb-34a6-4364-a783-dc0e3af80131",namespace="azure-iot-operations",pod_type="FE"} 0 +aio_broker_publishes_received{category="uncategorized",hostname="aio-broker-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 0 +aio_broker_publishes_received{category="uncategorized",hostname="aio-broker-dmqtt-backend-1-1",instance="a55e6597-6906-44b5-9678-2d6b2808998c",namespace="azure-iot-operations",pod_type="BE"} 0 
+aio_broker_publishes_received{category="uncategorized",hostname="aio-broker-dmqtt-backend-1-0",instance="37d8f0d9-7088-4db3-9c60-72157a5a47f8",namespace="azure-iot-operations",pod_type="BE"} 0 +aio_broker_publishes_received{category="broker_selftest",hostname="aio-broker-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 0 +aio_broker_publishes_received{category="broker_selftest",hostname="aio-broker-dmqtt-frontend-1",instance="694fdfcb-34a6-4364-a783-dc0e3af80131",namespace="azure-iot-operations",pod_type="FE"} 1984 +# TYPE aio_broker_payload_bytes_sent counter +aio_broker_payload_bytes_sent{category="uncategorized",hostname="aio-broker-dmqtt-frontend-1",instance="694fdfcb-34a6-4364-a783-dc0e3af80131",namespace="azure-iot-operations",pod_type="FE"} 0 +aio_broker_payload_bytes_sent{category="uncategorized",hostname="aio-broker-dmqtt-backend-2-0",instance="aefa2f1f-7576-4ba4-9627-49d4ef0045e5",namespace="azure-iot-operations",pod_type="BE"} 0 +aio_broker_payload_bytes_sent{category="aio-opc",hostname="aio-broker-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 0 +aio_broker_payload_bytes_sent{category="broker_selftest",hostname="aio-broker-dmqtt-frontend-1",instance="694fdfcb-34a6-4364-a783-dc0e3af80131",namespace="azure-iot-operations",pod_type="FE"} 3967 +aio_broker_payload_bytes_sent{category="uncategorized",hostname="aio-broker-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 0 +aio_broker_payload_bytes_sent{category="broker_selftest",hostname="aio-broker-dmqtt-frontend-0",instance="73b35e4b-e124-41b6-98da-f23cad761c84",namespace="azure-iot-operations",pod_type="FE"} 0 +aio_broker_payload_bytes_sent{category="uncategorized",hostname="aio-broker-dmqtt-backend-1-0",instance="37d8f0d9-7088-4db3-9c60-72157a5a47f8",namespace="azure-iot-operations",pod_type="BE"} 0 +aio_broker_payload_bytes_sent{category="uncategorized",hostname="aio-broker-dmqtt-backend-1-1",instance="a55e6597-6906-44b5-9678-2d6b2808998c",namespace="azure-iot-operations",pod_type="BE"} 0 +aio_broker_payload_bytes_sent{category="uncategorized",hostname="aio-broker-dmqtt-backend-2-1",instance="028d15b1-625d-4256-ad0b-e534027e96da",namespace="azure-iot-operations",pod_type="BE"} 0 # EOF diff --git a/azext_edge/tests/edge/mq/traces_data.py b/azext_edge/tests/edge/mq/traces_data.py index 2ce168ea1..1ff691e4d 100644 --- a/azext_edge/tests/edge/mq/traces_data.py +++ b/azext_edge/tests/edge/mq/traces_data.py @@ -21,7 +21,7 @@ class TestTraceData(NamedTuple): "resourceSpans": [ { "resource": { - "attributes": [{"key": "service.name", "value": {"stringValue": "aio-mq-dmqtt-backend-2-1"}}] + "attributes": [{"key": "service.name", "value": {"stringValue": "aio-broker-dmqtt-backend-2-1"}}] }, "scopeSpans": [ { @@ -61,7 +61,7 @@ class TestTraceData(NamedTuple): "resourceSpans": [ { "resource": { - "attributes": [{"key": "service.name", "value": {"stringValue": "aio-mq-dmqtt-backend-2-1"}}] + "attributes": [{"key": "service.name", "value": {"stringValue": "aio-broker-dmqtt-backend-2-1"}}] }, "scopeSpans": [ { @@ -91,7 +91,7 @@ class TestTraceData(NamedTuple): }, { "resource": { - "attributes": [{"key": "service.name", "value": {"stringValue": "aio-mq-dmqtt-backend-2-1"}}] + "attributes": [{"key": "service.name", "value": {"stringValue": "aio-broker-dmqtt-backend-2-1"}}] }, "scopeSpans": [ { @@ -121,7 +121,7 @@ class TestTraceData(NamedTuple): }, { 
"resource": { - "attributes": [{"key": "service.name", "value": {"stringValue": "aio-mq-dmqtt-backend-2-1"}}] + "attributes": [{"key": "service.name", "value": {"stringValue": "aio-broker-dmqtt-backend-2-1"}}] }, "scopeSpans": [ { @@ -152,7 +152,7 @@ class TestTraceData(NamedTuple): }, { "resource": { - "attributes": [{"key": "service.name", "value": {"stringValue": "aio-mq-dmqtt-frontend-0"}}] + "attributes": [{"key": "service.name", "value": {"stringValue": "aio-broker-dmqtt-frontend-0"}}] }, "scopeSpans": [ { @@ -182,7 +182,7 @@ class TestTraceData(NamedTuple): }, { "resource": { - "attributes": [{"key": "service.name", "value": {"stringValue": "aio-mq-dmqtt-frontend-0"}}] + "attributes": [{"key": "service.name", "value": {"stringValue": "aio-broker-dmqtt-frontend-0"}}] }, "scopeSpans": [ { @@ -212,7 +212,7 @@ class TestTraceData(NamedTuple): }, { "resource": { - "attributes": [{"key": "service.name", "value": {"stringValue": "aio-mq-dmqtt-frontend-0"}}] + "attributes": [{"key": "service.name", "value": {"stringValue": "aio-broker-dmqtt-frontend-0"}}] }, "scopeSpans": [ { @@ -242,7 +242,7 @@ class TestTraceData(NamedTuple): }, { "resource": { - "attributes": [{"key": "service.name", "value": {"stringValue": "aio-mq-dmqtt-frontend-0"}}] + "attributes": [{"key": "service.name", "value": {"stringValue": "aio-broker-dmqtt-frontend-0"}}] }, "scopeSpans": [ { @@ -264,7 +264,7 @@ class TestTraceData(NamedTuple): "stringValue": '{"id":1590916709755722418,"version":0,"trace_id":176088501121717014071716630412632417632,"parent_id":4535909789485131942,"flags":1}' }, }, - {"key": "pod_id", "value": {"stringValue": "aio-mq-dmqtt-frontend-0"}}, + {"key": "pod_id", "value": {"stringValue": "aio-broker-dmqtt-frontend-0"}}, ], "status": {}, } @@ -274,7 +274,7 @@ class TestTraceData(NamedTuple): }, { "resource": { - "attributes": [{"key": "service.name", "value": {"stringValue": "aio-mq-dmqtt-frontend-0"}}] + "attributes": [{"key": "service.name", "value": {"stringValue": "aio-broker-dmqtt-frontend-0"}}] }, "scopeSpans": [ { @@ -304,7 +304,7 @@ class TestTraceData(NamedTuple): }, { "resource": { - "attributes": [{"key": "service.name", "value": {"stringValue": "aio-mq-dmqtt-frontend-0"}}] + "attributes": [{"key": "service.name", "value": {"stringValue": "aio-broker-dmqtt-frontend-0"}}] }, "scopeSpans": [ { @@ -334,7 +334,7 @@ class TestTraceData(NamedTuple): }, { "resource": { - "attributes": [{"key": "service.name", "value": {"stringValue": "aio-mq-dmqtt-frontend-0"}}] + "attributes": [{"key": "service.name", "value": {"stringValue": "aio-broker-dmqtt-frontend-0"}}] }, "scopeSpans": [ { @@ -364,7 +364,7 @@ class TestTraceData(NamedTuple): }, { "resource": { - "attributes": [{"key": "service.name", "value": {"stringValue": "aio-mq-dmqtt-backend-2-0"}}] + "attributes": [{"key": "service.name", "value": {"stringValue": "aio-broker-dmqtt-backend-2-0"}}] }, "scopeSpans": [ { @@ -394,7 +394,7 @@ class TestTraceData(NamedTuple): }, { "resource": { - "attributes": [{"key": "service.name", "value": {"stringValue": "aio-mq-dmqtt-backend-2-0"}}] + "attributes": [{"key": "service.name", "value": {"stringValue": "aio-broker-dmqtt-backend-2-0"}}] }, "scopeSpans": [ { @@ -424,7 +424,7 @@ class TestTraceData(NamedTuple): }, { "resource": { - "attributes": [{"key": "service.name", "value": {"stringValue": "aio-mq-dmqtt-backend-2-0"}}] + "attributes": [{"key": "service.name", "value": {"stringValue": "aio-broker-dmqtt-backend-2-0"}}] }, "scopeSpans": [ { @@ -455,7 +455,7 @@ class TestTraceData(NamedTuple): }, { 
"resource": { - "attributes": [{"key": "service.name", "value": {"stringValue": "aio-mq-dmqtt-backend-2-0"}}] + "attributes": [{"key": "service.name", "value": {"stringValue": "aio-broker-dmqtt-backend-2-0"}}] }, "scopeSpans": [ { @@ -480,10 +480,10 @@ class TestTraceData(NamedTuple): { "key": "cell_map", "value": { - "stringValue": "ReplicaCellMap { id: aio-mq-dmqtt-backend-2-0:1, position: Some(Backend((3, 0, Replica))), cell_map: CellMap { backends: [Chain { id: 0, replicas: [ReplicaInfo { state: Ready, role:Replica, address: aio-mq-dmqtt-backend-1-0.aio-mq-dmqtt-backend.azure-iot" + "stringValue": "ReplicaCellMap { id: aio-broker-dmqtt-backend-2-0:1, position: Some(Backend((3, 0, Replica))), cell_map: CellMap { backends: [Chain { id: 0, replicas: [ReplicaInfo { state: Ready, role:Replica, address: aio-broker-dmqtt-backend-1-0.aio-broker-dmqtt-backend.azure-iot" }, }, - {"key": "pod_id", "value": {"stringValue": "aio-mq-dmqtt-backend-2-0:1"}}, + {"key": "pod_id", "value": {"stringValue": "aio-broker-dmqtt-backend-2-0:1"}}, ], "status": {}, } @@ -493,7 +493,7 @@ class TestTraceData(NamedTuple): }, { "resource": { - "attributes": [{"key": "service.name", "value": {"stringValue": "aio-mq-dmqtt-backend-2-0"}}] + "attributes": [{"key": "service.name", "value": {"stringValue": "aio-broker-dmqtt-backend-2-0"}}] }, "scopeSpans": [ { @@ -523,7 +523,7 @@ class TestTraceData(NamedTuple): }, { "resource": { - "attributes": [{"key": "service.name", "value": {"stringValue": "aio-mq-dmqtt-backend-2-0"}}] + "attributes": [{"key": "service.name", "value": {"stringValue": "aio-broker-dmqtt-backend-2-0"}}] }, "scopeSpans": [ { @@ -553,7 +553,7 @@ class TestTraceData(NamedTuple): }, { "resource": { - "attributes": [{"key": "service.name", "value": {"stringValue": "aio-mq-dmqtt-backend-2-0"}}] + "attributes": [{"key": "service.name", "value": {"stringValue": "aio-broker-dmqtt-backend-2-0"}}] }, "scopeSpans": [ { @@ -583,7 +583,7 @@ class TestTraceData(NamedTuple): }, { "resource": { - "attributes": [{"key": "service.name", "value": {"stringValue": "aio-mq-dmqtt-backend-2-1"}}] + "attributes": [{"key": "service.name", "value": {"stringValue": "aio-broker-dmqtt-backend-2-1"}}] }, "scopeSpans": [ { @@ -599,11 +599,11 @@ class TestTraceData(NamedTuple): "endTimeUnixNano": "1701380840984477448", "attributes": [ {"key": "cell_map_version", "value": {"stringValue": "3"}}, - {"key": "pod_id", "value": {"stringValue": "aio-mq-dmqtt-backend-2-1:1"}}, + {"key": "pod_id", "value": {"stringValue": "aio-broker-dmqtt-backend-2-1:1"}}, { "key": "cell_map", "value": { - "stringValue": "ReplicaCellMap { id: aio-mq-dmqtt-backend-2-1:1, position: Some(Backend((3, 1, Tail))), cell_map: CellMap { backends: [Chain { id: 0, replicas: [ReplicaInfo{ state: Ready, role: Replica, address: aio-mq-dmqtt-backend-1-0.aio-mq-dmqtt-backend.azure-iot-op" + "stringValue": "ReplicaCellMap { id: aio-broker-dmqtt-backend-2-1:1, position: Some(Backend((3, 1, Tail))), cell_map: CellMap { backends: [Chain { id: 0, replicas: [ReplicaInfo{ state: Ready, role: Replica, address: aio-broker-dmqtt-backend-1-0.aio-broker-dmqtt-backend.azure-iot-op" }, }, { @@ -621,7 +621,7 @@ class TestTraceData(NamedTuple): }, { "resource": { - "attributes": [{"key": "service.name", "value": {"stringValue": "aio-mq-dmqtt-backend-2-1"}}] + "attributes": [{"key": "service.name", "value": {"stringValue": "aio-broker-dmqtt-backend-2-1"}}] }, "scopeSpans": [ { @@ -651,7 +651,7 @@ class TestTraceData(NamedTuple): }, { "resource": { - "attributes": [{"key": "service.name", 
"value": {"stringValue": "aio-mq-dmqtt-backend-2-1"}}] + "attributes": [{"key": "service.name", "value": {"stringValue": "aio-broker-dmqtt-backend-2-1"}}] }, "scopeSpans": [ { @@ -681,7 +681,7 @@ class TestTraceData(NamedTuple): }, { "resource": { - "attributes": [{"key": "service.name", "value": {"stringValue": "aio-mq-dmqtt-backend-2-1"}}] + "attributes": [{"key": "service.name", "value": {"stringValue": "aio-broker-dmqtt-backend-2-1"}}] }, "scopeSpans": [ { @@ -711,7 +711,7 @@ class TestTraceData(NamedTuple): }, { "resource": { - "attributes": [{"key": "service.name", "value": {"stringValue": "aio-mq-dmqtt-backend-2-1"}}] + "attributes": [{"key": "service.name", "value": {"stringValue": "aio-broker-dmqtt-backend-2-1"}}] }, "scopeSpans": [ { @@ -741,7 +741,7 @@ class TestTraceData(NamedTuple): }, { "resource": { - "attributes": [{"key": "service.name", "value": {"stringValue": "aio-mq-dmqtt-backend-2-1"}}] + "attributes": [{"key": "service.name", "value": {"stringValue": "aio-broker-dmqtt-backend-2-1"}}] }, "scopeSpans": [ { @@ -771,7 +771,7 @@ class TestTraceData(NamedTuple): }, { "resource": { - "attributes": [{"key": "service.name", "value": {"stringValue": "aio-mq-dmqtt-backend-2-1"}}] + "attributes": [{"key": "service.name", "value": {"stringValue": "aio-broker-dmqtt-backend-2-1"}}] }, "scopeSpans": [ { @@ -801,7 +801,7 @@ class TestTraceData(NamedTuple): }, { "resource": { - "attributes": [{"key": "service.name", "value": {"stringValue": "aio-mq-dmqtt-backend-2-1"}}] + "attributes": [{"key": "service.name", "value": {"stringValue": "aio-broker-dmqtt-backend-2-1"}}] }, "scopeSpans": [ { @@ -831,7 +831,7 @@ class TestTraceData(NamedTuple): }, { "resource": { - "attributes": [{"key": "service.name", "value": {"stringValue": "aio-mq-dmqtt-backend-2-1"}}] + "attributes": [{"key": "service.name", "value": {"stringValue": "aio-broker-dmqtt-backend-2-1"}}] }, "scopeSpans": [ { @@ -861,7 +861,7 @@ class TestTraceData(NamedTuple): }, { "resource": { - "attributes": [{"key": "service.name", "value": {"stringValue": "aio-mq-diagnostics-probe-0"}}] + "attributes": [{"key": "service.name", "value": {"stringValue": "aio-broker-diagnostics-probe-0"}}] }, "scopeSpans": [ { @@ -907,6 +907,6 @@ class TestTraceData(NamedTuple): ], "status": {}, }, - resource_name="aio-mq-diagnostics-probe-0", + resource_name="aio-broker-diagnostics-probe-0", timestamp=datetime(2023, 11, 30, 21, 47, 20, 937646), ) diff --git a/azext_edge/tests/edge/orchestration/resources/test_broker_listeners_unit.py b/azext_edge/tests/edge/orchestration/resources/test_broker_listeners_unit.py index f6a828561..6d8a7031d 100644 --- a/azext_edge/tests/edge/orchestration/resources/test_broker_listeners_unit.py +++ b/azext_edge/tests/edge/orchestration/resources/test_broker_listeners_unit.py @@ -46,7 +46,7 @@ def get_mock_broker_listener_record( } ], "provisioningState": "Succeeded", - "serviceName": "aio-mq-dmqtt-frontend", + "serviceName": "aio-broker-dmqtt-frontend", "serviceType": "ClusterIp", }, resource_group_name=resource_group_name, diff --git a/azext_edge/tests/edge/support/create_bundle_int/helpers.py b/azext_edge/tests/edge/support/create_bundle_int/helpers.py index 4e0f4f269..be9c4b7f2 100644 --- a/azext_edge/tests/edge/support/create_bundle_int/helpers.py +++ b/azext_edge/tests/edge/support/create_bundle_int/helpers.py @@ -83,8 +83,8 @@ def convert_file_names(files: List[str]) -> Dict[str, List[Dict[str, str]]]: if "trace" not in file_name_objs: file_name_objs["trace"] = [] # trace file - # 
aio-mq-dmqtt-frontend-1.Publish.b9c3173d9c2b97b75edfb6cf7cb482f2.otlp.pb
-        # aio-mq-dmqtt-frontend-1.Publish.b9c3173d9c2b97b75edfb6cf7cb482f2.tempo.json
+        # aio-broker-dmqtt-frontend-1.Publish.b9c3173d9c2b97b75edfb6cf7cb482f2.otlp.pb
+        # aio-broker-dmqtt-frontend-1.Publish.b9c3173d9c2b97b75edfb6cf7cb482f2.tempo.json
         name_obj["name"] = file_type
         name_obj["action"] = name.pop(0).lower()
         name_obj["identifier"] = name.pop(0)

From bd18b9e6412c73c87c3999412ff54ece7e453154 Mon Sep 17 00:00:00 2001
From: Victoria Litvinova <73560279+vilit1@users.noreply.github.com>
Date: Mon, 21 Oct 2024 10:24:26 -0700
Subject: [PATCH 06/26] feat: add `az iot ops schema` and `az iot ops schema version` (#407)

---
 azext_edge/edge/_help.py                      | 152 ++++++
 azext_edge/edge/command_map.py                |  19 +
 azext_edge/edge/commands_schema.py            | 130 ++++-
 azext_edge/edge/params.py                     |  99 ++++
 .../edge/providers/orchestration/common.py    |  26 +
 .../orchestration/resources/__init__.py       |   4 +-
 .../resources/schema_registries.py            | 243 ++++++++-
 .../resources/test_schema_int.py              | 262 ++++++++++
 .../resources/test_schema_unit.py             | 467 ++++++++++++++++++
 9 files changed, 1395 insertions(+), 7 deletions(-)
 create mode 100644 azext_edge/tests/edge/orchestration/resources/test_schema_int.py
 create mode 100644 azext_edge/tests/edge/orchestration/resources/test_schema_unit.py

diff --git a/azext_edge/edge/_help.py b/azext_edge/edge/_help.py
index d6422e1fe..86b7b76fc 100644
--- a/azext_edge/edge/_help.py
+++ b/azext_edge/edge/_help.py
@@ -1140,6 +1140,69 @@ def load_iotops_help():
         long-summary: |
            Schemas are documents that describe data to enable processing and contextualization.
            Message schemas describe the format of a message and its contents.
+            A schema registry is required to create and manage schemas.
+    """

+    helps[
+        "iot ops schema show"
+    ] = """
+    type: command
+    short-summary: Show details of a schema within a schema registry.
+    examples:
+    - name: Show details of target schema 'myschema' within a schema registry 'myregistry'.
+      text: >
+        az iot ops schema show --name myschema --registry myregistry -g myresourcegroup
+    """

+    helps[
+        "iot ops schema list"
+    ] = """
+    type: command
+    short-summary: List schemas within a schema registry.
+    examples:
+    - name: List schemas in the schema registry 'myregistry'.
+      text: >
+        az iot ops schema list -g myresourcegroup --registry myregistry
+    """

+    helps[
+        "iot ops schema delete"
+    ] = """
+    type: command
+    short-summary: Delete a target schema within a schema registry.
+    examples:
+    - name: Delete a target schema 'myschema' within a schema registry 'myregistry'.
+      text: >
+        az iot ops schema delete --name myschema --registry myregistry -g myresourcegroup
+    """

+    helps[
+        "iot ops schema create"
+    ] = """
+    type: command
+    short-summary: Create a schema within a schema registry.
+    long-summary: |
+        This operation requires a pre-created schema registry and will add a schema version.
+        To create the schema and add a version, the associated storage account will need to have public network access enabled.
+        For more information on the delta file format, please see aka.ms/lakehouse-delta-sample
+    examples:
+    - name: Create a schema called 'myschema' in the registry 'myregistry' with minimum inputs. Schema version 1 will be created for this schema using the file content.
+ text: > + az iot ops schema create -n myschema -g myresourcegroup --registry myregistry + --format json --type message --version-content myschema.json + - name: Create a schema called 'myschema' with additional customization. Schema version 14 will be created for this schema. The inline content is a powershell syntax example. + text: > + az iot ops schema create -n myschema -g myresourcegroup --registry myregistry + --format delta --type message --desc "Schema for Assets" --display-name myassetschema + --version-content '{\\\"hello\\\": \\\"world\\\"}' --ver 14 --vd "14th version" + - name: Create a schema called 'myschema'. Schema version 1 will be created for this schema. The inline content is a cmd syntax example. + text: > + az iot ops schema create -n myschema -g myresourcegroup --registry myregistry + --format json --type message --version-content "{\\\"hello\\\": \\\"world\\\"}" + - name: Create a schema called 'myschema'. Schema version 1 will be created for this schema. The inline content is a bash syntax example. + text: > + az iot ops schema create -n myschema -g myresourcegroup --registry myregistry + --format json --type message --version-content '{"hello": "world"}' """ helps[ @@ -1218,3 +1281,92 @@ def load_iotops_help(): --sa-resource-id $STORAGE_ACCOUNT_RESOURCE_ID --sa-container myschemacontainer -l westus2 --desc 'Contoso factory X1 schemas' --display-name 'Contoso X1' --tags env=prod """ + + helps[ + "iot ops schema version" + ] = """ + type: group + short-summary: Schema version management. + long-summary: | + A schema version contains the schema content associated with that version. + """ + + helps[ + "iot ops schema version show" + ] = """ + type: command + short-summary: Show details of a schema version. + examples: + - name: Show details of target schema version 1. + text: > + az iot ops schema version show --name 1 --schema myschema --registry myregistry -g myresourcegroup + """ + + helps[ + "iot ops schema version list" + ] = """ + type: command + short-summary: List schema versions for a specific schema. + examples: + - name: List all schema versions for the schema 'myschema' in the schema registry 'myregistry'. + text: > + az iot ops schema version list -g myresourcegroup --registry myregistry --schema myschema + """ + + helps[ + "iot ops schema version remove" + ] = """ + type: command + short-summary: Remove a target schema version. + examples: + - name: Remove schema version 1. + text: > + az iot ops schema version remove -n 1 -g myresourcegroup --registry myregistry --schema myschema + """ + + helps[ + "iot ops schema version add" + ] = """ + type: command + short-summary: Add a schema version to a schema. + long-summary: | + To add a version, the associated storage account will need to have public network access enabled. + For more information on the delta file format, please see aka.ms/lakehouse-delta-sample + examples: + - name: Add a schema version 1 to a schema called 'myschema' within the registry 'myregistry' with + minimum inputs. The content is inline json (powershell syntax example). + text: > + az iot ops schema version add -n 1 -g myresourcegroup --registry myregistry --schema myschema --content '{\\\"hello\\\": \\\"world\\\"}' + - name: Add a schema version 1 to a schema called 'myschema' within the registry 'myregistry' with + minimum inputs. The content is inline json (cmd syntax example). 
+ text: > + az iot ops schema version add -n 1 -g myresourcegroup --registry myregistry --schema myschema --content "{\\\"hello\\\": \\\"world\\\"}" + - name: Add a schema version 1 to a schema called 'myschema' within the registry 'myregistry' with + minimum inputs. The content is inline json (bash syntax example). + text: > + az iot ops schema version add -n 1 -g myresourcegroup --registry myregistry --schema myschema --content '{"hello": "world"}' + - name: Add a schema version 2 to a schema called 'myschema' within the registry 'myregistry' with + a description. The file should contain the schema content. + text: > + az iot ops schema version add -n 2 -g myresourcegroup --registry myregistry --schema myschema --content myschemav2.json --desc "New schema" + """ + + helps[ + "iot ops schema show-dataflow-refs" + ] = """ + type: command + short-summary: Show the schema references used for dataflows. + examples: + - name: Show schema reference for schema "myschema" and version 1. + text: > + az iot ops schema show-dataflow-refs --version 1 --schema myschema --registry myregistry -g myresourcegroup + - name: Show schema reference for all versions in schema "myschema". + text: > + az iot ops schema show-dataflow-refs --schema myschema --registry myregistry -g myresourcegroup + - name: Show schema reference for all versions and schemas in schema registry "myregistry". + text: > + az iot ops schema show-dataflow-refs --registry myregistry -g myresourcegroup + - name: Show schema reference for all schemas but only the latest versions in schema registry "myregistry". + text: > + az iot ops schema show-dataflow-refs --registry myregistry -g myresourcegroup --latest + """ diff --git a/azext_edge/edge/command_map.py b/azext_edge/edge/command_map.py index 31dfa55fd..44a7a853e 100644 --- a/azext_edge/edge/command_map.py +++ b/azext_edge/edge/command_map.py @@ -164,6 +164,16 @@ def load_iotops_commands(self, _): ) as cmd_group: cmd_group.command("opcua", "create_opcua_asset_endpoint_profile") + with self.command_group( + "iot ops schema", + command_type=schema_resource_ops, + ) as cmd_group: + cmd_group.command("create", "create_schema") + cmd_group.show_command("show", "show_schema") + cmd_group.command("list", "list_schemas") + cmd_group.command("show-dataflow-refs", "list_schema_versions_dataflow_format") + cmd_group.command("delete", "delete_schema") + with self.command_group( "iot ops schema registry", command_type=schema_resource_ops, @@ -172,3 +182,12 @@ def load_iotops_commands(self, _): cmd_group.show_command("show", "show_registry") cmd_group.command("list", "list_registries") cmd_group.command("delete", "delete_registry") + + with self.command_group( + "iot ops schema version", + command_type=schema_resource_ops, + ) as cmd_group: + cmd_group.command("add", "add_version") + cmd_group.show_command("show", "show_version") + cmd_group.command("list", "list_versions") + cmd_group.command("remove", "remove_version") diff --git a/azext_edge/edge/commands_schema.py b/azext_edge/edge/commands_schema.py index 24ca93fef..b4baf3825 100644 --- a/azext_edge/edge/commands_schema.py +++ b/azext_edge/edge/commands_schema.py @@ -8,7 +8,7 @@ from knack.log import get_logger -from .providers.orchestration.resources import SchemaRegistries +from .providers.orchestration.resources import SchemaRegistries, Schemas logger = get_logger(__name__) @@ -60,3 +60,131 @@ def delete_registry( return SchemaRegistries(cmd).delete( name=schema_registry_name, resource_group_name=resource_group_name, confirm_yes=confirm_yes, 
**kwargs ) + + +# Schemas +def create_schema( + cmd, + schema_name: str, + schema_registry_name: str, + resource_group_name: str, + schema_type: str, + schema_format: str, + schema_version_content: str, + schema_version: int = 1, + description: Optional[str] = None, + display_name: Optional[str] = None, + schema_version_description: Optional[str] = None +) -> dict: + return Schemas(cmd).create( + name=schema_name, + schema_registry_name=schema_registry_name, + schema_type=schema_type, + schema_format=schema_format, + description=description, + display_name=display_name, + resource_group_name=resource_group_name, + schema_version_content=schema_version_content, + schema_version=schema_version, + schema_version_description=schema_version_description + ) + + +def show_schema(cmd, schema_name: str, schema_registry_name: str, resource_group_name: str) -> dict: + return Schemas(cmd).show( + name=schema_name, + schema_registry_name=schema_registry_name, + resource_group_name=resource_group_name + ) + + +def list_schemas(cmd, schema_registry_name: str, resource_group_name: str) -> dict: + return Schemas(cmd).list(schema_registry_name=schema_registry_name, resource_group_name=resource_group_name) + + +def list_schema_versions_dataflow_format( + cmd, + schema_registry_name: str, + resource_group_name: str, + schema_name: Optional[str] = None, + schema_version: Optional[str] = None, + latest: Optional[bool] = None +) -> dict: + return Schemas(cmd).list_dataflow_friendly_versions( + schema_registry_name=schema_registry_name, + resource_group_name=resource_group_name, + schema_name=schema_name, + schema_version=schema_version, + latest=latest + ) + + +def delete_schema( + cmd, + schema_name: str, + schema_registry_name: str, + resource_group_name: str, + confirm_yes: Optional[bool] = None, +) -> dict: + return Schemas(cmd).delete( + name=schema_name, + schema_registry_name=schema_registry_name, + resource_group_name=resource_group_name, + confirm_yes=confirm_yes, + ) + + +# Versions +def add_version( + cmd, + version_name: int, + schema_name: str, + schema_registry_name: str, + resource_group_name: str, + schema_version_content: str, + description: Optional[str] = None, +) -> dict: + return Schemas(cmd).add_version( + name=version_name, + schema_name=schema_name, + schema_registry_name=schema_registry_name, + schema_version_content=schema_version_content, + description=description, + resource_group_name=resource_group_name, + ) + + +def show_version( + cmd, version_name: int, schema_name: str, schema_registry_name: str, resource_group_name: str +) -> dict: + return Schemas(cmd).show_version( + name=version_name, + schema_name=schema_name, + schema_registry_name=schema_registry_name, + resource_group_name=resource_group_name + ) + + +def list_versions( + cmd, schema_name: str, schema_registry_name: str, resource_group_name: str +) -> dict: + return Schemas(cmd).list_versions( + schema_name=schema_name, + schema_registry_name=schema_registry_name, + resource_group_name=resource_group_name + ) + + +def remove_version( + cmd, + version_name: int, + schema_name: str, + schema_registry_name: str, + resource_group_name: str, +) -> dict: + return Schemas(cmd).remove_version( + name=version_name, + schema_name=schema_name, + schema_registry_name=schema_registry_name, + resource_group_name=resource_group_name + ) diff --git a/azext_edge/edge/params.py b/azext_edge/edge/params.py index 9fd3f4979..674c76065 100644 --- a/azext_edge/edge/params.py +++ b/azext_edge/edge/params.py @@ -31,6 +31,8 @@ MqMemoryProfile, 
MqServiceType,
     TRUST_SETTING_KEYS,
+    SchemaFormat,
+    SchemaType,
 )
 
 
@@ -1134,6 +1136,79 @@ def load_iotops_arguments(self, _):
             arg_group="Connector",
         )
 
+        with self.argument_context("iot ops schema") as context:
+            context.argument(
+                "schema_name",
+                options_list=["--name", "-n"],
+                help="Schema name.",
+            )
+            context.argument(
+                "schema_registry_name",
+                options_list=["--registry"],
+                help="Schema registry name.",
+            )
+            context.argument(
+                "schema_format",
+                options_list=["--format"],
+                help="Schema format.",
+                arg_type=get_enum_type(SchemaFormat)
+            )
+            context.argument(
+                "schema_type",
+                options_list=["--type"],
+                help="Schema type.",
+                arg_type=get_enum_type(SchemaType)
+            )
+            context.argument(
+                "description",
+                options_list=["--desc"],
+                help="Description for the schema.",
+            )
+            context.argument(
+                "display_name",
+                options_list=["--display-name"],
+                help="Display name for the schema.",
+            )
+            context.argument(
+                "schema_version",
+                options_list=["--version", "--ver"],
+                help="Schema version name.",
+                type=int,
+                arg_group="Version"
+            )
+            context.argument(
+                "schema_version_content",
+                options_list=["--version-content", "--vc"],
+                help="File path containing the version content, or the content provided inline.",
+                arg_group="Version"
+            )
+            context.argument(
+                "schema_version_description",
+                options_list=["--version-desc", "--vd"],
+                help="Description for the version.",
+                arg_group="Version"
+            )
+
+        with self.argument_context("iot ops schema show-dataflow-refs") as context:
+            context.argument(
+                "schema_name",
+                options_list=["--schema"],
+                help="Schema name. Required if using --version.",
+            )
+            context.argument(
+                "schema_version",
+                options_list=["--version", "--ver"],
+                help="Schema version name. If used, --latest will be ignored.",
+                type=int,
+                arg_group=None
+            )
+            context.argument(
+                "latest",
+                options_list=["--latest"],
+                help="Flag to show only the latest version(s).",
+                arg_type=get_three_state_flag(),
+            )
+
         with self.argument_context("iot ops schema registry") as context:
             context.argument(
                 "schema_registry_name",
@@ -1184,3 +1259,27 @@ def load_iotops_arguments(self, _):
                 help="Fully qualified role definition Id in the following format: "
                 "/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/roleDefinitions/{roleId}",
             )
+
+        with self.argument_context("iot ops schema version") as context:
+            context.argument(
+                "version_name",
+                options_list=["--name", "-n"],
+                help="Schema version name.",
+                type=int
+            )
+            context.argument(
+                "schema_name",
+                options_list=["--schema"],
+                help="Schema name.",
+            )
+            context.argument(
+                "description",
+                options_list=["--desc"],
+                help="Description for the schema version.",
+            )
+            context.argument(
+                "schema_version_content",
+                options_list=["--content"],
+                help="File path containing the version content, or the content provided inline.",
+                arg_group=None
+            )
diff --git a/azext_edge/edge/providers/orchestration/common.py b/azext_edge/edge/providers/orchestration/common.py
index 8f527197b..6e3ca15ba 100644
--- a/azext_edge/edge/providers/orchestration/common.py
+++ b/azext_edge/edge/providers/orchestration/common.py
@@ -59,3 +59,29 @@ class KubernetesDistroType(Enum):
 
 class IdentityUsageType(Enum):
     dataflow = "dataflow"
+
+
+class SchemaType(Enum):
+    """value is the user-friendly form; full_value is the service-friendly form."""
+    message = "message"
+
+    @property
+    def full_value(self) -> str:
+        type_map = {
+            SchemaType.message: "MessageSchema"
+        }
+        return type_map[self]
+
+
+class SchemaFormat(Enum):
+    """value is the user-friendly form; full_value is the service-friendly form."""
+    json = "json"
+    delta 
= "delta" + + @property + def full_value(self) -> str: + format_map = { + SchemaFormat.json: "JsonSchema/draft-07", + SchemaFormat.delta: "Delta/1.0" + } + return format_map[self] diff --git a/azext_edge/edge/providers/orchestration/resources/__init__.py b/azext_edge/edge/providers/orchestration/resources/__init__.py index 3493bd7ca..014128f67 100644 --- a/azext_edge/edge/providers/orchestration/resources/__init__.py +++ b/azext_edge/edge/providers/orchestration/resources/__init__.py @@ -8,7 +8,8 @@ from .clusters import ConnectedClusters from .dataflows import DataFlowEndpoints, DataFlowProfiles from .instances import Instances -from .schema_registries import SchemaRegistries +from .schema_registries import SchemaRegistries, Schemas + __all__ = [ "Brokers", @@ -17,4 +18,5 @@ "DataFlowProfiles", "Instances", "SchemaRegistries", + "Schemas", ] diff --git a/azext_edge/edge/providers/orchestration/resources/schema_registries.py b/azext_edge/edge/providers/orchestration/resources/schema_registries.py index da4b800e2..66460198d 100644 --- a/azext_edge/edge/providers/orchestration/resources/schema_registries.py +++ b/azext_edge/edge/providers/orchestration/resources/schema_registries.py @@ -6,8 +6,8 @@ from typing import TYPE_CHECKING, Iterable, Optional -from azure.cli.core.azclierror import ValidationError -from azure.core.exceptions import ResourceNotFoundError +from azure.cli.core.azclierror import ValidationError, FileOperationError, ForbiddenError, InvalidArgumentValueError +from azure.core.exceptions import HttpResponseError, ResourceNotFoundError from knack.log import get_logger from rich.console import Console @@ -20,18 +20,19 @@ ) from ....util.common import should_continue_prompt from ....util.queryable import Queryable +from ..common import SchemaFormat, SchemaType from ..permissions import PermissionManager, ROLE_DEF_FORMAT_STR logger = get_logger(__name__) - console = Console() - - if TYPE_CHECKING: from ....vendor.clients.deviceregistrymgmt.operations import ( SchemaRegistriesOperations, + SchemasOperations, + SchemaVersionsOperations, ) + STORAGE_BLOB_DATA_CONTRIBUTOR_ROLE_ID = "ba92f5b4-2d11-453d-a403-e96b0029c9fe" @@ -164,3 +165,235 @@ def delete(self, name: str, resource_group_name: str, confirm_yes: Optional[bool with console.status("Working..."): poller = self.ops.begin_delete(resource_group_name=resource_group_name, schema_registry_name=name) return wait_for_terminal_state(poller, **kwargs) + + +class Schemas(Queryable): + def __init__(self, cmd): + super().__init__(cmd=cmd) + self.registry_mgmt_client = get_registry_mgmt_client( + subscription_id=self.default_subscription_id, + ) + self.ops: "SchemasOperations" = self.registry_mgmt_client.schemas + self.version_ops: "SchemaVersionsOperations" = self.registry_mgmt_client.schema_versions + + def create( + self, + name: str, + schema_registry_name: str, + resource_group_name: str, + schema_type: str, + schema_format: str, + schema_version_content: str, + schema_version: int = 1, + description: Optional[str] = None, + display_name: Optional[str] = None, + schema_version_description: Optional[str] = None + ) -> dict: + with console.status("Working...") as c: + schema_type = SchemaType[schema_type].full_value + schema_format = SchemaFormat[schema_format].full_value + resource = { + "properties": { + "format": schema_format, + "schemaType": schema_type, + "description": description, + "displayName": display_name, + }, + } + schema = self.ops.create_or_replace( + resource_group_name=resource_group_name, + 
schema_registry_name=schema_registry_name,
+                schema_name=name,
+                resource=resource
+            )
+            logger.info(f"Created schema {name}.")
+            # TODO: maybe add in an exception catch for auth errors
+            self.add_version(
+                name=schema_version,
+                schema_version_content=schema_version_content,
+                schema_name=name,
+                schema_registry_name=schema_registry_name,
+                resource_group_name=resource_group_name,
+                description=schema_version_description,
+                current_console=c
+            )
+            logger.info(f"Added version {schema_version} to schema {name}.")
+            return schema
+
+    def show(self, name: str, schema_registry_name: str, resource_group_name: str) -> dict:
+        return self.ops.get(
+            resource_group_name=resource_group_name,
+            schema_registry_name=schema_registry_name,
+            schema_name=name
+        )
+
+    def list(self, schema_registry_name: str, resource_group_name: str) -> Iterable[dict]:
+        return self.ops.list_by_schema_registry(
+            resource_group_name=resource_group_name, schema_registry_name=schema_registry_name
+        )
+
+    def delete(
+        self,
+        name: str,
+        schema_registry_name: str,
+        resource_group_name: str,
+        confirm_yes: Optional[bool] = None,
+    ):
+        if not should_continue_prompt(confirm_yes=confirm_yes):
+            return
+
+        with console.status("Working..."):
+            return self.ops.delete(
+                resource_group_name=resource_group_name,
+                schema_registry_name=schema_registry_name,
+                schema_name=name
+            )
+
+    def add_version(
+        self,
+        name: int,
+        schema_name: str,
+        schema_registry_name: str,
+        resource_group_name: str,
+        schema_version_content: str,
+        description: Optional[str] = None,
+        current_console: Optional[Console] = None,
+    ) -> dict:
+        from ....util import read_file_content
+
+        if name < 1:
+            raise InvalidArgumentValueError("Version must be a positive number")
+
+        try:
+            logger.debug("Processing schema content.")
+            schema_version_content = read_file_content(schema_version_content)
+        except FileOperationError:
+            logger.debug("Given schema content is not a file.")
+
+        resource = {
+            "properties": {
+                "schemaContent": schema_version_content,
+                "description": description,
+            },
+        }
+        try:
+            with current_console or console.status("Working..."):
+                return self.version_ops.create_or_replace(
+                    resource_group_name=resource_group_name,
+                    schema_registry_name=schema_registry_name,
+                    schema_name=schema_name,
+                    schema_version_name=name,
+                    resource=resource
+                )
+        except HttpResponseError as e:
+            if "AuthorizationFailure" in e.message:
+                raise ForbiddenError(
+                    "Schema versions require public network access to be enabled in the associated storage account."
+                )
+            raise
+
+    def show_version(
+        self,
+        name: int,
+        schema_name: str,
+        schema_registry_name: str,
+        resource_group_name: str,
+    ) -> dict:
+        # service verifies hash during create already
+        return self.version_ops.get(
+            resource_group_name=resource_group_name,
+            schema_registry_name=schema_registry_name,
+            schema_name=schema_name,
+            schema_version_name=name,
+        )
+
+    def list_versions(
+        self, schema_name: str, schema_registry_name: str, resource_group_name: str
+    ) -> Iterable[dict]:
+        return self.version_ops.list_by_schema(
+            resource_group_name=resource_group_name,
+            schema_registry_name=schema_registry_name,
+            schema_name=schema_name
+        )
+
+    def remove_version(
+        self,
+        name: int,
+        schema_name: str,
+        schema_registry_name: str,
+        resource_group_name: str,
+    ):
+        with console.status("Working..."):
+            return self.version_ops.delete(
+                resource_group_name=resource_group_name,
+                schema_registry_name=schema_registry_name,
+                schema_name=schema_name,
+                schema_version_name=name,
+            )
+
+    def list_dataflow_friendly_versions(
+        self,
+        schema_registry_name: str,
+        resource_group_name: str,
+        schema_name: Optional[str] = None,
+        schema_version: Optional[int] = None,
+        latest: bool = False
+    ) -> dict:
+        from collections import OrderedDict
+        # note: temporary until dataflow create is added.
+        versions_map = {}
+        with console.status("Fetching version info..."):
+            # get all the versions first
+            if schema_name and schema_version:
+                versions_map[schema_name] = [int(schema_version)]
+            elif schema_name:
+                versions_map.update(
+                    self._get_schema_version_dict(
+                        schema_name=schema_name,
+                        schema_registry_name=schema_registry_name,
+                        resource_group_name=resource_group_name,
+                        latest=latest
+                    )
+                )
+            elif schema_version:
+                # TODO: maybe support a version-only lookup across all schemas
+                raise InvalidArgumentValueError(
+                    "Please provide the schema name if a schema version is used."
+                )
+            else:
+                schema_list = self.list(
+                    schema_registry_name=schema_registry_name, resource_group_name=resource_group_name
+                )
+                for schema in schema_list:
+                    versions_map.update(
+                        self._get_schema_version_dict(
+                            schema_name=schema["name"],
+                            schema_registry_name=schema_registry_name,
+                            resource_group_name=resource_group_name,
+                            latest=latest
+                        )
+                    )
+
+        ref_format = "aio-sr://{schema}:{version}"
+        # change to ordered dict to keep ordering; azure cli does not like the int keys at that level
+        for schema_name, versions_list in versions_map.items():
+            ordered = OrderedDict(
+                (str(ver), ref_format.format(schema=schema_name, version=ver)) for ver in versions_list
+            )
+            versions_map[schema_name] = ordered
+
+        return versions_map
+
+    def _get_schema_version_dict(
+        self, schema_name: str, schema_registry_name: str, resource_group_name: str, latest: bool = False
+    ) -> dict:
+        version_list = self.list_versions(
+            schema_name=schema_name,
+            schema_registry_name=schema_registry_name,
+            resource_group_name=resource_group_name
+        )
+        version_list = [int(ver["name"]) for ver in version_list]
+        if latest:
+            version_list = [max(version_list)]
+        return {schema_name: sorted(version_list)}
diff --git a/azext_edge/tests/edge/orchestration/resources/test_schema_int.py b/azext_edge/tests/edge/orchestration/resources/test_schema_int.py
new file mode 100644
index 000000000..75a16a325
--- /dev/null
+++ b/azext_edge/tests/edge/orchestration/resources/test_schema_int.py
@@ -0,0 +1,262 @@
+# coding=utf-8
+# ----------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved. 
+# Licensed under the MIT License. See License file in the project root for license information. +# ---------------------------------------------------------------------------------------------- + +from random import randint +import json +from ....generators import generate_random_string +from ....helpers import run + +VERSION_STRINGIFY_FORMAT = "aio-sr://{schema_name}:{version}" + + +def test_schema_lifecycle(settings_with_rg, tracked_resources, tracked_files): + storage_account_name = f"teststore{generate_random_string(force_lower=True, size=6)}" + registry_name = f"test-registry-{generate_random_string(force_lower=True, size=6)}" + registry_rg = settings_with_rg.env.azext_edge_rg + registry_namespace = f"test-namespace-{generate_random_string(force_lower=True, size=6)}" + # create the storage account and get the id + # NOTE: storage account needs to have public network access enabled to work. + # if we want to check the blobs (aka see that the schema content goes in the right place) + # we would need to enable shared key access too... + storage_account = run( + f"az storage account create -n {storage_account_name} -g {registry_rg} " + "--enable-hierarchical-namespace " + "--allow-shared-key-access false --allow-blob-public-access false" + ) + tracked_resources.append(storage_account['id']) + + # create the registry + registry = run( + f"az iot ops schema registry create -n {registry_name} -g {registry_rg} " + f"--rn {registry_namespace} --sa-resource-id {storage_account['id']} " + ) + tracked_resources.append(registry["id"]) + + # CREATE 1 with min version args + schema_name1 = f"schema-{generate_random_string(force_lower=True, size=6)}" + schema_name2 = f"schema-{generate_random_string(force_lower=True, size=6)}" + delta_content = generate_random_string() + version_num = 1 + schema1 = run( + f"az iot ops schema create -n {schema_name1} -g {registry_rg} --registry {registry_name} " + f"--format delta --type message --version-content {delta_content}" + ) + assert_schema( + schema=schema1, + name=schema_name1, + resource_group=registry_rg, + registry_name=registry_name, + schema_type="MessageSchema", + format="delta" + ) + + # SHOW + schema_show = run(f"az iot ops schema show -n {schema_name1} -g {registry_rg} --registry {registry_name}") + assert_schema( + schema=schema_show, + name=schema_name1, + resource_group=registry_rg, + registry_name=registry_name, + schema_type="MessageSchema", + format="delta" + ) + + # SHOW VERSION + version_show = run( + f"az iot ops schema version show -n {version_num} --schema {schema_name1} -g {registry_rg} " + f"--registry {registry_name}" + ) + assert_schema_version( + version=version_show, + name=version_num, + schema_name=schema_name1, + registry_name=registry_name, + resource_group=registry_rg, + schema_version_content=delta_content + ) + + # VERSION PRINTS + version_strings1 = run( + f"az iot ops schema show-dataflow-refs --schema {schema_name1} -g {registry_rg} " + f"--registry {registry_name} --ver {version_num}" + ) + assert schema_name1 in version_strings1 + assert str(version_num) in version_strings1[schema_name1] + assert version_strings1[schema_name1][str(version_num)] == VERSION_STRINGIFY_FORMAT.format( + version=version_num, schema_name=schema_name1 + ) + + # CREATE 2 with max version args + description = f"{generate_random_string()} {generate_random_string()}" + display_name = generate_random_string() + + version_desc = generate_random_string() + version_num = randint(2, 100) + version_num2 = version_num + randint(1, 10) + json_content = 
json.dumps({ + generate_random_string(): generate_random_string(), + generate_random_string(): { + generate_random_string(): generate_random_string() + }, + generate_random_string(): generate_random_string() + }) + file_name = f"test_schema_version_content_{generate_random_string(size=4)}.json" + tracked_files.append(file_name) + with open(file_name, "w", encoding="utf-8") as f: + f.write(json_content) + + schema2 = run( + f"az iot ops schema create -n {schema_name2} -g {registry_rg} --registry {registry_name} " + f"--format json --type message --desc \"{description}\" --display-name {display_name} " + f"--vc {file_name} --vd {version_desc} --ver {version_num}" + ) + assert_schema( + schema=schema2, + name=schema_name2, + resource_group=registry_rg, + registry_name=registry_name, + schema_type="MessageSchema", + format="json", + description=description, + display_name=display_name + ) + + # ADD VERSION + inline_content = json.dumps({ + generate_random_string(): generate_random_string() + }) + # fun stuff to make sure the inline is actually formatted correctly in the command + test_content = inline_content.replace('"', '\\"') + version_add = run( + f"az iot ops schema version add -n {version_num2} --schema {schema_name2} -g {registry_rg} " + f"--registry {registry_name} --content \"{test_content}\"" + ) + assert_schema_version( + version=version_add, + name=version_num2, + schema_name=schema_name2, + registry_name=registry_name, + resource_group=registry_rg, + schema_version_content=inline_content, + ) + + # LIST VERSION + version_list = run( + f"az iot ops schema version list --schema {schema_name2} -g {registry_rg} " + f"--registry {registry_name}" + ) + version_map = {int(ver["name"]): ver for ver in version_list} + assert version_num in version_map + assert version_num2 in version_map + assert_schema_version( + version=version_map[version_num], + name=version_num, + schema_name=schema_name2, + registry_name=registry_name, + schema_version_content=json_content, + resource_group=registry_rg, + description=version_desc + ) + + # VERSION PRINTS + version_strings2 = run( + f"az iot ops schema show-dataflow-refs --schema {schema_name2} -g {registry_rg} --registry {registry_name}" + ) + assert schema_name2 in version_strings2 + assert str(version_num) in version_strings2[schema_name2] + assert str(version_num2) in version_strings2[schema_name2] + assert version_strings2[schema_name2][str(version_num)] == VERSION_STRINGIFY_FORMAT.format( + version=version_num, schema_name=schema_name2 + ) + assert version_strings2[schema_name2][str(version_num2)] == VERSION_STRINGIFY_FORMAT.format( + version=version_num2, schema_name=schema_name2 + ) + + # all versions + schemas + version_strings_all = run( + f"az iot ops schema show-dataflow-refs -g {registry_rg} --registry {registry_name}" + ) + assert schema_name1 in version_strings_all + assert schema_name2 in version_strings_all + assert version_strings_all[schema_name1][str(1)] == VERSION_STRINGIFY_FORMAT.format( + version=1, schema_name=schema_name1 + ) + assert version_strings_all[schema_name2][str(version_num)] == VERSION_STRINGIFY_FORMAT.format( + version=version_num, schema_name=schema_name2 + ) + assert version_strings_all[schema_name2][str(version_num2)] == VERSION_STRINGIFY_FORMAT.format( + version=version_num2, schema_name=schema_name2 + ) + + # latest should only contain schema1 + version and schema2 + latest version + version_strings_latest = run( + f"az iot ops schema show-dataflow-refs -g {registry_rg} --registry {registry_name} --latest" + ) 
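+    # schema1 only ever had version 1, so --latest keeps it as-is; for schema2 the
+    # filter should keep only the higher version (version_num2) and drop version_num.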
+ assert version_strings_latest[schema_name1][str(1)] == VERSION_STRINGIFY_FORMAT.format( + version=1, schema_name=schema_name1 + ) + assert version_strings_latest[schema_name2][str(version_num2)] == VERSION_STRINGIFY_FORMAT.format( + version=version_num2, schema_name=schema_name2 + ) + assert str(version_num) not in version_strings_latest[schema_name2] + + # REMOVE VERSION + run( + f"az iot ops schema version remove -n {version_num} --schema {schema_name2} -g {registry_rg} " + f"--registry {registry_name}" + ) + + # LIST + version_list = run( + f"az iot ops schema version list --schema {schema_name2} -g {registry_rg} " + f"--registry {registry_name}" + ) + version_map = [int(ver["name"]) for ver in version_list] + assert version_num not in version_map + assert version_num2 in version_map + + # LIST + schema_list = run(f"az iot ops schema list -g {registry_rg} --registry {registry_name}") + schema_names = [schema["name"] for schema in schema_list] + assert schema_name1 in schema_names + assert schema_name2 in schema_names + + # DELETE + run(f"az iot ops schema delete -n {schema_name1} -g {registry_rg} --registry {registry_name} -y") + run(f"az iot ops schema delete -n {schema_name2} -g {registry_rg} --registry {registry_name} -y") + schema_list = run(f"az iot ops schema list -g {registry_rg} --registry {registry_name}") + schema_names = [schema["name"] for schema in schema_list] + assert schema_name1 not in schema_names + assert schema_name2 not in schema_names + + +def assert_schema(schema: dict, **expected): + format_map = { + "json": "JsonSchema/draft-07", + "delta": "Delta/1.0" + } + assert schema["name"] == expected["name"] + assert schema["resourceGroup"] == expected["resource_group"] + # note: trying to do exact name match hence split + assert expected["registry_name"] in schema["id"].split("/") + + schema_props = schema["properties"] + assert schema_props["schemaType"] == expected["schema_type"] + assert schema_props["format"] == format_map[expected["format"]] + assert schema_props.get("description") == expected.get("description") + assert schema_props.get("displayName") == expected.get("display_name") + + +def assert_schema_version(version: dict, **expected): + assert version["name"] == str(expected["name"]) + assert version["resourceGroup"] == expected["resource_group"] + # note: trying to do exact name match hence split + assert expected["registry_name"] in version["id"].split("/") + assert expected["schema_name"] in version["id"].split("/") + + assert version["properties"]["hash"] + assert version["properties"]["schemaContent"] == expected["schema_version_content"] + assert version["properties"].get("description") == expected.get("description") diff --git a/azext_edge/tests/edge/orchestration/resources/test_schema_unit.py b/azext_edge/tests/edge/orchestration/resources/test_schema_unit.py new file mode 100644 index 000000000..b4acebd58 --- /dev/null +++ b/azext_edge/tests/edge/orchestration/resources/test_schema_unit.py @@ -0,0 +1,467 @@ +# coding=utf-8 +# ---------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License file in the project root for license information. 
+# ---------------------------------------------------------------------------------------------- + +import json +from random import randint +from typing import Optional + +import pytest +import responses + +from azext_edge.edge.commands_schema import ( + create_schema, + delete_schema, + list_schemas, + show_schema, + add_version, + show_version, + list_versions, + remove_version +) +from azext_edge.edge.providers.orchestration.common import SchemaFormat, SchemaType +from ....generators import generate_random_string +from .conftest import get_base_endpoint, get_mock_resource + +SCHEMA_RP = "Microsoft.DeviceRegistry" +SCHEMA_REGISTRY_RP_API_VERSION = "2024-09-01-preview" + + +def get_schema_endpoint( + resource_group_name: str, + registry_name: str, + schema_name: Optional[str] = None +) -> str: + resource_path = f"/schemaRegistries/{registry_name}/schemas" + if schema_name: + resource_path += f"/{schema_name}" + return get_base_endpoint( + resource_group_name=resource_group_name, + resource_path=resource_path, + resource_provider=SCHEMA_RP, + api_version=SCHEMA_REGISTRY_RP_API_VERSION, + ) + + +def get_schema_version_endpoint( + resource_group_name: str, + registry_name: str, + schema_name: str, + schema_version: Optional[str] = None +) -> str: + resource_path = "/schemaVersions" + if schema_version: + resource_path += f"/{schema_version}" + resource_path = get_schema_endpoint( + resource_group_name=resource_group_name, + registry_name=registry_name, + schema_name=schema_name + resource_path + ) + return resource_path + + +def get_mock_schema_record( + name: str, + registry_name: str, + resource_group_name: str +) -> dict: + record = get_mock_resource( + name=name, + resource_provider=SCHEMA_RP, + resource_path=f"/schemaRegistries/{registry_name}/schemas/{name}", + properties={ + "provisioningState": "Succeeded", + "uuid": "4630b849-a08a-44f9-af0a-9821098b1b1e", + }, + resource_group_name=resource_group_name, + qualified_type="microsoft.deviceregistry/schemaregistries/schemas", + ) + record.pop("extendedLocation") + record.pop("location") + return record + + +def get_mock_schema_version_record( + name: int, + schema_name: str, + registry_name: str, + resource_group_name: str +) -> dict: + record = get_mock_resource( + name=str(name), + resource_provider=SCHEMA_RP, + resource_path=f"/schemaRegistries/{registry_name}/schemas/{schema_name}/schemaVersions/{name}", + properties={ + "provisioningState": "Succeeded", + "uuid": "4630b849-a08a-44f9-af0a-9821098b1b1e", + }, + resource_group_name=resource_group_name, + qualified_type="microsoft.deviceregistry/schemaregistries/schemas/schemaversions", + ) + record.pop("extendedLocation") + record.pop("location") + return record + + +def test_schema_show(mocked_cmd, mocked_responses: responses): + schema_name = generate_random_string() + registry_name = generate_random_string() + resource_group_name = generate_random_string() + + mock_record = get_mock_schema_record( + name=schema_name, + registry_name=registry_name, + resource_group_name=resource_group_name, + ) + mocked_responses.add( + method=responses.GET, + url=get_schema_endpoint( + resource_group_name=resource_group_name, + registry_name=registry_name, + schema_name=schema_name + ), + json=mock_record, + status=200, + content_type="application/json", + ) + result = show_schema( + cmd=mocked_cmd, + schema_name=schema_name, + schema_registry_name=registry_name, + resource_group_name=resource_group_name + ) + + assert result == mock_record + assert len(mocked_responses.calls) == 1 + + 
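+# For reference, the endpoint helpers above nest ARM resource paths; with hypothetical values,
+# get_schema_version_endpoint(resource_group_name="rg", registry_name="reg", schema_name="s", schema_version="1")
+# should yield a URL ending in .../schemaRegistries/reg/schemas/s/schemaVersions/1, with the
+# 2024-09-01-preview api-version appended by get_base_endpoint.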
+@pytest.mark.parametrize( + "records", + [0, 2], +) +def test_schema_list(mocked_cmd, mocked_responses: responses, records: int): + resource_group_name = generate_random_string() + registry_name = generate_random_string() + mock_records = { + "value": [ + get_mock_schema_record( + name=generate_random_string(), + registry_name=registry_name, + resource_group_name=resource_group_name, + ) + for _ in range(records) + ] + } + + mocked_responses.add( + method=responses.GET, + url=get_schema_endpoint( + resource_group_name=resource_group_name, + registry_name=registry_name, + ), + json=mock_records, + status=200, + content_type="application/json", + ) + + result = list(list_schemas( + cmd=mocked_cmd, + resource_group_name=resource_group_name, + schema_registry_name=registry_name + )) + + assert result == mock_records["value"] + assert len(mocked_responses.calls) == 1 + + +def test_schema_delete(mocked_cmd, mocked_responses: responses): + schema_name = generate_random_string() + registry_name = generate_random_string() + resource_group_name = generate_random_string() + + mocked_responses.add( + method=responses.DELETE, + url=get_schema_endpoint( + resource_group_name=resource_group_name, + registry_name=registry_name, + schema_name=schema_name + ), + status=200, + content_type="application/json", + ) + delete_schema( + cmd=mocked_cmd, + schema_name=schema_name, + schema_registry_name=registry_name, + resource_group_name=resource_group_name, + confirm_yes=True, + ) + assert len(mocked_responses.calls) == 1 + + +@pytest.mark.parametrize( + "schema_format", + ["delta", "json"] +) +@pytest.mark.parametrize( + "schema_type", + ["message"], +) +@pytest.mark.parametrize( + "display_name,description,version_num,version_description", + [ + (None, None, None, None), + (generate_random_string(), generate_random_string(), randint(2, 10), generate_random_string()), + ], +) +def test_schema_create( + mocked_cmd, + mocked_responses: responses, + schema_type: str, + schema_format: str, + display_name: Optional[str], + description: Optional[str], + version_num: Optional[int], + version_description: Optional[str] +): + schema_name = generate_random_string() + registry_name = generate_random_string() + resource_group_name = generate_random_string() + schema_version_content = generate_random_string() + + create_registry_kwargs = { + "cmd": mocked_cmd, + "schema_name": schema_name, + "schema_format": schema_format, + "schema_type": schema_type, + "schema_registry_name": registry_name, + "resource_group_name": resource_group_name, + "display_name": display_name, + "description": description, + "schema_version_content": schema_version_content, + "schema_version_description": version_description + } + if version_num: + create_registry_kwargs["schema_version"] = version_num + + mock_record = get_mock_schema_record( + name=schema_name, + registry_name=registry_name, + resource_group_name=resource_group_name, + ) + + mocked_responses.add( + method=responses.PUT, + url=get_schema_endpoint( + resource_group_name=resource_group_name, + registry_name=registry_name, + schema_name=schema_name + ), + json=mock_record, + status=200, + ) + + version_record = get_mock_schema_version_record( + schema_name=schema_name, + name=version_num or 1, + registry_name=registry_name, + resource_group_name=resource_group_name, + ) + mocked_responses.add( + method=responses.PUT, + url=get_schema_version_endpoint( + resource_group_name=resource_group_name, + registry_name=registry_name, + schema_name=schema_name, + schema_version=version_num or 
1 + ), + json=version_record, + status=200, + content_type="application/json", + ) + + create_result = create_schema(**create_registry_kwargs) + assert create_result == mock_record + create_payload = json.loads( + mocked_responses.calls[-2].request.body + ) + assert create_payload["properties"]["format"] == SchemaFormat[schema_format].full_value + assert create_payload["properties"]["schemaType"] == SchemaType[schema_type].full_value + assert create_payload["properties"]["description"] == description + assert create_payload["properties"]["displayName"] == display_name + + version_payload = json.loads( + mocked_responses.calls[-1].request.body + ) + version_url = mocked_responses.calls[-1].request.url.split("?")[0] + version_url = version_url.split("/")[-1] + assert version_url == str(version_num or 1) + assert version_payload["properties"]["schemaContent"] == schema_version_content + assert version_payload["properties"]["description"] == version_description + + +def test_version_show(mocked_cmd, mocked_responses: responses): + version_num = randint(1, 10) + schema_name = generate_random_string() + registry_name = generate_random_string() + resource_group_name = generate_random_string() + + mock_record = get_mock_schema_version_record( + schema_name=schema_name, + name=version_num, + registry_name=registry_name, + resource_group_name=resource_group_name, + ) + mocked_responses.add( + method=responses.GET, + url=get_schema_version_endpoint( + resource_group_name=resource_group_name, + registry_name=registry_name, + schema_name=schema_name, + schema_version=version_num + ), + json=mock_record, + status=200, + content_type="application/json", + ) + result = show_version( + cmd=mocked_cmd, + version_name=version_num, + schema_name=schema_name, + schema_registry_name=registry_name, + resource_group_name=resource_group_name + ) + assert result == mock_record + assert len(mocked_responses.calls) == 1 + + +@pytest.mark.parametrize( + "records", + [0, 2], +) +def test_version_list(mocked_cmd, mocked_responses: responses, records: int): + resource_group_name = generate_random_string() + registry_name = generate_random_string() + schema_name = generate_random_string() + mock_records = { + "value": [ + get_mock_schema_version_record( + schema_name=schema_name, + name=randint(0, 10), + registry_name=registry_name, + resource_group_name=resource_group_name, + ) + for _ in range(records) + ] + } + + mocked_responses.add( + method=responses.GET, + url=get_schema_version_endpoint( + resource_group_name=resource_group_name, + registry_name=registry_name, + schema_name=schema_name, + ), + json=mock_records, + status=200, + content_type="application/json", + ) + + result = list(list_versions( + cmd=mocked_cmd, + resource_group_name=resource_group_name, + schema_registry_name=registry_name, + schema_name=schema_name + )) + + assert result == mock_records["value"] + assert len(mocked_responses.calls) == 1 + + +def test_version_remove(mocked_cmd, mocked_responses: responses): + version_num = randint(1, 10) + schema_name = generate_random_string() + registry_name = generate_random_string() + resource_group_name = generate_random_string() + + mocked_responses.add( + method=responses.DELETE, + url=get_schema_version_endpoint( + resource_group_name=resource_group_name, + registry_name=registry_name, + schema_name=schema_name, + schema_version=version_num + ), + status=200, + content_type="application/json", + ) + remove_version( + cmd=mocked_cmd, + version_name=version_num, + schema_name=schema_name, + 
schema_registry_name=registry_name, + resource_group_name=resource_group_name + ) + assert len(mocked_responses.calls) == 1 + + +@pytest.mark.parametrize("description", [None, generate_random_string()]) +def test_version_add(mocked_cmd, mocked_responses: responses, description: Optional[str]): + version_num = randint(1, 10) + schema_name = generate_random_string() + registry_name = generate_random_string() + resource_group_name = generate_random_string() + schema_version_content = generate_random_string() + + create_registry_kwargs = { + "cmd": mocked_cmd, + "version_name": version_num, + "schema_name": schema_name, + "schema_registry_name": registry_name, + "resource_group_name": resource_group_name, + "description": description, + "schema_version_content": schema_version_content + } + + mock_record = get_mock_schema_version_record( + schema_name=schema_name, + name=version_num, + registry_name=registry_name, + resource_group_name=resource_group_name, + ) + mocked_responses.add( + method=responses.PUT, + url=get_schema_version_endpoint( + resource_group_name=resource_group_name, + registry_name=registry_name, + schema_name=schema_name, + schema_version=version_num + ), + json=mock_record, + status=200, + content_type="application/json", + ) + + create_result = add_version(**create_registry_kwargs) + assert create_result == mock_record + create_payload = json.loads( + mocked_responses.calls[-1].request.body + ) + + assert create_payload["properties"]["schemaContent"] == schema_version_content + assert create_payload["properties"]["description"] == description + + +def test_version_add_error(mocked_cmd): + from azure.cli.core.azclierror import InvalidArgumentValueError + with pytest.raises(InvalidArgumentValueError): + add_version( + cmd=mocked_cmd, + version_name=-1, + schema_name=generate_random_string(), + schema_registry_name=generate_random_string(), + schema_version_content=generate_random_string(), + resource_group_name=generate_random_string() + ) From 1c8505831ae16254b32cd819ae06627e57cf36fa Mon Sep 17 00:00:00 2001 From: Ryan K Date: Mon, 21 Oct 2024 10:45:13 -0700 Subject: [PATCH 07/26] fix: fix build_tree issue if aio_ext does not exist (#414) --- azext_edge/edge/providers/orchestration/deletion.py | 2 +- .../edge/providers/orchestration/resource_map.py | 11 ++++++----- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/azext_edge/edge/providers/orchestration/deletion.py b/azext_edge/edge/providers/orchestration/deletion.py index 8375fdb78..4294219d2 100644 --- a/azext_edge/edge/providers/orchestration/deletion.py +++ b/azext_edge/edge/providers/orchestration/deletion.py @@ -151,7 +151,7 @@ def _process(self, force: bool = False): ) aio_ext_id: str = aio_ext_obj.get("id", "") aio_ext = next( - (_ for _ in self.resource_map.extensions if _.resource_id.lower() == aio_ext_id.lower()), None + (ext for ext in self.resource_map.extensions if ext.resource_id.lower() == aio_ext_id.lower()), None ) if aio_ext: todo_extensions.append(aio_ext) diff --git a/azext_edge/edge/providers/orchestration/resource_map.py b/azext_edge/edge/providers/orchestration/resource_map.py index dbe4d9eb7..b3d90ad6e 100644 --- a/azext_edge/edge/providers/orchestration/resource_map.py +++ b/azext_edge/edge/providers/orchestration/resource_map.py @@ -134,7 +134,7 @@ def refresh_resource_state(self): self._cluster_container = refreshed_cluster_container - def build_tree(self, include_dependencies: bool = False, category_color: str = "cyan") -> Tree: + def build_tree(self, include_dependencies: bool = 
True, category_color: str = "cyan") -> Tree: from .work import IOT_OPS_EXTENSION_TYPE tree = Tree(f"[green]{self.connected_cluster.cluster_name}") @@ -146,10 +146,11 @@ def build_tree(self, include_dependencies: bool = False, category_color: str = " aio_ext_obj = self.connected_cluster.get_extensions_by_type(IOT_OPS_EXTENSION_TYPE).get( IOT_OPS_EXTENSION_TYPE, {} ) - aio_ext_id: str = aio_ext_obj.get("id", "") - aio_ext = next((_ for _ in self.extensions if _.resource_id.lower() == aio_ext_id.lower()), None) - if aio_ext: - extensions_node.add(aio_ext.display_name) + if aio_ext_obj: + aio_ext_id: str = aio_ext_obj.get("id", "") + aio_ext = next((ext for ext in self.extensions if ext.resource_id.lower() == aio_ext_id.lower()), None) + if aio_ext: + extensions_node.add(aio_ext.display_name) else: [extensions_node.add(ext.display_name) for ext in self.extensions] From cf620a70fcd97dac13b72527b88e2d5b2aaeacea Mon Sep 17 00:00:00 2001 From: Ryan K Date: Tue, 22 Oct 2024 13:11:42 -0700 Subject: [PATCH 08/26] chore: template update to 8.20 (#417) --- azext_edge/edge/providers/orchestration/template.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/azext_edge/edge/providers/orchestration/template.py b/azext_edge/edge/providers/orchestration/template.py index 73c38cdd8..2f3d3bf92 100644 --- a/azext_edge/edge/providers/orchestration/template.py +++ b/azext_edge/edge/providers/orchestration/template.py @@ -56,7 +56,7 @@ def copy(self) -> "TemplateBlueprint": "languageVersion": "2.0", "contentVersion": "1.0.0.0", "metadata": { - "_generator": {"name": "bicep", "version": "0.30.23.60470", "templateHash": "17597461722386619555"} + "_generator": {"name": "bicep", "version": "0.30.23.60470", "templateHash": "16041392394653918543"} }, "definitions": { "_1.AdvancedConfig": { @@ -214,7 +214,7 @@ def copy(self) -> "TemplateBlueprint": "variables": { "VERSIONS": { "platform": "0.7.6", - "secretStore": "0.6.4", + "secretStore": "0.6.7", "containerStorage": "2.2.0", "openServiceMesh": "1.2.10", }, @@ -286,6 +286,9 @@ def copy(self) -> "TemplateBlueprint": "osm.osm.enablePermissiveTrafficPolicy": "false", "osm.osm.featureFlags.enableWASMStats": "false", "osm.osm.configResyncInterval": "10s", + "osm.osm.osmController.resource.requests.cpu": "100m", + "osm.osm.osmBootstrap.resource.requests.cpu": "100m", + "osm.osm.injector.resource.requests.cpu": "100m", }, }, "dependsOn": ["cluster"], @@ -297,7 +300,7 @@ def copy(self) -> "TemplateBlueprint": "name": "azure-arc-containerstorage", "identity": {"type": "SystemAssigned"}, "properties": { - "extensionType": "Microsoft.Arc.ContainerStorage", + "extensionType": "microsoft.arc.containerstorage", "autoUpgradeMinorVersion": False, "version": "[coalesce(tryGet(tryGet(parameters('advancedConfig'), 'edgeStorageAccelerator'), 'version'), variables('VERSIONS').containerStorage)]", "releaseTrain": "[coalesce(tryGet(tryGet(parameters('advancedConfig'), 'edgeStorageAccelerator'), 'train'), variables('TRAINS').containerStorage)]", @@ -355,7 +358,7 @@ def copy(self) -> "TemplateBlueprint": "languageVersion": "2.0", "contentVersion": "1.0.0.0", "metadata": { - "_generator": {"name": "bicep", "version": "0.30.23.60470", "templateHash": "8789011211011918491"} + "_generator": {"name": "bicep", "version": "0.30.23.60470", "templateHash": "15107151171792030409"} }, "definitions": { "_1.AdvancedConfig": { @@ -527,7 +530,7 @@ def copy(self) -> "TemplateBlueprint": "variables": { "AIO_EXTENSION_SUFFIX": 
"[take(uniqueString(resourceId('Microsoft.Kubernetes/connectedClusters', parameters('clusterName'))), 5)]", "AIO_EXTENSION_SCOPE": {"cluster": {"releaseNamespace": "azure-iot-operations"}}, - "VERSIONS": {"iotOperations": "0.8.16"}, + "VERSIONS": {"iotOperations": "0.8.20"}, "TRAINS": {"iotOperations": "integration"}, "MQTT_SETTINGS": { "brokerListenerServiceName": "aio-broker", From a792d9a21d67e7e5b8d10067617cd2766c8aec74 Mon Sep 17 00:00:00 2001 From: Victoria Litvinova <73560279+vilit1@users.noreply.github.com> Date: Tue, 22 Oct 2024 17:10:12 -0700 Subject: [PATCH 09/26] feat: add `az iot ops upgrade` (#416) --- azext_edge/edge/_help.py | 28 +- azext_edge/edge/command_map.py | 1 + azext_edge/edge/commands_edge.py | 21 + azext_edge/edge/params.py | 9 + .../orchestration/connected_cluster.py | 1 - .../orchestration/resources/clusters.py | 18 + .../edge/providers/orchestration/upgrade.py | 305 +++++++++++ .../edge/providers/orchestration/work.py | 5 +- .../edge/orchestration/test_upgrade_unit.py | 475 ++++++++++++++++++ setup.py | 1 + 10 files changed, 861 insertions(+), 3 deletions(-) create mode 100644 azext_edge/edge/providers/orchestration/upgrade.py create mode 100644 azext_edge/tests/edge/orchestration/test_upgrade_unit.py diff --git a/azext_edge/edge/_help.py b/azext_edge/edge/_help.py index 86b7b76fc..77a93839c 100644 --- a/azext_edge/edge/_help.py +++ b/azext_edge/edge/_help.py @@ -562,6 +562,8 @@ def load_iotops_help(): long-summary: Optionally the command can output a tree structure of associated resources representing the IoT Operations deployment against the backing cluster. + If this command fails, please use `az iot ops upgrade` to upgrade your instance to the latest version before continuing. + examples: - name: Basic usage to show an instance. text: > @@ -595,7 +597,7 @@ def load_iotops_help(): ] = """ type: command short-summary: Update an IoT Operations instance. - long-summary: Currently instance tags and description can be updated. + long-summary: Currently instance tags and description can be updated. If you want to upgrade your instance to a newer version, please use `az iot ops upgrade` instead. examples: - name: Update instance tags. This is equivalent to a replace. @@ -609,6 +611,30 @@ def load_iotops_help(): az iot ops update --name myinstance -g myresourcegroup --desc "Fabrikam Widget Factory B42" """ + helps[ + "iot ops upgrade" + ] = """ + type: command + short-summary: Upgrade an IoT Operations instance to the latest version. + long-summary: | + WARNING: This command may fail and require you to delete and re-create your cluster and instance. + + Upgrade an IoT Operations instance, including updating the extensions to the latest versions. + Use this command if `az iot ops show` or similiar commands are failing. + + Schema registry resource Id is an optional parameter and may be required in specific scenarios. + examples: + - name: Upgrade the instance with minimal inputs. + text: > + az iot ops upgrade --name myinstance -g myresourcegroup + - name: Skip the conformation prompt during instance upgrade. + text: > + az iot ops upgrade --name myinstance -g myresourcegroup -y + - name: Upgrade the instance and specify the schema registry resource Id. 
+ text: > + az iot ops upgrade --name myinstance -g myresourcegroup --sr-resource-id $SCHEMA_REGISTRY_RESOURCE_ID + """ + helps[ "iot ops identity" ] = """ diff --git a/azext_edge/edge/command_map.py b/azext_edge/edge/command_map.py index 44a7a853e..138fdbe9b 100644 --- a/azext_edge/edge/command_map.py +++ b/azext_edge/edge/command_map.py @@ -29,6 +29,7 @@ def load_iotops_commands(self, _): ) as cmd_group: cmd_group.command("check", "check") cmd_group.command("init", "init") + cmd_group.command("upgrade", "upgrade") cmd_group.command("create", "create_instance") cmd_group.command("update", "update_instance") cmd_group.show_command("show", "show_instance") diff --git a/azext_edge/edge/commands_edge.py b/azext_edge/edge/commands_edge.py index e79e20014..9402977f8 100644 --- a/azext_edge/edge/commands_edge.py +++ b/azext_edge/edge/commands_edge.py @@ -134,6 +134,27 @@ def init( ) +def upgrade( + cmd, + resource_group_name: str, + instance_name: str, + schema_registry_resource_id: Optional[str] = None, + no_progress: Optional[bool] = None, + confirm_yes: Optional[bool] = None, + **kwargs +): + from .providers.orchestration.upgrade import upgrade_ops_resources + return upgrade_ops_resources( + cmd=cmd, + resource_group_name=resource_group_name, + instance_name=instance_name, + sr_resource_id=schema_registry_resource_id, + no_progress=no_progress, + confirm_yes=confirm_yes, + **kwargs + ) + + def create_instance( cmd, cluster_name: str, diff --git a/azext_edge/edge/params.py b/azext_edge/edge/params.py index 674c76065..f4043ee88 100644 --- a/azext_edge/edge/params.py +++ b/azext_edge/edge/params.py @@ -540,6 +540,15 @@ def load_iotops_arguments(self, _): arg_group="Trust", ) + with self.argument_context("iot ops upgrade") as context: + # Schema Registry + context.argument( + "schema_registry_resource_id", + options_list=["--sr-resource-id"], + help="The schema registry resource Id to use with IoT Operations. 
Required if the schema registry " + "resource Id is no longer found within IoT Operations.", + ) + with self.argument_context("iot ops delete") as context: context.argument( "include_dependencies", diff --git a/azext_edge/edge/providers/orchestration/connected_cluster.py b/azext_edge/edge/providers/orchestration/connected_cluster.py index 05e81a2c6..bd60d5304 100644 --- a/azext_edge/edge/providers/orchestration/connected_cluster.py +++ b/azext_edge/edge/providers/orchestration/connected_cluster.py @@ -5,7 +5,6 @@ # ---------------------------------------------------------------------------------------------- from typing import List, Optional, Union, Dict - from ...util.resource_graph import ResourceGraph diff --git a/azext_edge/edge/providers/orchestration/resources/clusters.py b/azext_edge/edge/providers/orchestration/resources/clusters.py index 6052bf771..3bcfda5f6 100644 --- a/azext_edge/edge/providers/orchestration/resources/clusters.py +++ b/azext_edge/edge/providers/orchestration/resources/clusters.py @@ -11,6 +11,7 @@ from ....util.az_client import ( get_clusterconfig_mgmt_client, get_connectedk8s_mgmt_client, + wait_for_terminal_state ) from ....util.queryable import Queryable @@ -53,3 +54,20 @@ def list(self, resource_group_name: str, cluster_name: str) -> Iterable[dict]: cluster_resource_name="connectedClusters", cluster_name=cluster_name, ) + + # will be removed + def update_cluster_extension( + self, + resource_group_name: str, + cluster_name: str, + extension_name: str, + update_payload: dict, + ) -> Iterable[dict]: + return wait_for_terminal_state(self.ops.begin_update( + resource_group_name=resource_group_name, + cluster_rp="Microsoft.Kubernetes", + cluster_resource_name="connectedClusters", + cluster_name=cluster_name, + extension_name=extension_name, + patch_extension=update_payload + )) diff --git a/azext_edge/edge/providers/orchestration/upgrade.py b/azext_edge/edge/providers/orchestration/upgrade.py new file mode 100644 index 000000000..368125cbc --- /dev/null +++ b/azext_edge/edge/providers/orchestration/upgrade.py @@ -0,0 +1,305 @@ +# coding=utf-8 +# ---------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License file in the project root for license information. 
+# ---------------------------------------------------------------------------------------------- + +from time import sleep +from typing import List, Optional, OrderedDict + +from azure.cli.core.azclierror import ( + ArgumentUsageError, + AzureResponseError, + RequiredArgumentMissingError, +) +from azure.core.exceptions import HttpResponseError, ResourceNotFoundError +from knack.log import get_logger +from rich import print +from rich.console import NewLine +from rich.live import Live +from rich.padding import Padding +from rich.progress import Progress, SpinnerColumn, TimeElapsedColumn +from rich.table import Table + +from ...util.az_client import get_resource_client, wait_for_terminal_state +from ...util.common import should_continue_prompt +from .resource_map import IoTOperationsResourceMap +from .resources import Instances + +logger = get_logger(__name__) +INSTANCE_7_API = "2024-08-15-preview" + + +def upgrade_ops_resources( + cmd, + resource_group_name: str, + instance_name: Optional[str] = None, + cluster_name: Optional[str] = None, + sr_resource_id: Optional[str] = None, + confirm_yes: Optional[bool] = None, + no_progress: Optional[bool] = None, +): + manager = UpgradeManager( + cmd=cmd, + instance_name=instance_name, + cluster_name=cluster_name, + sr_resource_id=sr_resource_id, + resource_group_name=resource_group_name, + no_progress=no_progress, + ) + return manager.do_work(confirm_yes=confirm_yes) + + +# keeping this separate for easier removal once no longer needed +class UpgradeManager: + def __init__( + self, + cmd, + resource_group_name: str, + instance_name: Optional[str] = None, + cluster_name: Optional[str] = None, + sr_resource_id: Optional[str] = None, + no_progress: Optional[bool] = None, + ): + from azure.cli.core.commands.client_factory import get_subscription_id + + self.cmd = cmd + self.instance_name = instance_name + self.cluster_name = cluster_name + self.sr_resource_id = sr_resource_id + self.resource_group_name = resource_group_name + self.instances = Instances(self.cmd) + self.subscription_id = get_subscription_id(cli_ctx=cmd.cli_ctx) + self.resource_client = get_resource_client(self.subscription_id) + + self._render_progress = not no_progress + self._live = Live(None, transient=False, refresh_per_second=8, auto_refresh=self._render_progress) + self._progress_bar = Progress( + SpinnerColumn(), + *Progress.get_default_columns(), + "Elapsed:", + TimeElapsedColumn(), + transient=False, + ) + self._progress_shown = False + + def do_work(self, confirm_yes: Optional[bool] = None): + # get the resource map from the instance (checks if update is needed for instance) + self.resource_map = self._get_resource_map() + # Ensure cluster exists with existing resource_map pattern. + self.resource_map.connected_cluster.resource + self.cluster_name = self.resource_map.connected_cluster.cluster_name + + # get the extensions to update, populate the expected patches + extension_text = self._check_extensions() + + if not self.extensions_to_update and not self.require_instance_upgrade: + print("[green]Nothing to upgrade :)[/green]") + return + + print("Azure IoT Operations Upgrade") + print() + if self.extensions_to_update: + print(Padding("Extensions to update:", (0, 0, 0, 2))) + print(Padding(extension_text, (0, 0, 0, 4))) + + if self.require_instance_upgrade: + print(Padding( + "Old Azure IoT Operations instance version found. 
Will update the instance to the latest version.",
+                (0, 0, 0, 2)
+            ))
+
+        print()
+        print("[yellow]Upgrading may fail and require you to delete and re-create your cluster.[/yellow]")
+
+        should_bail = not should_continue_prompt(confirm_yes=confirm_yes)
+        if should_bail:
+            return
+
+        # do the work - get the schema reg id if needed, then run the updates
+        return self._process()
+
+    def _check_extensions(self) -> str:
+        from packaging import version
+        from .template import M3_ENABLEMENT_TEMPLATE, M3_INSTANCE_TEMPLATE
+        version_map = M3_ENABLEMENT_TEMPLATE.content["variables"]["VERSIONS"].copy()
+        version_map.update(M3_INSTANCE_TEMPLATE.content["variables"]["VERSIONS"].copy())
+        train_map = M3_ENABLEMENT_TEMPLATE.content["variables"]["TRAINS"].copy()
+        train_map.update(M3_INSTANCE_TEMPLATE.content["variables"]["TRAINS"].copy())
+
+        self.new_aio_version = version_map["iotOperations"]
+
+        # note that the secret store type changes but somehow it all works out :)
+        # the order is determined by the dependsOn relationships in the template
+        type_to_key_map = OrderedDict([
+            ("microsoft.iotoperations.platform", "platform"),
+            ("microsoft.openservicemesh", "openServiceMesh"),
+            ("microsoft.azure.secretstore", "secretStore"),
+            ("microsoft.arc.containerstorage", "containerStorage"),
+            ("microsoft.iotoperations", "iotOperations"),
+        ])
+        # order the extension list with the same order as the above map
+        aio_extensions: List[dict] = self.resource_map.connected_cluster.extensions
+        type_to_aio_extensions = {ext["properties"]["extensionType"].lower(): ext for ext in aio_extensions}
+        ordered_aio_extensions = OrderedDict({
+            ext_type: type_to_aio_extensions[ext_type] for ext_type in type_to_key_map
+        })
+        # make sure order is kept
+        self.extensions_to_update = OrderedDict()
+        for extension_type, extension in ordered_aio_extensions.items():
+            extension_key = type_to_key_map[extension_type]
+            current_version = extension["properties"].get("version", "0")
+            current_train = extension["properties"].get("releaseTrain", "").lower()
+
+            extension_update = {
+                "properties": {
+                    "autoUpgradeMinorVersion": "false",
+                    "releaseTrain": train_map[extension_key],
+                    "version": version_map[extension_key]
+                }
+            }
+
+            if extension_type == "microsoft.openservicemesh":
+                # hard code to avoid actual template resources parsing
+                extension_update["properties"]["configurationSettings"] = {
+                    "osm.osm.osmController.resource.requests.cpu": "100m",
+                    "osm.osm.osmBootstrap.resource.requests.cpu": "100m",
+                    "osm.osm.injector.resource.requests.cpu": "100m",
+                }
+
+            # this should still be fine for mesh - if it is already at the target version it should
+            # have these props; worst case, the extra config settings do nothing
+            if all([
+                version.parse(current_version) >= version.parse(version_map[extension_key]),
+                train_map[extension_key].lower() == current_train
+            ]):
+                logger.info(f"Extension {extension['name']} is already up to date.")
+                continue
+            self.extensions_to_update[extension["name"]] = extension_update
+
+        # try to get the sr resource id if not present already
+        extension_props = type_to_aio_extensions["microsoft.iotoperations"]["properties"]
+        if not self.sr_resource_id:
+            self.sr_resource_id = extension_props.get("configurationSettings", {}).get(
+                "schemaRegistry.values.resourceId"
+            )
+        # text to print (ordered); target_version avoids shadowing the packaging `version` module
+        display_desc = "[dim]"
+        for extension, update in self.extensions_to_update.items():
+            target_version = update["properties"]["version"]
+            display_desc += f"• {extension}: {target_version}\n"
+        return display_desc[:-1]
+
+    def _get_resource_map(self) -> IoTOperationsResourceMap:
+        self.require_instance_upgrade = True
+        # try with 2024-08-15-preview -> it is m2
+        try:
+            self.instance = self.resource_client.resources.get(
+                resource_group_name=self.resource_group_name,
+                parent_resource_path="",
+                resource_provider_namespace="Microsoft.IoTOperations",
+                resource_type="instances",
+                resource_name=self.instance_name,
+                api_version=INSTANCE_7_API
+            )
+            return self.instances.get_resource_map(self.instance)
+        except HttpResponseError:
+            self.require_instance_upgrade = False
+            # try with 2024-09-15-preview -> it is m3 already
+            try:
+                self.instance = self.instances.show(
+                    name=self.instance_name,
+                    resource_group_name=self.resource_group_name
+                )
+                return self.instances.get_resource_map(self.instance)
+            except ResourceNotFoundError as e:
+                raise e
+            except HttpResponseError:
+                raise ArgumentUsageError(
+                    f"Cannot upgrade instance {self.instance_name}, please delete your instance, including "
+                    "dependencies, and reinstall."
+                )
+
+    def _render_display(self, description: str):
+        if self._render_progress:
+            grid = Table.grid(expand=False)
+            grid.add_column()
+            grid.add_row(NewLine(1))
+            grid.add_row(description)
+            grid.add_row(NewLine(1))
+            grid.add_row(self._progress_bar)
+
+            if not self._progress_shown:
+                self._task_id = self._progress_bar.add_task(description="Work.", total=None)
+                self._progress_shown = True
+            self._live.update(grid, refresh=True)
+
+            if not self._live.is_started:
+                self._live.start(True)
+
+    def _stop_display(self):
+        if self._render_progress and self._live.is_started:
+            if self._progress_shown:
+                self._progress_bar.update(self._task_id, description="Done.")
+                sleep(0.5)
+            self._live.stop()
+
+    def _process(self):
+        if self.require_instance_upgrade:
+            # m3 extensions should not have the reg id
+            if not self.sr_resource_id:
+                raise RequiredArgumentMissingError(
+                    "Cannot determine the schema registry id from installed extensions, please provide the schema "
+                    "registry id via `--sr-resource-id`."
+ ) + + # prep the instance + self.instance.pop("systemData", None) + inst_props = self.instance["properties"] + inst_props["schemaRegistryRef"] = {"resourceId": self.sr_resource_id} + inst_props["version"] = self.new_aio_version + inst_props.pop("schemaRegistryNamespace", None) + inst_props.pop("components", None) + + result = None + try: + # Do the extension upgrade, try to keep the sr resource id + if self.extensions_to_update: + self._render_display("[yellow]Updating extensions...") + for extension in self.extensions_to_update: + logger.info(f"Updating extension {extension}.") + logger.info(f"Extension PATCH body: {self.extensions_to_update[extension]}") + updated = self.resource_map.connected_cluster.clusters.extensions.update_cluster_extension( + resource_group_name=self.resource_group_name, + cluster_name=self.cluster_name, + extension_name=extension, + update_payload=self.extensions_to_update[extension] + ) + # check for hidden errors + for status in updated["properties"].get("statuses", []): + if status["code"] == "InstallationFailed": + raise AzureResponseError( + f"Updating extension {extension} failed with the error message: {status['message']}" + ) + + if self.require_instance_upgrade: + # update the instance + minimize the code to be taken out once this is no longer needed + self._render_display("[yellow]Updating instance...") + logger.info(f"New instance body: {self.instance}") + result = wait_for_terminal_state( + self.instances.iotops_mgmt_client.instance.begin_create_or_update( + resource_group_name=self.resource_group_name, + instance_name=self.instance_name, + resource=self.instance + ) + ) + except (HttpResponseError, KeyboardInterrupt) as e: + if self.require_instance_upgrade: + logger.error( + f"Update failed. The collected schema registry resource id is `{self.sr_resource_id}`. " + "Please save this value in case it is required for a future upgrade. " + ) + raise e + finally: + self._stop_display() + return result diff --git a/azext_edge/edge/providers/orchestration/work.py b/azext_edge/edge/providers/orchestration/work.py index c261d073f..273e7e8ba 100644 --- a/azext_edge/edge/providers/orchestration/work.py +++ b/azext_edge/edge/providers/orchestration/work.py @@ -139,7 +139,9 @@ def _format_instance_desc(self) -> str: def _build_display(self): pre_check_cat_desc = "Pre-Flight" self._display.add_category(WorkCategoryKey.PRE_FLIGHT, pre_check_cat_desc, skipped=not self._pre_flight) - self._display.add_step(WorkCategoryKey.PRE_FLIGHT, WorkStepKey.REG_RP, "Ensure registered resource providers") + self._display.add_step( + WorkCategoryKey.PRE_FLIGHT, WorkStepKey.REG_RP, "Ensure registered resource providers" + ) self._display.add_step( WorkCategoryKey.PRE_FLIGHT, WorkStepKey.ENUMERATE_PRE_FLIGHT, "Enumerate pre-flight checks" ) @@ -258,6 +260,7 @@ def _do_work(self): # noqa: C901 # Pre-Flight workflow if self._pre_flight: + # WorkStepKey.REG_RP self.render_display(category=WorkCategoryKey.PRE_FLIGHT, active_step=WorkStepKey.REG_RP) register_providers(self.subscription_id) diff --git a/azext_edge/tests/edge/orchestration/test_upgrade_unit.py b/azext_edge/tests/edge/orchestration/test_upgrade_unit.py new file mode 100644 index 000000000..5ea33a29e --- /dev/null +++ b/azext_edge/tests/edge/orchestration/test_upgrade_unit.py @@ -0,0 +1,475 @@ +# coding=utf-8 +# ---------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License file in the project root for license information. +# ---------------------------------------------------------------------------------------------- + +from packaging import version +from typing import Dict, List, Optional, OrderedDict +from unittest.mock import Mock +import pytest +import responses +from azure.cli.core.azclierror import ( + ArgumentUsageError, + AzureResponseError, + RequiredArgumentMissingError, +) +from azure.core.exceptions import HttpResponseError + +from ...generators import generate_random_string, get_zeroed_subscription + +# Note this keeps the order +VAR_TO_TYPE_MAP = OrderedDict([ + ("platform", "microsoft.iotoperations.platform"), + ("open_service_mesh", "microsoft.openservicemesh"), + ("secret_store", "microsoft.azure.secretstore"), + ("container_storage", "microsoft.arc.containerstorage"), + ("iot_operations", "microsoft.iotoperations"), +]) + + +@pytest.fixture +def mocked_instances(mocker): + patched = mocker.patch( + "azext_edge.edge.providers.orchestration.upgrade.Instances", + ) + yield patched() + + +@pytest.fixture +def mocked_logger(mocker): + yield mocker.patch( + "azext_edge.edge.providers.orchestration.upgrade.logger", + ) + + +@pytest.fixture +def mocked_wait_for_terminal_state(mocker): + yield mocker.patch("azext_edge.edge.providers.orchestration.upgrade.wait_for_terminal_state", autospec=True) + + +@pytest.fixture +def mocked_rich_print(mocker): + yield mocker.patch("azext_edge.edge.providers.orchestration.upgrade.print") + + +@pytest.fixture +def mocked_live_display(mocker): + yield mocker.patch("azext_edge.edge.providers.orchestration.upgrade.Live") + + +@pytest.fixture +def spy_upgrade_manager(mocker): + from azext_edge.edge.providers.orchestration.upgrade import UpgradeManager + + yield { + "_check_extensions": mocker.spy(UpgradeManager, "_check_extensions"), + "_get_resource_map": mocker.spy(UpgradeManager, "_get_resource_map"), + "_render_display": mocker.spy(UpgradeManager, "_render_display"), + "_stop_display": mocker.spy(UpgradeManager, "_stop_display"), + "_process": mocker.spy(UpgradeManager, "_process"), + } + + +def _assemble_template_mock(mocker, new_versions, new_trains): + # order doesnt matter here + variable_to_key_map = { + "secret_store": "secretStore", + "container_storage": "containerStorage", + "open_service_mesh": "openServiceMesh", + "platform": "platform", + "iot_operations": "iotOperations", + } + versions = {variable_to_key_map[v]: new_versions[v] for v in variable_to_key_map} + trains = {variable_to_key_map[v]: new_trains[v] for v in variable_to_key_map} + inst_temp_patch = mocker.patch("azext_edge.edge.providers.orchestration.template.M3_INSTANCE_TEMPLATE") + inst_temp_patch.content = { + "variables": { + "VERSIONS": {"iotOperations": versions.pop("iotOperations")}, + "TRAINS": {"iotOperations": trains.pop("iotOperations")} + } + } + + enable_temp_patch = mocker.patch("azext_edge.edge.providers.orchestration.template.M3_ENABLEMENT_TEMPLATE") + enable_temp_patch.content = { + "variables": { + "VERSIONS": versions, + "TRAINS": trains + } + } + + +def _generate_extensions(**extension_version_map) -> OrderedDict: + # if nothing is provided, "min" is used + # order is determined from the VAR_TO_TYPE_MAP - ensure order is kept + extensions = OrderedDict() + for key in VAR_TO_TYPE_MAP: + version = extension_version_map.get(key, "0.0.0") + extensions[key] = { + "properties": { + "extensionType": VAR_TO_TYPE_MAP[key], + "version": version, + "releaseTrain": "preview", + "configurationSettings": { + 
"schemaRegistry.values.resourceId": generate_random_string() + } + }, + "name": generate_random_string(), + } + return extensions + + +# TODO: if not used for m3 - simplify +def _generate_instance(instance_name: str, resource_group: str, m3: bool = False): + mock_instance_record = { + "extendedLocation": { + "name": generate_random_string(), + "type": "CustomLocation" + }, + "id": f"/subscriptions/{get_zeroed_subscription()}/resourcegroups/{resource_group}" + f"/providers/Microsoft.Kubernetes/connectedClusters/{instance_name}", + "identity": {"type": "None"}, + "location": "eastus2", + "name": instance_name, + "properties": { + "description": generate_random_string(), + "provisioningState": "Succeeded", + "version": "0.7.31" + }, + "resourceGroup": resource_group, + "systemData": { + generate_random_string(): generate_random_string(), + generate_random_string(): generate_random_string(), + }, + "type": "microsoft.iotoperations/instances" + } + if m3: + mock_instance_record["properties"]["schemaRegistryRef"] = {"resource_id": generate_random_string()} + else: + mock_instance_record["properties"]["schemaRegistryNamespace"] = generate_random_string() + mock_instance_record["properties"]["components"] = { + "adr": { + "state": "Enabled" + }, + "akri": { + "state": "Enabled" + }, + "connectors": { + "state": "Enabled" + }, + "dataflows": { + "state": "Enabled" + }, + "schemaRegistry": { + "state": "Enabled" + } + } + return mock_instance_record + + +def _generate_versions(**versions) -> dict: + # if version not provided, "max" is used + return {key: versions.get(key, "255.255.255") for key in VAR_TO_TYPE_MAP} + + +def _generate_trains(**trains) -> dict: + # if train is not provided, "max" is used + return {key: trains.get(key, "stable") for key in VAR_TO_TYPE_MAP} + + +@pytest.mark.parametrize("no_progress", [False, True]) +@pytest.mark.parametrize("require_instance_update", [False, True]) +@pytest.mark.parametrize("current_extensions, new_versions, new_trains", [ + # update none + ( + _generate_extensions( + secret_store="255.255.255", + container_storage="255.255.255", + open_service_mesh="255.255.255", + platform="255.255.255", + iot_operations="255.255.255", + ), + _generate_versions(), + _generate_trains( + secret_store="preview", + container_storage="preview", + open_service_mesh="preview", + platform="preview", + iot_operations="preview", + ) + ), + # update aio (version) + platform (train) + ( + _generate_extensions( + secret_store="1.10.0", + container_storage="0.10.3-preview", + open_service_mesh="0.9.1", + platform="0.10.0", + iot_operations="0.8.16", + ), + _generate_versions( + secret_store="1.10.0", + container_storage="0.10.3-preview", + open_service_mesh="0.9.1", + platform="0.10.0", + iot_operations="0.8.20", + ), + _generate_trains( + secret_store="preview", + container_storage="preview", + open_service_mesh="preview", + iot_operations="preview", + ) + ), + # update aio, openmesh, container store (new versions) + ( + _generate_extensions( + secret_store="1.10.0", + container_storage="0.10.3-preview", + open_service_mesh="0.9.1", + platform="0.10.0", + iot_operations="0.7.31", + ), + _generate_versions( + secret_store="1.10.0", + container_storage="0.10.3", + open_service_mesh="0.10.2", + platform="0.10.0", + iot_operations="0.8.16", + ), + _generate_trains( + secret_store="preview", + container_storage="preview", + open_service_mesh="preview", + platform="preview", + iot_operations="preview", + ) + ), + # update all + (_generate_extensions(), _generate_versions(), 
_generate_trains())
+])
+@pytest.mark.parametrize("sr_resource_id", [None, generate_random_string()])
+def test_upgrade_lifecycle(
+    mocker,
+    mocked_cmd: Mock,
+    mocked_responses: responses,
+    mocked_instances: Mock,
+    mocked_wait_for_terminal_state: Mock,
+    mocked_live_display: Mock,
+    mocked_logger: Mock,
+    mocked_rich_print: Mock,
+    spy_upgrade_manager: Dict[str, Mock],
+    require_instance_update: bool,
+    current_extensions: List[dict],
+    new_versions: List[dict],
+    new_trains: List[dict],
+    sr_resource_id: Optional[str],
+    no_progress: Optional[bool]
+):
+    from azext_edge.edge.providers.orchestration.upgrade import upgrade_ops_resources
+
+    rg_name = generate_random_string()
+    instance_name = generate_random_string()
+
+    # mock extensions in resource map and template info
+    mocked_resource_map = mocked_instances.get_resource_map()
+    mocked_resource_map.connected_cluster.extensions = list(current_extensions.values())
+    extension_update_mock = mocked_resource_map.connected_cluster.clusters.extensions.update_cluster_extension
+    _assemble_template_mock(mocker, new_versions=new_versions, new_trains=new_trains)
+    m2_instance = None
+    # the get m2 instance call
+    if require_instance_update:
+        m2_instance = _generate_instance(instance_name=instance_name, resource_group=rg_name)
+        # note the resource client adds an extra / before instances for the parent path. The api doesn't care
+        mocked_responses.add(
+            method=responses.GET,
+            url=f"https://management.azure.com/subscriptions/{get_zeroed_subscription()}/resourcegroups/{rg_name}"
+            f"/providers/Microsoft.IoTOperations//instances/{instance_name}?api-version=2024-08-15-preview",
+            json=m2_instance,
+            status=200,
+            content_type="application/json",
+        )
+    else:
+        mocked_responses.add(
+            method=responses.GET,
+            url=f"https://management.azure.com/subscriptions/{get_zeroed_subscription()}/resourcegroups/{rg_name}"
+            f"/providers/Microsoft.IoTOperations//instances/{instance_name}?api-version=2024-08-15-preview",
+            status=404,
+            content_type="application/json",
+        )
+        # no need to provide a valid value for instance show since it will not be used
+
+    kwargs = {
+        "cmd": mocked_cmd,
+        "instance_name": instance_name,
+        "resource_group_name": rg_name,
+        "sr_resource_id": sr_resource_id,
+        "confirm_yes": True,
+        "no_progress": no_progress,
+    }
+
+    upgrade_ops_resources(**kwargs)
+
+    # no matter what, we always try the m2 get
+    assert len(mocked_responses.calls) == 1
+    # extension update calls
+    extensions_to_update = {}
+    extension_update_calls = extension_update_mock.call_args_list
+    call = 0
+    for key, extension in current_extensions.items():
+        if any([
+            version.parse(extension["properties"]["version"]) < version.parse(new_versions[key]),
+            extension["properties"]["releaseTrain"] != new_trains[key]
+        ]):
+            extensions_to_update[key] = extension
+            # check the extension
+            extension_call = extension_update_calls[call].kwargs
+            assert extension_call["resource_group_name"] == rg_name
+            assert extension_call["cluster_name"]
+            payload = extension_call["update_payload"]
+            assert payload["properties"]
+            assert payload["properties"]["autoUpgradeMinorVersion"] == "false"
+            assert payload["properties"]["releaseTrain"] == new_trains[key]
+            assert payload["properties"]["version"] == new_versions[key]
+
+            if key == "open_service_mesh":  # keys here are the var names, not extension types
+                assert payload["properties"]["configurationSettings"]
+            # calls should be ordered together
+            call += 1
+
+    assert len(extensions_to_update) == len(extension_update_calls)
+
+    # overall upgrade 
call + assert spy_upgrade_manager["_process"].called is bool(extensions_to_update or require_instance_update) + + if require_instance_update: + update_args = mocked_instances.iotops_mgmt_client.instance.begin_create_or_update.call_args.kwargs + update_body = update_args["resource"] + + # props that were kept the same + for prop in ["extendedLocation", "id", "name", "location", "resourceGroup", "type"]: + assert update_body[prop] == m2_instance[prop] + for prop in ["description", "provisioningState"]: + assert update_body["properties"][prop] == m2_instance["properties"][prop] + + # props that were removed + assert "systemData" not in update_body + assert "schemaRegistryNamespace" not in update_body["properties"] + assert "components" not in update_body["properties"] + + # props that were added/changed - also ensure right sr id is used + assert update_body["properties"]["version"] == new_versions["iot_operations"] + aio_ext_props = current_extensions["iot_operations"]["properties"] + assert update_body["properties"]["schemaRegistryRef"]["resourceId"] == ( + sr_resource_id or aio_ext_props["configurationSettings"]["schemaRegistry.values.resourceId"] + ) + else: + # make sure we tried to get the m3 + mocked_instances.show.assert_called() + mocked_instances.iotops_mgmt_client.instance.begin_create_or_update.assert_not_called() + + # no progress check + if kwargs["no_progress"]: + mocked_live_display.assert_called_once_with(None, transient=False, refresh_per_second=8, auto_refresh=False) + + +def test_upgrade_error( + mocker, + mocked_cmd: Mock, + mocked_responses: responses, + mocked_instances: Mock, + mocked_wait_for_terminal_state: Mock, + mocked_live_display: Mock, + mocked_logger: Mock, + mocked_rich_print: Mock, +): + from azext_edge.edge.providers.orchestration.upgrade import upgrade_ops_resources + + rg_name = generate_random_string() + instance_name = generate_random_string() + m2_instance = _generate_instance(instance_name=instance_name, resource_group=rg_name) + kwargs = { + "cmd": mocked_cmd, + "instance_name": instance_name, + "resource_group_name": rg_name, + "confirm_yes": True, + } + extensions = _generate_extensions() + mocked_resource_map = mocked_instances.get_resource_map() + mocked_resource_map.connected_cluster.extensions = list(extensions.values()) + + # slowly work backwards + # instance update fails + mocked_responses.add( + method=responses.GET, + url=f"https://management.azure.com/subscriptions/{get_zeroed_subscription()}/resourcegroups/{rg_name}" + f"/providers/Microsoft.IoTOperations//instances/{instance_name}?api-version=2024-08-15-preview", + json=m2_instance, + status=200, + content_type="application/json", + ) + mocked_instances.iotops_mgmt_client.instance.begin_create_or_update.side_effect = HttpResponseError( + "instance update failed" + ) + with pytest.raises(HttpResponseError) as e: + upgrade_ops_resources(**kwargs) + + # some random extension has a hidden status error + mocked_responses.add( + method=responses.GET, + url=f"https://management.azure.com/subscriptions/{get_zeroed_subscription()}/resourcegroups/{rg_name}" + f"/providers/Microsoft.IoTOperations//instances/{instance_name}?api-version=2024-08-15-preview", + json=m2_instance, + status=200, + content_type="application/json", + ) + error_msg = generate_random_string() + extensions["platform"]["properties"]["statuses"] = [{"code": "InstallationFailed", "message": error_msg}] + extension_update_mock = mocked_resource_map.connected_cluster.clusters.extensions.update_cluster_extension + 
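# hidden-failure sketch: the extension PATCH below returns HTTP 200, but the injected
+    # InstallationFailed status is expected to surface as AzureResponseError
+    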
extension_update_mock.return_value = extensions["platform"] + with pytest.raises(AzureResponseError) as e: + upgrade_ops_resources(**kwargs) + assert error_msg in e.value.error_msg + assert extensions["platform"]["name"] in e.value.error_msg + + # extension update fails + mocked_responses.add( + method=responses.GET, + url=f"https://management.azure.com/subscriptions/{get_zeroed_subscription()}/resourcegroups/{rg_name}" + f"/providers/Microsoft.IoTOperations//instances/{instance_name}?api-version=2024-08-15-preview", + json=m2_instance, + status=200, + content_type="application/json", + ) + extension_update_mock.side_effect = HttpResponseError( + "extension update failed" + ) + with pytest.raises(HttpResponseError): + upgrade_ops_resources(**kwargs) + + # need to update the instance but cannot get the sr resource id + mocked_responses.add( + method=responses.GET, + url=f"https://management.azure.com/subscriptions/{get_zeroed_subscription()}/resourcegroups/{rg_name}" + f"/providers/Microsoft.IoTOperations//instances/{instance_name}?api-version=2024-08-15-preview", + json=m2_instance, + status=200, + content_type="application/json", + ) + [ext["properties"].pop("configurationSettings") for ext in mocked_resource_map.connected_cluster.extensions] + + with pytest.raises(RequiredArgumentMissingError): + upgrade_ops_resources(**kwargs) + + # cannot get m2 or m3 + mocked_responses.add( + method=responses.GET, + url=f"https://management.azure.com/subscriptions/{get_zeroed_subscription()}/resourcegroups/{rg_name}" + f"/providers/Microsoft.IoTOperations//instances/{instance_name}?api-version=2024-08-15-preview", + status=404, + content_type="application/json", + ) + mocked_instances.show.side_effect = HttpResponseError("instance get failed") + with pytest.raises(ArgumentUsageError): + upgrade_ops_resources(**kwargs) diff --git a/setup.py b/setup.py index 0a756401e..f80ee6cf0 100644 --- a/setup.py +++ b/setup.py @@ -28,6 +28,7 @@ DEPENDENCIES = [ + "packaging", "rich>=13.6,<14.0", "kubernetes>=27.2,<29.0", "azure-identity>=1.14.1,<1.18.0", From 204bb49997ebb12bdbf56a745e5bfe0d34336818 Mon Sep 17 00:00:00 2001 From: Ryan K Date: Tue, 22 Oct 2024 17:11:36 -0700 Subject: [PATCH 10/26] refactor: update int tests to move init checks to create (#418) --- .../tests/edge/init/int/test_init_int.py | 153 +++++++----------- 1 file changed, 62 insertions(+), 91 deletions(-) diff --git a/azext_edge/tests/edge/init/int/test_init_int.py b/azext_edge/tests/edge/init/int/test_init_int.py index c0e2d37ea..bf4445bf2 100644 --- a/azext_edge/tests/edge/init/int/test_init_int.py +++ b/azext_edge/tests/edge/init/int/test_init_int.py @@ -44,17 +44,16 @@ def init_test_setup(settings, tracked_resources): "--enable-hierarchical-namespace --public-network-access Disabled " "--allow-shared-key-access false --allow-blob-public-access false --default-action Deny" ) - tracked_resources.append(storage_account['id']) + tracked_resources.append(storage_account["id"]) registry = run( f"az iot ops schema registry create -n {registry_name} -g {settings.env.azext_edge_rg} " - f"--rn {registry_namespace} --sa-resource-id {storage_account['id']} " + f"--rn {registry_namespace} --sa-resource-id {storage_account['id']}" ) tracked_resources.append(registry["id"]) if not all([settings.env.azext_edge_cluster, settings.env.azext_edge_rg]): raise AssertionError( - "Cannot run init tests without a connected cluster and resource group. " - f"Current settings:\n {settings}" + f"Cannot run init tests without a connected cluster and resource group. 
Current settings:\n {settings}" ) yield { @@ -65,7 +64,7 @@ def init_test_setup(settings, tracked_resources): "additionalCreateArgs": _strip_quotes(settings.env.azext_edge_create_args), "additionalInitArgs": _strip_quotes(settings.env.azext_edge_init_args), "continueOnError": settings.env.azext_edge_init_continue_on_error or False, - "redeployment": settings.env.azext_edge_init_redeployment or False + "redeployment": settings.env.azext_edge_init_redeployment or False, } if settings.env.azext_edge_aio_cleanup: run( @@ -74,14 +73,12 @@ def init_test_setup(settings, tracked_resources): ) else: # if the init + create worked - make sure that schema reg + storage account aren't deleted - tracked_resources.remove(storage_account['id']) - tracked_resources.remove(registry['id']) + tracked_resources.remove(storage_account["id"]) + tracked_resources.remove(registry["id"]) @pytest.mark.init_scenario_test -def test_init_scenario( - init_test_setup, tracked_files -): +def test_init_scenario(init_test_setup, tracked_files): additional_init_args = init_test_setup["additionalInitArgs"] or "" init_arg_dict = _process_additional_args(additional_init_args) additional_create_args = init_test_setup["additionalCreateArgs"] or "" @@ -92,8 +89,7 @@ def test_init_scenario( resource_group = init_test_setup["resourceGroup"] registry_id = init_test_setup["schemaRegistryId"] instance_name = init_test_setup["instanceName"] - command = f"az iot ops init -g {resource_group} --cluster {cluster_name} "\ - f"--no-progress {additional_init_args} " + command = f"az iot ops init -g {resource_group} --cluster {cluster_name} --no-progress {additional_init_args} " # TODO: assert return once there is a return for init run(command) @@ -101,17 +97,16 @@ def test_init_scenario( assert_aio_init(cluster_name=cluster_name, resource_group=resource_group, **init_arg_dict) # create command - create_command = f"az iot ops create -g {resource_group} --cluster {cluster_name} "\ - f"--sr-resource-id {registry_id} -n {instance_name} "\ + create_command = ( + f"az iot ops create -g {resource_group} --cluster {cluster_name} " + f"--sr-resource-id {registry_id} -n {instance_name} " f"--no-progress {additional_create_args} " + ) # TODO: assert create when return be returning run(create_command) if init_test_setup["redeployment"]: - run( - f"az iot ops delete --name {instance_name} -g {resource_group} " - "-y --no-progress --force" - ) + run(f"az iot ops delete --name {instance_name} -g {resource_group} -y --no-progress --force") run(create_command) # Missing: @@ -133,7 +128,7 @@ def test_init_scenario( cluster_name=cluster_name, resource_group=resource_group, schema_registry_id=registry_id, - **create_arg_dict + **create_arg_dict, ) except Exception as e: # pylint: disable=broad-except # Note we have this since there are multiple Exceptions that can occur: @@ -150,7 +145,6 @@ def test_init_scenario( def assert_aio_init( cluster_name: str, resource_group: str, - ops_config: Optional[str] = None, **_, ): # check extensions installed @@ -166,21 +160,12 @@ def assert_aio_init( while extension_result.get("nextLink"): extension_result = run(f"az rest --method GET --url {extension_result['nextLink']}") extensions.extend(extension_result["value"]) - iot_ops_ext = None, iot_ops_platform_ext = None for ext in extensions: - if ext["properties"]["extensionType"] == "microsoft.iotoperations": - iot_ops_ext = ext - elif ext["properties"]["extensionType"] == "microsoft.iotoperations.platform": + if ext["properties"]["extensionType"] == 
"microsoft.iotoperations.platform": iot_ops_platform_ext = ext - if ops_config: - ops_config = assemble_nargs_to_dict(ops_config.split()) - configs = iot_ops_ext["properties"]["configurationSettings"] - for key, value in ops_config.items(): - assert configs[key] == value - - if not all([iot_ops_platform_ext, iot_ops_ext]): + if not all([iot_ops_platform_ext]): raise AssertionError( "Extensions for AIO are missing. These are the extensions " f"on the cluster: {[ext['name'] for ext in extensions]}." @@ -189,15 +174,47 @@ def assert_aio_init( def assert_aio_instance( instance_name: str, + cluster_name: str, resource_group: str, schema_registry_id: str, + ops_config: Optional[str] = None, custom_location: Optional[str] = None, description: Optional[str] = None, location: Optional[str] = None, enable_rsync: bool = False, tags: Optional[str] = None, - **_ + **_, ): + # check extensions installed + cluster_id = run( + f"az resource show -n {cluster_name} -g {resource_group} " + "--resource-type Microsoft.Kubernetes/connectedClusters" + )["id"] + extension_result = run( + f"az rest --method GET --url {cluster_id}/providers/" + "Microsoft.KubernetesConfiguration/extensions?api-version=2023-05-01" + ) + extensions = extension_result["value"] + while extension_result.get("nextLink"): + extension_result = run(f"az rest --method GET --url {extension_result['nextLink']}") + extensions.extend(extension_result["value"]) + iot_ops_ext = None + for ext in extensions: + if ext["properties"]["extensionType"] == "microsoft.iotoperations": + iot_ops_ext = ext + + if ops_config: + ops_config = assemble_nargs_to_dict(ops_config.split()) + configs = iot_ops_ext["properties"]["configurationSettings"] + for key, value in ops_config.items(): + assert configs[key] == value + + if not all([iot_ops_ext]): + raise AssertionError( + "Extensions for AIO are missing. These are the extensions " + f"on the cluster: {[ext['name'] for ext in extensions]}." 
+ ) + instance_show = run(f"az iot ops show -n {instance_name} -g {resource_group}") tags = assemble_nargs_to_dict(tags) assert instance_show.get("tags", {}) == tags @@ -211,27 +228,6 @@ def assert_aio_instance( assert instance_props.get("description") == description assert instance_props["schemaRegistryRef"] == {"resource_id": schema_registry_id} - expected_components = {"adr", "akri", "connectors", "dataflows", "schemaRegistry"} - disabled_components = [] - unexpected_components = [] - for component, state in instance_props["components"].items(): - if state["state"].lower() != "enabled": - disabled_components.append(component) - if component in expected_components: - expected_components.remove(component) - else: - unexpected_components.append(component) - - error_msg = [] - if disabled_components: - error_msg.append(f"The following components are disabled: {disabled_components}.") - if unexpected_components: - error_msg.append(f"The following components are unexpected: {unexpected_components}.") - if expected_components: - error_msg.append(f"The following components are missing: {expected_components}.") - if error_msg: - raise AssertionError("\n".join(error_msg)) - tree = run(f"az iot ops show -n {instance_name} -g {resource_group} --tree") # no resource sync rules if disable rsync rules assert ("adr-sync" in tree) is enable_rsync @@ -258,7 +254,7 @@ def assert_broker_args( fw: Optional[str] = None, lt: Optional[str] = None, mp: Optional[str] = None, - **_ + **_, ): if bp: broker_backend_part = bp @@ -317,7 +313,7 @@ def assert_broker_args( if add_insecure_listener: insecure = [listener for listener in listeners if listener["name"] == "default-insecure"][0] ports = insecure["properties"]["ports"] - assert 1883 in [p['port'] for p in ports] + assert 1883 in [p["port"] for p in ports] secure_listener = [listener for listener in listeners if listener["name"] == "default"] listener_props = secure_listener[0]["properties"] @@ -325,10 +321,7 @@ def assert_broker_args( def assert_dataflow_profile_args( - instance_name: str, - resource_group: str, - dataflow_profile_instances: Optional[int] = None, - **_ + instance_name: str, resource_group: str, dataflow_profile_instances: Optional[int] = None, **_ ): profile = run(f"az iot ops dataflow profile list -g {resource_group} -i {instance_name}") profile_props = profile[0]["properties"] @@ -368,44 +361,22 @@ def _strip_quotes(argument: Optional[str]) -> Optional[str]: DEFAULT_BROKER_CONFIG = { - "advanced": { - "encryptInternalTraffic": "Enabled" - }, + "advanced": {"encryptInternalTraffic": "Enabled"}, "cardinality": { - "backendChain": { - "partitions": 2, - "redundancyFactor": 2, - "workers": 2 - }, - "frontend": { - "replicas": 2, - "workers": 2 - } + "backendChain": {"partitions": 2, "redundancyFactor": 2, "workers": 2}, + "frontend": {"replicas": 2, "workers": 2}, }, "diagnostics": { - "logs": { - "level": "info" - }, - "metrics": { - "prometheusPort": 9600 - }, - "selfCheck": { - "intervalSeconds": 30, - "mode": "Enabled", - "timeoutSeconds": 15 - }, + "logs": {"level": "info"}, + "metrics": {"prometheusPort": 9600}, + "selfCheck": {"intervalSeconds": 30, "mode": "Enabled", "timeoutSeconds": 15}, "traces": { "cacheSizeMegabytes": 16, "mode": "Enabled", - "selfTracing": { - "intervalSeconds": 30, - "mode": "Enabled" - }, - "spanChannelCapacity": 1000 - } - }, - "generateResourceLimits": { - "cpu": "Disabled" + "selfTracing": {"intervalSeconds": 30, "mode": "Enabled"}, + "spanChannelCapacity": 1000, + }, }, + "generateResourceLimits": {"cpu": 
"Disabled"}, "memoryProfile": "Medium", } From 1294a72821de1b638805676146752a23c16363ac Mon Sep 17 00:00:00 2001 From: Ryan K Date: Wed, 23 Oct 2024 09:31:35 -0700 Subject: [PATCH 11/26] fix: patch for deletion when no aio_extension exists (#419) --- azext_edge/edge/providers/orchestration/deletion.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/azext_edge/edge/providers/orchestration/deletion.py b/azext_edge/edge/providers/orchestration/deletion.py index 4294219d2..733817293 100644 --- a/azext_edge/edge/providers/orchestration/deletion.py +++ b/azext_edge/edge/providers/orchestration/deletion.py @@ -149,12 +149,13 @@ def _process(self, force: bool = False): aio_ext_obj = self.resource_map.connected_cluster.get_extensions_by_type(IOT_OPS_EXTENSION_TYPE).get( IOT_OPS_EXTENSION_TYPE, {} ) - aio_ext_id: str = aio_ext_obj.get("id", "") - aio_ext = next( - (ext for ext in self.resource_map.extensions if ext.resource_id.lower() == aio_ext_id.lower()), None - ) - if aio_ext: - todo_extensions.append(aio_ext) + if aio_ext_obj: + aio_ext_id: str = aio_ext_obj.get("id", "") + aio_ext = next( + (ext for ext in self.resource_map.extensions if ext.resource_id.lower() == aio_ext_id.lower()), None + ) + if aio_ext: + todo_extensions.append(aio_ext) todo_custom_locations = self.resource_map.custom_locations todo_resource_sync_rules = [] todo_resources = [] From a2e5a4d9419bd400c0aa9432bab026f3911c0f49 Mon Sep 17 00:00:00 2001 From: Ryan K Date: Wed, 23 Oct 2024 09:35:01 -0700 Subject: [PATCH 12/26] chore: template update for AIO extension 0.8.25 and container storage 2.2.1 (#420) --- azext_edge/edge/providers/orchestration/template.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/azext_edge/edge/providers/orchestration/template.py b/azext_edge/edge/providers/orchestration/template.py index 2f3d3bf92..cf17cdc46 100644 --- a/azext_edge/edge/providers/orchestration/template.py +++ b/azext_edge/edge/providers/orchestration/template.py @@ -56,7 +56,7 @@ def copy(self) -> "TemplateBlueprint": "languageVersion": "2.0", "contentVersion": "1.0.0.0", "metadata": { - "_generator": {"name": "bicep", "version": "0.30.23.60470", "templateHash": "16041392394653918543"} + "_generator": {"name": "bicep", "version": "0.30.23.60470", "templateHash": "12017477435115453809"} }, "definitions": { "_1.AdvancedConfig": { @@ -215,13 +215,13 @@ def copy(self) -> "TemplateBlueprint": "VERSIONS": { "platform": "0.7.6", "secretStore": "0.6.7", - "containerStorage": "2.2.0", + "containerStorage": "2.2.1", "openServiceMesh": "1.2.10", }, "TRAINS": { "platform": "preview", "secretStore": "preview", - "containerStorage": "preview", + "containerStorage": "stable", "openServiceMesh": "stable", }, "faultTolerantStorageClass": "[coalesce(tryGet(tryGet(parameters('advancedConfig'), 'edgeStorageAccelerator'), 'diskStorageClass'), 'acstor-arccontainerstorage-storage-pool')]", @@ -358,7 +358,7 @@ def copy(self) -> "TemplateBlueprint": "languageVersion": "2.0", "contentVersion": "1.0.0.0", "metadata": { - "_generator": {"name": "bicep", "version": "0.30.23.60470", "templateHash": "15107151171792030409"} + "_generator": {"name": "bicep", "version": "0.30.23.60470", "templateHash": "12344544595454159338"} }, "definitions": { "_1.AdvancedConfig": { @@ -530,7 +530,7 @@ def copy(self) -> "TemplateBlueprint": "variables": { "AIO_EXTENSION_SUFFIX": "[take(uniqueString(resourceId('Microsoft.Kubernetes/connectedClusters', parameters('clusterName'))), 5)]", "AIO_EXTENSION_SCOPE": {"cluster": 
{"releaseNamespace": "azure-iot-operations"}}, - "VERSIONS": {"iotOperations": "0.8.20"}, + "VERSIONS": {"iotOperations": "0.8.25"}, "TRAINS": {"iotOperations": "integration"}, "MQTT_SETTINGS": { "brokerListenerServiceName": "aio-broker", From 9890a3dbf206f24240b1fb3e7eaf1bde2b5962be Mon Sep 17 00:00:00 2001 From: Ryan K Date: Wed, 23 Oct 2024 11:52:30 -0700 Subject: [PATCH 13/26] chore: update insecure listener api-version to match template versions (#421) --- azext_edge/edge/providers/orchestration/template.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/azext_edge/edge/providers/orchestration/template.py b/azext_edge/edge/providers/orchestration/template.py index cf17cdc46..52e405bc4 100644 --- a/azext_edge/edge/providers/orchestration/template.py +++ b/azext_edge/edge/providers/orchestration/template.py @@ -816,7 +816,7 @@ def copy(self) -> "TemplateBlueprint": def get_insecure_listener(instance_name: str, broker_name: str) -> dict: return { "type": "Microsoft.IoTOperations/instances/brokers/listeners", - "apiVersion": "2024-08-15-preview", + "apiVersion": "2024-09-15-preview", "name": f"{instance_name}/{broker_name}/{AIO_INSECURE_LISTENER_NAME}", "extendedLocation": { "name": "[resourceId('Microsoft.ExtendedLocation/customLocations', parameters('customLocationName'))]", From 7fdce2022f3f26bf413600be639f8323fa4d39b3 Mon Sep 17 00:00:00 2001 From: Elsie4ever <3467996@gmail.com> Date: Wed, 23 Oct 2024 15:08:45 -0700 Subject: [PATCH 14/26] feat: add `az iot ops connector opcua` commands (#415) --- azext_edge/edge/_help.py | 119 +++++ azext_edge/edge/command_map.py | 19 + azext_edge/edge/commands_connector.py | 57 ++ azext_edge/edge/params.py | 64 +++ .../orchestration/connected_cluster.py | 9 + .../orchestration/resources/clusters.py | 24 +- .../resources/connector/__init__.py | 5 + .../resources/connector/opcua/__init__.py | 5 + .../resources/connector/opcua/certs.py | 489 ++++++++++++++++++ azext_edge/edge/util/az_client.py | 19 + azext_edge/edge/util/file_operations.py | 36 +- .../resources/connector/__init__.py | 5 + .../resources/connector/opcua/__init__.py | 5 + .../resources/connector/opcua/conftest.py | 180 +++++++ .../opcua/test_opcua_certs_client_unit.py | 315 +++++++++++ .../opcua/test_opcua_certs_issuer_unit.py | 403 +++++++++++++++ .../opcua/test_opcua_certs_trust_unit.py | 247 +++++++++ .../edge/orchestration/test_deletion_unit.py | 57 +- azext_edge/tests/helpers.py | 16 + 19 files changed, 2005 insertions(+), 69 deletions(-) create mode 100644 azext_edge/edge/commands_connector.py create mode 100644 azext_edge/edge/providers/orchestration/resources/connector/__init__.py create mode 100644 azext_edge/edge/providers/orchestration/resources/connector/opcua/__init__.py create mode 100644 azext_edge/edge/providers/orchestration/resources/connector/opcua/certs.py create mode 100644 azext_edge/tests/edge/orchestration/resources/connector/__init__.py create mode 100644 azext_edge/tests/edge/orchestration/resources/connector/opcua/__init__.py create mode 100644 azext_edge/tests/edge/orchestration/resources/connector/opcua/conftest.py create mode 100644 azext_edge/tests/edge/orchestration/resources/connector/opcua/test_opcua_certs_client_unit.py create mode 100644 azext_edge/tests/edge/orchestration/resources/connector/opcua/test_opcua_certs_issuer_unit.py create mode 100644 azext_edge/tests/edge/orchestration/resources/connector/opcua/test_opcua_certs_trust_unit.py diff --git a/azext_edge/edge/_help.py b/azext_edge/edge/_help.py index 77a93839c..5c5e73b19 100644 
--- a/azext_edge/edge/_help.py
+++ b/azext_edge/edge/_help.py
@@ -1308,6 +1308,125 @@ def load_iotops_help():
         -l westus2 --desc 'Contoso factory X1 schemas' --display-name 'Contoso X1' --tags env=prod
     """
 
+    helps[
+        "iot ops connector"
+    ] = """
+        type: group
+        short-summary: Connector management.
+    """
+
+    helps[
+        "iot ops connector opcua"
+    ] = """
+        type: group
+        short-summary: OPC UA connector management.
+        long-summary: |
+          The connector for OPC UA enables your industrial OPC UA environment to input data into
+          your local workloads running on a Kubernetes cluster, and into your cloud workloads.
+          See the following resource for more info https://aka.ms/overview-connector-opcua-broker
+    """
+
+    helps[
+        "iot ops connector opcua trust"
+    ] = """
+        type: group
+        short-summary: Manage trusted certificates for the OPC UA Broker.
+        long-summary: |
+          The trusted certificate list contains the certificates of all the OPC UA servers that the
+          connector for OPC UA trusts. If the connector for OPC UA trusts a certificate authority,
+          it automatically trusts any server that has a valid application instance certificate signed
+          by the certificate authority.
+          For more info, see https://aka.ms/opcua-certificates
+    """
+
+    helps[
+        "iot ops connector opcua trust add"
+    ] = """
+        type: command
+        short-summary: Add a trusted certificate to the OPC UA Broker's trusted certificate list.
+        long-summary: |
+            The certificate file extension must be .der or .crt. Azure resource secretproviderclass
+            'opc-ua-connector' and secretsync 'aio-opc-ua-broker-trust-list' will be created if not found.
+        examples:
+        - name: Add a trusted certificate to the OPC UA Broker's trusted certificate list.
+          text: >
+            az iot ops connector opcua trust add --instance instance --resource-group instanceresourcegroup
+            --certificate-file "certificate.der"
+        - name: Add a trusted certificate to the OPC UA Broker's trusted certificate list with custom secret name.
+          text: >
+            az iot ops connector opcua trust add --instance instance --resource-group instanceresourcegroup
+            --certificate-file "certificate.crt" --secret custom-secret-name
+    """
+
+    helps[
+        "iot ops connector opcua issuer"
+    ] = """
+        type: group
+        short-summary: Manage issuer certificates for the OPC UA Broker.
+        long-summary: |
+          The issuer certificate list stores the certificate authority certificates that the connector
+          for OPC UA trusts. If a user's OPC UA server's application instance certificate is signed by
+          an intermediate certificate authority, but the user does not want to automatically trust all
+          certificates issued by that certificate authority, an issuer certificate list can be used to
+          manage the trust relationship.
+          For more info, see https://aka.ms/opcua-certificates
+    """
+
+    helps[
+        "iot ops connector opcua issuer add"
+    ] = """
+        type: command
+        short-summary: Add an issuer certificate to the OPC UA Broker's issuer certificate list.
+        long-summary: |
+            The certificate file extension must be .der, .crt or .crl. When adding a .crl file, a .der
+            or .crt file with the same file name must be added first. Azure resource secretproviderclass
+            'opc-ua-connector' and secretsync 'aio-opc-ua-broker-issuer-list' will be created if not found.
+        examples:
+        - name: Add an issuer certificate to the OPC UA Broker's issuer certificate list.
+          text: >
+            az iot ops connector opcua issuer add --instance instance --resource-group instanceresourcegroup
+            --certificate-file "certificate.der"
+        - name: Add an issuer certificate with .crl extension to the OPC UA Broker's issuer certificate list with the
+            same file name as the .der file mentioned above.
+          text: >
+            az iot ops connector opcua issuer add --instance instance --resource-group instanceresourcegroup
+            --certificate-file "certificate.crl"
+        - name: Add an issuer certificate to the OPC UA Broker's issuer certificate list with custom secret name.
+          text: >
+            az iot ops connector opcua issuer add --instance instance --resource-group instanceresourcegroup
+            --certificate-file "certificate.der" --secret custom-secret-name
+    """
+
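For orientation, the trust and issuer help entries above both note that, absent --secret, the secret name is derived from the certificate file name. A minimal sketch of that derivation, matching the file_name.replace(".", "-") logic added later in this patch's certs.py (the file path is a hypothetical example):

import os

def derive_default_secret_name(certificate_file: str) -> str:
    # Key Vault secret names cannot contain dots, so dots in the file
    # name are swapped for dashes: "contoso-ca.der" -> "contoso-ca-der".
    return os.path.basename(certificate_file).replace(".", "-")

assert derive_default_secret_name("/certs/contoso-ca.der") == "contoso-ca-der"
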
+    helps[
+        "iot ops connector opcua client"
+    ] = """
+        type: group
+        short-summary: Manage the enterprise grade client application instance certificate for the OPC UA Broker.
+        long-summary: |
+          The connector for OPC UA makes use of a single OPC UA application instance certificate
+          for all the sessions it establishes to collect telemetry data from OPC UA servers.
+          For more info, see https://aka.ms/opcua-certificates
+    """
+
+    helps[
+        "iot ops connector opcua client add"
+    ] = """
+        type: command
+        short-summary: Add an enterprise grade client application instance certificate.
+        long-summary: |
+            The public key file extension must be .der and the private key file extension
+            must be .pem. Please make sure to use the same file name for the public key
+            and private key files. Azure resource secretproviderclass 'opc-ua-connector'
+            and secretsync 'aio-opc-ua-broker-client-certificate' will be created
+            if not found.
+        examples:
+        - name: Add a client certificate.
+          text: >
+            az iot ops connector opcua client add --instance instance --resource-group instanceresourcegroup
+            --public-key-file "newopc.der" --private-key-file "newopc.pem" --subject-name "aio-opc-opcuabroker"
+            --application-uri "urn:microsoft.com:aio:opc:opcuabroker"
+    """
+
     helps[
         "iot ops schema version"
     ] = """
diff --git a/azext_edge/edge/command_map.py b/azext_edge/edge/command_map.py
index 138fdbe9b..57fc14e4d 100644
--- a/azext_edge/edge/command_map.py
+++ b/azext_edge/edge/command_map.py
@@ -16,6 +16,7 @@ secretsync_resource_ops = CliCommandType(operations_tmpl="azext_edge.edge.commands_secretsync#{}")
 asset_resource_ops = CliCommandType(operations_tmpl="azext_edge.edge.commands_assets#{}")
 aep_resource_ops = CliCommandType(operations_tmpl="azext_edge.edge.commands_asset_endpoint_profiles#{}")
+connector_resource_ops = CliCommandType(operations_tmpl="azext_edge.edge.commands_connector#{}")
 
 
 def load_iotops_commands(self, _):
@@ -184,6 +185,24 @@ def load_iotops_commands(self, _):
         cmd_group.command("list", "list_registries")
         cmd_group.command("delete", "delete_registry")
 
+    with self.command_group(
+        "iot ops connector opcua trust",
+        command_type=connector_resource_ops,
+    ) as cmd_group:
+        cmd_group.command("add", "add_connector_opcua_trust")
+
+    with self.command_group(
+        "iot ops connector opcua issuer",
+        command_type=connector_resource_ops,
+    ) as cmd_group:
+        cmd_group.command("add", "add_connector_opcua_issuer")
+
+    with self.command_group(
+        "iot ops connector opcua client",
+        command_type=connector_resource_ops,
+    ) as cmd_group:
+        cmd_group.command("add", "add_connector_opcua_client")
+
     with self.command_group(
         "iot ops schema version",
         command_type=schema_resource_ops,
diff --git a/azext_edge/edge/commands_connector.py b/azext_edge/edge/commands_connector.py
new file mode 100644
index 
000000000..317ffc242 --- /dev/null +++ b/azext_edge/edge/commands_connector.py @@ -0,0 +1,57 @@ +# coding=utf-8 +# ---------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License file in the project root for license information. +# ---------------------------------------------------------------------------------------------- + +from typing import Optional +from .providers.orchestration.resources.connector.opcua.certs import OpcUACerts + + +def add_connector_opcua_trust( + cmd, + instance_name: str, + resource_group: str, + file: str, + secret_name: Optional[str] = None, +) -> dict: + return OpcUACerts(cmd).trust_add( + instance_name=instance_name, + resource_group=resource_group, + file=file, + secret_name=secret_name, + ) + + +def add_connector_opcua_issuer( + cmd, + instance_name: str, + resource_group: str, + file: str, + secret_name: Optional[str] = None, +) -> dict: + return OpcUACerts(cmd).issuer_add( + instance_name=instance_name, + resource_group=resource_group, + file=file, + secret_name=secret_name, + ) + + +def add_connector_opcua_client( + cmd, + instance_name: str, + resource_group: str, + public_key_file: str, + private_key_file: str, + subject_name: str, + application_uri: str, +) -> dict: + return OpcUACerts(cmd).client_add( + instance_name=instance_name, + resource_group=resource_group, + public_key_file=public_key_file, + private_key_file=private_key_file, + subject_name=subject_name, + application_uri=application_uri, + ) diff --git a/azext_edge/edge/params.py b/azext_edge/edge/params.py index f4043ee88..4dc525542 100644 --- a/azext_edge/edge/params.py +++ b/azext_edge/edge/params.py @@ -1269,6 +1269,70 @@ def load_iotops_arguments(self, _): "/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/roleDefinitions/{roleId}", ) + with self.argument_context("iot ops connector opcua") as context: + context.argument( + "instance_name", + options_list=["--instance", "-i"], + help="IoT Operations instance name.", + ) + context.argument( + "resource_group", + options_list=["--resource-group", "-g"], + help="Instance resource group.", + ) + + with self.argument_context("iot ops connector opcua trust") as context: + context.argument( + "file", + options_list=["--certificate-file", "--cf"], + help="Path to the certificate file in .der or .crt format.", + ) + context.argument( + "secret_name", + options_list=["--secret", "-s"], + help="Secret name in the Key Vault. If not provided, the " + "certificate file name will be used to generate the secret name.", + ) + + with self.argument_context("iot ops connector opcua issuer") as context: + context.argument( + "file", + options_list=["--certificate-file", "--cf"], + help="Path to the certificate file in .der, .crt or .crl format.", + ) + context.argument( + "secret_name", + options_list=["--secret-name", "-s"], + help="Secret name in the Key Vault. If not provided, the " + "certificate file name will be used to generate the secret name.", + ) + + with self.argument_context("iot ops connector opcua client") as context: + context.argument( + "public_key_file", + options_list=["--public-key-file", "--pkf"], + help="File that contains the enterprise grade application " + "instance certificate public key in .der format. 
File " + "name will be used to generate the public key secret name.", + ) + context.argument( + "private_key_file", + options_list=["--private-key-file", "--prkf"], + help="File that contains the enterprise grade application " + "instance certificate private key in .pem format. File name " + "will be used to generate the private key secret name.", + ) + context.argument( + "subject_name", + options_list=["--subject-name", "--sn"], + help="The subject name string embedded in the application instance certificate.", + ) + context.argument( + "application_uri", + options_list=["--application-uri", "--au"], + help="The application instance URI embedded in the application instance.", + ) + with self.argument_context("iot ops schema version") as context: context.argument( "version_name", diff --git a/azext_edge/edge/providers/orchestration/connected_cluster.py b/azext_edge/edge/providers/orchestration/connected_cluster.py index bd60d5304..7e92c7b16 100644 --- a/azext_edge/edge/providers/orchestration/connected_cluster.py +++ b/azext_edge/edge/providers/orchestration/connected_cluster.py @@ -139,6 +139,15 @@ def get_resource_sync_rules(self, custom_location_id: str) -> Optional[List[dict result = self.resource_graph.query_resources(query=query) return self._process_query_result(result) + def update_aio_extension(self, extension_name: str, properties: dict) -> dict: + update_payload = {"properties": properties} + return self.clusters.extensions.update_cluster_extension( + resource_group_name=self.resource_group_name, + cluster_name=self.cluster_name, + extension_name=extension_name, + update_payload=update_payload, + ) + def _process_query_result(self, result: dict, first: bool = False) -> Optional[Union[dict, List[dict]]]: if "data" in result and result["data"]: if first: diff --git a/azext_edge/edge/providers/orchestration/resources/clusters.py b/azext_edge/edge/providers/orchestration/resources/clusters.py index 3bcfda5f6..31610f3c2 100644 --- a/azext_edge/edge/providers/orchestration/resources/clusters.py +++ b/azext_edge/edge/providers/orchestration/resources/clusters.py @@ -8,11 +8,7 @@ from knack.log import get_logger -from ....util.az_client import ( - get_clusterconfig_mgmt_client, - get_connectedk8s_mgmt_client, - wait_for_terminal_state -) +from ....util.az_client import get_clusterconfig_mgmt_client, get_connectedk8s_mgmt_client, wait_for_terminal_state from ....util.queryable import Queryable logger = get_logger(__name__) @@ -63,11 +59,13 @@ def update_cluster_extension( extension_name: str, update_payload: dict, ) -> Iterable[dict]: - return wait_for_terminal_state(self.ops.begin_update( - resource_group_name=resource_group_name, - cluster_rp="Microsoft.Kubernetes", - cluster_resource_name="connectedClusters", - cluster_name=cluster_name, - extension_name=extension_name, - patch_extension=update_payload - )) + return wait_for_terminal_state( + self.ops.begin_update( + resource_group_name=resource_group_name, + cluster_rp="Microsoft.Kubernetes", + cluster_resource_name="connectedClusters", + cluster_name=cluster_name, + extension_name=extension_name, + patch_extension=update_payload, + ) + ) diff --git a/azext_edge/edge/providers/orchestration/resources/connector/__init__.py b/azext_edge/edge/providers/orchestration/resources/connector/__init__.py new file mode 100644 index 000000000..ced5cdf5b --- /dev/null +++ b/azext_edge/edge/providers/orchestration/resources/connector/__init__.py @@ -0,0 +1,5 @@ +# coding=utf-8 +# 
diff --git a/azext_edge/edge/providers/orchestration/resources/connector/__init__.py b/azext_edge/edge/providers/orchestration/resources/connector/__init__.py
new file mode 100644
index 000000000..ced5cdf5b
--- /dev/null
+++ b/azext_edge/edge/providers/orchestration/resources/connector/__init__.py
@@ -0,0 +1,5 @@
+# coding=utf-8
+# ----------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License file in the project root for license information.
+# ----------------------------------------------------------------------------------------------
diff --git a/azext_edge/edge/providers/orchestration/resources/connector/opcua/__init__.py b/azext_edge/edge/providers/orchestration/resources/connector/opcua/__init__.py
new file mode 100644
index 000000000..ced5cdf5b
--- /dev/null
+++ b/azext_edge/edge/providers/orchestration/resources/connector/opcua/__init__.py
@@ -0,0 +1,5 @@
+# coding=utf-8
+# ----------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License file in the project root for license information.
+# ----------------------------------------------------------------------------------------------
diff --git a/azext_edge/edge/providers/orchestration/resources/connector/opcua/certs.py b/azext_edge/edge/providers/orchestration/resources/connector/opcua/certs.py
new file mode 100644
index 000000000..f4e2c8130
--- /dev/null
+++ b/azext_edge/edge/providers/orchestration/resources/connector/opcua/certs.py
@@ -0,0 +1,489 @@
+# coding=utf-8
+# ----------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License file in the project root for license information.
+# ----------------------------------------------------------------------------------------------
+
+import os
+from typing import List, Optional, Tuple
+
+from azure.core.paging import PageIterator
+from azure.core.exceptions import ResourceNotFoundError
+from azure.cli.core.azclierror import InvalidArgumentValueError
+from knack.log import get_logger
+from rich.console import Console
+import yaml
+
+from ....common import CUSTOM_LOCATIONS_API_VERSION
+from ...instances import Instances
+from ....work import IOT_OPS_EXTENSION_TYPE
+from ......util.file_operations import read_file_content, validate_file_extension
+from ......util.queryable import Queryable
+from ......util.az_client import (
+    parse_resource_id,
+    get_keyvault_client,
+    get_ssc_mgmt_client,
+    wait_for_terminal_state,
+)
+
+logger = get_logger(__name__)
+
+console = Console()
+
+OPCUA_SPC_NAME = "opc-ua-connector"
+OPCUA_TRUST_LIST_SECRET_SYNC_NAME = "aio-opc-ua-broker-trust-list"
+OPCUA_ISSUER_LIST_SECRET_SYNC_NAME = "aio-opc-ua-broker-issuer-list"
+OPCUA_CLIENT_CERT_SECRET_SYNC_NAME = "aio-opc-ua-broker-client-certificate"
+SERVICE_ACCOUNT_NAME = "aio-ssc-sa"
+
+
+class OpcUACerts(Queryable):
+
+    def __init__(self, cmd):
+        super().__init__(cmd=cmd)
+        self.instances = Instances(self.cmd)
+        self.ssc_mgmt_client = get_ssc_mgmt_client(
+            subscription_id=self.default_subscription_id,
+        )
+
+    def trust_add(self, instance_name: str, resource_group: str, file: str, secret_name: Optional[str] = None) -> dict:
+        cl_resources = self._get_cl_resources(instance_name=instance_name, resource_group=resource_group)
+        secretsync_spc = self._find_existing_spc(instance_name=instance_name, cl_resources=cl_resources)
+
+        # strip the path to get the certificate file name
+        file_name = os.path.basename(file)
+        # validate the extension; it is returned with its leading dot for content-type selection later
+        cert_extension = validate_file_extension(file_name, [".der", ".crt"])
+
+        # get properties from default spc
+        spc_properties = secretsync_spc.get("properties", {})
+        spc_keyvault_name = spc_properties.get("keyvaultName", "")
+        spc_tenant_id = spc_properties.get("tenantId", "")
+        spc_client_id = spc_properties.get("clientId", "")
+
+        self.keyvault_client = get_keyvault_client(
+            subscription_id=self.default_subscription_id,
+            keyvault_name=spc_keyvault_name,
+        )
+
+        secrets: PageIterator = self.keyvault_client.list_properties_of_secrets()
+
+        secret_name = secret_name if secret_name else file_name.replace(".", "-")
+
+        # iterate over secrets to check if a secret with the same name exists
+        secret_name = self._check_and_update_secret_name(secrets, secret_name, spc_keyvault_name)
+        self._upload_to_key_vault(secret_name, file, cert_extension)
+
+        # check if there is a spc called "opc-ua-connector", if not create one
+        try:
+            opcua_spc = self.ssc_mgmt_client.azure_key_vault_secret_provider_classes.get(
+                resource_group_name=resource_group,
+                azure_key_vault_secret_provider_class_name=OPCUA_SPC_NAME,
+            )
+        except ResourceNotFoundError:
+            opcua_spc = {}
+
+        self._add_secrets_to_spc(
+            secrets=[secret_name],
+            spc=opcua_spc,
+            resource_group=resource_group,
+            spc_keyvault_name=spc_keyvault_name,
+            spc_tenant_id=spc_tenant_id,
+            spc_client_id=spc_client_id,
+        )
+
+        # check if there is a secret sync called "aio-opc-ua-broker-trust-list", if not create one
+        try:
+            opcua_secret_sync = self.ssc_mgmt_client.secret_syncs.get(
+                resource_group_name=resource_group,
+                secret_sync_name=OPCUA_TRUST_LIST_SECRET_SYNC_NAME,
+            )
+        except ResourceNotFoundError:
+            opcua_secret_sync = {}
+
+        return self._add_secrets_to_secret_sync(
+            secrets=[(secret_name, file_name)],
+            secret_sync=opcua_secret_sync,
+            resource_group=resource_group,
+            spc_name=OPCUA_SPC_NAME,
+            secret_sync_name=OPCUA_TRUST_LIST_SECRET_SYNC_NAME,
+        )
+
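For reference, a sketch of the objectSecretMapping entry shape that _add_secrets_to_secret_sync (defined further below in this file) appends for a trust-list upload; the secret and file names here are hypothetical:

# One mapping per uploaded certificate: Key Vault secret -> file name projected
# into the synced Kubernetes secret.
object_secret_mapping = [
    {
        "sourcePath": "contoso-ca-der",  # Key Vault secret name
        "targetKey": "contoso-ca.der",   # key (file name) inside the synced secret
    },
]
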
+    def issuer_add(
+        self, instance_name: str, resource_group: str, file: str, secret_name: Optional[str] = None
+    ) -> dict:
+        cl_resources = self._get_cl_resources(instance_name=instance_name, resource_group=resource_group)
+        secretsync_spc = self._find_existing_spc(instance_name=instance_name, cl_resources=cl_resources)
+
+        # strip the path to get the certificate file name, then validate the extension
+        file_name = os.path.basename(file)
+        cert_extension = validate_file_extension(file_name, [".der", ".crt", ".crl"])
+
+        # get properties from default spc
+        spc_properties = secretsync_spc.get("properties", {})
+        spc_keyvault_name = spc_properties.get("keyvaultName", "")
+        spc_tenant_id = spc_properties.get("tenantId", "")
+        spc_client_id = spc_properties.get("clientId", "")
+
+        self.keyvault_client = get_keyvault_client(
+            subscription_id=self.default_subscription_id,
+            keyvault_name=spc_keyvault_name,
+        )
+
+        secrets: PageIterator = self.keyvault_client.list_properties_of_secrets()
+
+        # get cert name by removing extension
+        cert_name = os.path.splitext(file_name)[0]
+
+        try:
+            opcua_secret_sync = self.ssc_mgmt_client.secret_syncs.get(
+                resource_group_name=resource_group,
+                secret_sync_name=OPCUA_ISSUER_LIST_SECRET_SYNC_NAME,
+            )
+        except ResourceNotFoundError:
+            opcua_secret_sync = {}
+
+        if cert_extension == ".crl":
+            matched_names = []
+            if opcua_secret_sync:
+                secret_mapping = opcua_secret_sync.get("properties", {}).get("objectSecretMapping", [])
+                possible_file_names = [f"{cert_name}.crt", f"{cert_name}.der"]
+                matched_names = [
+                    mapping["targetKey"] for mapping in secret_mapping if mapping["targetKey"] in possible_file_names
+                ]
+
+            if not opcua_secret_sync or not matched_names:
+                raise InvalidArgumentValueError(
+                    f"Cannot add .crl {file_name} without corresponding .crt or .der file."
+                )
+
+        secret_name = secret_name if secret_name else file_name.replace(".", "-")
+
+        # iterate over secrets to check if a secret with the same name exists
+        secret_name = self._check_and_update_secret_name(secrets, secret_name, spc_keyvault_name)
+        self._upload_to_key_vault(secret_name, file, cert_extension)
+
+        # check if there is a spc called "opc-ua-connector", if not create one
+        try:
+            opcua_spc = self.ssc_mgmt_client.azure_key_vault_secret_provider_classes.get(
+                resource_group_name=resource_group,
+                azure_key_vault_secret_provider_class_name=OPCUA_SPC_NAME,
+            )
+        except ResourceNotFoundError:
+            opcua_spc = {}
+
+        self._add_secrets_to_spc(
+            secrets=[secret_name],
+            spc=opcua_spc,
+            resource_group=resource_group,
+            spc_keyvault_name=spc_keyvault_name,
+            spc_tenant_id=spc_tenant_id,
+            spc_client_id=spc_client_id,
+        )
+
+        return self._add_secrets_to_secret_sync(
+            secrets=[(secret_name, file_name)],
+            secret_sync=opcua_secret_sync,
+            resource_group=resource_group,
+            spc_name=OPCUA_SPC_NAME,
+            secret_sync_name=OPCUA_ISSUER_LIST_SECRET_SYNC_NAME,
+        )
+
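The uploads above all funnel through _upload_to_key_vault (defined later in this file), which stores the raw certificate bytes hex-encoded and derives the content type from the extension. A condensed sketch of that behavior, assuming a hypothetical local file:

# Hex-encode the DER bytes; the resulting secret is tagged with file-encoding=hex.
with open("contoso-ca.der", "rb") as f:  # hypothetical file
    value = f.read().hex()
content_type = {
    ".crl": "application/pkix-crl",
    ".der": "application/pkix-cert",
}.get(".der", "application/x-pem-file")  # .pem and anything else fall back to x-pem-file
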
+    def client_add(
+        self,
+        instance_name: str,
+        resource_group: str,
+        public_key_file: str,
+        private_key_file: str,
+        subject_name: str,
+        application_uri: str,
+    ) -> dict:
+        # inform user that if the provided cert was issued by a CA, the CA cert must be in the issuers list
+        logger.warning(
+            "Please ensure the issuing CA certificate is added to the issuers list "
+            "if this certificate was issued by a CA."
+        )
+        cl_resources = self._get_cl_resources(instance_name=instance_name, resource_group=resource_group)
+        secretsync_spc = self._find_existing_spc(instance_name=instance_name, cl_resources=cl_resources)
+
+        # process all the file validations before secret creations
+        self._validate_key_files(public_key_file, private_key_file)
+
+        # get properties from default spc
+        spc_properties = secretsync_spc.get("properties", {})
+        spc_keyvault_name = spc_properties.get("keyvaultName", "")
+        spc_client_id = spc_properties.get("clientId", "")
+        spc_tenant_id = spc_properties.get("tenantId", "")
+
+        self.keyvault_client = get_keyvault_client(
+            subscription_id=self.default_subscription_id,
+            keyvault_name=spc_keyvault_name,
+        )
+
+        secrets: PageIterator = self.keyvault_client.list_properties_of_secrets()
+
+        # check if there is a spc called "opc-ua-connector", if not create one
+        try:
+            opcua_spc = self.ssc_mgmt_client.azure_key_vault_secret_provider_classes.get(
+                resource_group_name=resource_group,
+                azure_key_vault_secret_provider_class_name=OPCUA_SPC_NAME,
+            )
+        except ResourceNotFoundError:
+            opcua_spc = {}
+
+        # check if there is a secret sync called "aio-opc-ua-broker-client-certificate", if not create one
+        try:
+            opcua_secret_sync = self.ssc_mgmt_client.secret_syncs.get(
+                resource_group_name=resource_group,
+                secret_sync_name=OPCUA_CLIENT_CERT_SECRET_SYNC_NAME,
+            )
+        except ResourceNotFoundError:
+            opcua_secret_sync = {}
+
+        secrets_to_add = []
+        for file in [public_key_file, private_key_file]:
+            file_name = os.path.basename(file)
+            file_name_info = os.path.splitext(file_name)
+            # keep the dotted extension so _upload_to_key_vault picks the matching content type
+            cert_extension = file_name_info[1]
+            cert_name = file_name_info[0].replace(".", "-")
+            secret_name = f"{cert_name}-{cert_extension.replace('.', '')}"
+
+            # iterate over secrets to check if a secret with the same name exists
+            secret_name = self._check_and_update_secret_name(secrets, secret_name, spc_keyvault_name)
+            self._upload_to_key_vault(secret_name, file, cert_extension)
+            secrets_to_add.append((secret_name, file_name))
+
+        self._add_secrets_to_spc(
+            secrets=[secret[0] for secret in secrets_to_add],
+            spc=opcua_spc,
+            resource_group=resource_group,
+            spc_keyvault_name=spc_keyvault_name,
+            spc_tenant_id=spc_tenant_id,
+            spc_client_id=spc_client_id,
+        )
+
+        self._add_secrets_to_secret_sync(
+            secrets=secrets_to_add,
+            secret_sync=opcua_secret_sync,
+            resource_group=resource_group,
+            spc_name=OPCUA_SPC_NAME,
+            secret_sync_name=OPCUA_CLIENT_CERT_SECRET_SYNC_NAME,
+        )
+
+        # update opcua extension
+        return self._update_client_secret_to_extension(
+            subject_name=subject_name,
+            application_uri=application_uri,
+        )
+
+    def _validate_key_files(self, public_key_file: str, private_key_file: str):
+        # validate public key file ends with .der
+        validate_file_extension(public_key_file, [".der"])
+        # validate private key file ends with .pem
+        validate_file_extension(private_key_file, [".pem"])
+
+        # validate public key and private key have matching file names without extension
+        public_key_name = os.path.basename(public_key_file)
+        public_key_name = os.path.splitext(public_key_name)[0]
+        private_key_name = os.path.basename(private_key_file)
+        private_key_name = os.path.splitext(private_key_name)[0]
+
+        if public_key_name != private_key_name:
+            raise ValueError(
+                f"Public key file name {public_key_name} and private key file name {private_key_name} must match."
+            )
+
+    def _process_fortos_yaml(self, object_text: str, secret_entry: Optional[dict] = None) -> str:
+        if object_text:
+            objects_obj = yaml.safe_load(object_text)
+        else:
+            objects_obj = {"array": []}
+        entry_text = yaml.safe_dump(secret_entry, indent=6)
+        objects_obj["array"].append(entry_text)
+        object_text = yaml.safe_dump(objects_obj, indent=6)
+        # TODO: formatting will be removed once fortos service fixes the formatting issue
+        return object_text.replace("\n- |", "\n  - |")
+
+    def _get_cl_resources(self, instance_name: str, resource_group: str) -> List[dict]:
+        self.instance = self.instances.show(name=instance_name, resource_group_name=resource_group)
+        self.resource_map = self.instances.get_resource_map(self.instance)
+        custom_location = self.resource_client.resources.get_by_id(
+            resource_id=self.instance["extendedLocation"]["name"], api_version=CUSTOM_LOCATIONS_API_VERSION
+        )
+        cl_resources = self.resource_map.connected_cluster.get_aio_resources(custom_location_id=custom_location["id"])
+        return cl_resources
+
+    def _find_existing_spc(self, instance_name: str, cl_resources: List[dict]) -> dict:
+        # check if secret sync is enabled by getting the default secretproviderclass
+        secretsync_spc = None
+
+        if cl_resources:
+            for resource in cl_resources:
+                if resource["type"].lower() == "microsoft.secretsynccontroller/azurekeyvaultsecretproviderclasses":
+                    resource_id_container = parse_resource_id(resource["id"])
+                    secretsync_spc = self.ssc_mgmt_client.azure_key_vault_secret_provider_classes.get(
+                        resource_group_name=resource_id_container.resource_group_name,
+                        azure_key_vault_secret_provider_class_name=resource_id_container.resource_name,
+                    )
+                    break
+
+        if not secretsync_spc:
+            raise ResourceNotFoundError(
+                f"Secret sync is not enabled for the instance {instance_name}. "
+                "Please enable secret sync before adding a certificate."
+            )
+
+        return secretsync_spc
+
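To make _process_fortos_yaml above concrete: the SPC objects property is a stringified YAML document with a top-level array, each element itself being the YAML text of one secret entry. A conceptual sketch of the parsed structure after one secret has been appended (values hypothetical; exact serialization details aside):

parsed_objects = {
    "array": [
        # each element is the YAML-dumped text of one secret entry
        "objectEncoding: hex\nobjectName: contoso-ca-der\nobjectType: secret\n",
    ]
}
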
+    def _check_and_update_secret_name(self, secrets: PageIterator, secret_name: str, spc_keyvault_name: str) -> str:
+        from rich.prompt import Confirm, Prompt
+
+        new_secret_name = secret_name
+        for secret in secrets:
+            if secret.id.endswith(secret_name):
+                # prompt user to decide on using a different secret name
+                overwrite_secret = Confirm.ask(
+                    f"Secret with name {secret_name} already exists in keyvault {spc_keyvault_name}. "
+                    "Do you want to use a different secret name?",
+                )
+
+                if overwrite_secret:
+                    new_secret_name = Prompt.ask("Please enter the new secret name")
+
+                return new_secret_name
+
+        return new_secret_name
+
+    def _upload_to_key_vault(self, secret_name: str, file_path: str, cert_extension: str):
+        with console.status(f"Uploading certificate to keyvault as secret {secret_name}..."):
+            content = read_file_content(file_path=file_path, read_as_binary=True).hex()
+            if cert_extension == ".crl":
+                content_type = "application/pkix-crl"
+            elif cert_extension == ".der":
+                content_type = "application/pkix-cert"
+            else:
+                content_type = "application/x-pem-file"
+
+            return self.keyvault_client.set_secret(
+                name=secret_name, value=content, content_type=content_type, tags={"file-encoding": "hex"}
+            )
+
+    def _add_secrets_to_spc(
+        self,
+        secrets: List[str],
+        spc: dict,
+        resource_group: str,
+        spc_keyvault_name: str,
+        spc_tenant_id: str,
+        spc_client_id: str,
+    ) -> dict:
+        spc_properties = spc.get("properties", {})
+        # stringified yaml array
+        spc_object = spc_properties.get("objects", "")
+
+        # add new secret to the list
+        for secret_name in secrets:
+            secret_entry = {
+                "objectName": secret_name,
+                "objectType": "secret",
+                "objectEncoding": "hex",
+            }
+
+            spc_object = self._process_fortos_yaml(object_text=spc_object, secret_entry=secret_entry)
+
+        if not spc:
+            logger.warning(f"Azure Key Vault Secret Provider Class {OPCUA_SPC_NAME} not found, creating new one...")
+            spc = {
+                "location": self.instance["location"],
+                
"extendedLocation": self.instance["extendedLocation"], + "properties": { + "kubernetesSecretType": "Opaque", + "secretProviderClassName": spc_name, + "serviceAccountName": SERVICE_ACCOUNT_NAME, + "objectSecretMapping": secret_mapping, + }, + } + else: + secret_sync["properties"]["objectSecretMapping"] = secret_mapping + + # create a new secret sync + with console.status(f"Adding secret reference to secret sync {secret_sync_name}..."): + poller = self.ssc_mgmt_client.secret_syncs.begin_create_or_update( + resource_group_name=resource_group, + secret_sync_name=secret_sync_name, + resource=secret_sync, + ) + return wait_for_terminal_state(poller) + + def _update_client_secret_to_extension( + self, + subject_name: str, + application_uri: str, + ): + # get the opcua extension + extensions = self.resource_map.connected_cluster.get_extensions_by_type(IOT_OPS_EXTENSION_TYPE) + aio_extension = extensions.get(IOT_OPS_EXTENSION_TYPE) + if not aio_extension: + raise ResourceNotFoundError("IoT Operations extension not found.") + + properties = aio_extension["properties"] + + config_settings = properties.get("configurationSettings", {}) + if not config_settings: + properties["configurationSettings"] = {} + + config_settings["connectors.values.securityPki.applicationCert"] = OPCUA_CLIENT_CERT_SECRET_SYNC_NAME + config_settings["connectors.values.securityPki.subjectName"] = subject_name + config_settings["connectors.values.securityPki.applicationUri"] = application_uri + + aio_extension["properties"]["configurationSettings"] = config_settings + + with console.status( + f"Updating IoT Operations extension to use new secret source {OPCUA_CLIENT_CERT_SECRET_SYNC_NAME}..." + ): + return self.resource_map.connected_cluster.update_aio_extension( + extension_name=aio_extension["name"], + properties=properties, + ) diff --git a/azext_edge/edge/util/az_client.py b/azext_edge/edge/util/az_client.py index e07bda27a..d0fc09206 100644 --- a/azext_edge/edge/util/az_client.py +++ b/azext_edge/edge/util/az_client.py @@ -35,6 +35,7 @@ if TYPE_CHECKING: from azure.core.polling import LROPoller + from azure.keyvault.secrets import SecretClient from ..vendor.clients.authzmgmt import AuthorizationManagementClient from ..vendor.clients.clusterconfigmgmt import KubernetesConfigurationClient @@ -183,6 +184,24 @@ def get_authz_client(subscription_id: str, **kwargs) -> "AuthorizationManagement ) +def get_keyvault_client(subscription_id: str, keyvault_name: str, **kwargs) -> "SecretClient": + from azure.keyvault.secrets import SecretClient + + # TODO: this only supports azure public cloud for now + client = SecretClient( + credential=AZURE_CLI_CREDENTIAL, + subscription_id=subscription_id, + user_agent_policy=UserAgentPolicy(user_agent=USER_AGENT), + vault_url=f"https://{keyvault_name}.vault.azure.net", + **kwargs, + ) + + # wait to set the access token + sleep(5) + + return client + + def wait_for_terminal_state(poller: "LROPoller", wait_sec: int = POLL_WAIT_SEC, **_) -> JSON: # resource client does not handle sigint well counter = 0 diff --git a/azext_edge/edge/util/file_operations.py b/azext_edge/edge/util/file_operations.py index 47c37e074..3fda1fa38 100644 --- a/azext_edge/edge/util/file_operations.py +++ b/azext_edge/edge/util/file_operations.py @@ -23,7 +23,7 @@ def dump_content_to_file( extension: str, fieldnames: Optional[List[str]] = None, output_dir: Optional[str] = None, - replace: bool = False + replace: bool = False, ) -> PurePath: output_dir = normalize_dir(output_dir) file_path = os.path.join(output_dir, 
f"{file_name}.{extension}") @@ -99,38 +99,36 @@ def deserialize_file_content(file_path: str) -> Any: if not valid_extension or extension == "json": # will always be a list or dict result = _try_loading_as( - loader=json.loads, - content=content, - error_type=json.JSONDecodeError, - raise_error=valid_extension + loader=json.loads, content=content, error_type=json.JSONDecodeError, raise_error=valid_extension ) if (not result and not valid_extension) or extension in ["yaml", "yml"]: # can be list, dict, str, int, bool, none result = _try_loading_as( - loader=yaml.safe_load, - content=content, - error_type=yaml.YAMLError, - raise_error=valid_extension + loader=yaml.safe_load, content=content, error_type=yaml.YAMLError, raise_error=valid_extension ) if (not result and not valid_extension) or extension == "csv": # iterrable object so lets cast to list result = _try_loading_as( - loader=csv.DictReader, - content=content.splitlines(), - error_type=csv.Error, - raise_error=valid_extension + loader=csv.DictReader, content=content.splitlines(), error_type=csv.Error, raise_error=valid_extension ) if result is not None or valid_extension: return result raise FileOperationError(f"File contents for {file_path} cannot be read.") -def _try_loading_as( - loader: Callable, - content: str, - error_type: Exception, - raise_error: bool = True -) -> Optional[Any]: +def validate_file_extension(file_name: str, expected_exts: List[str]) -> str: + ext = os.path.splitext(file_name)[1] + lowercased_exts = [ext.lower() for ext in expected_exts] + if ext.lower() not in lowercased_exts: + exts_text = ", ".join(expected_exts) + raise ValueError( + f"Invalid file extension found for {file_name}, only {exts_text} file extensions are supported." + ) + + return ext + + +def _try_loading_as(loader: Callable, content: str, error_type: Exception, raise_error: bool = True) -> Optional[Any]: try: return loader(content) except error_type as e: diff --git a/azext_edge/tests/edge/orchestration/resources/connector/__init__.py b/azext_edge/tests/edge/orchestration/resources/connector/__init__.py new file mode 100644 index 000000000..ced5cdf5b --- /dev/null +++ b/azext_edge/tests/edge/orchestration/resources/connector/__init__.py @@ -0,0 +1,5 @@ +# coding=utf-8 +# ---------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License file in the project root for license information. +# ---------------------------------------------------------------------------------------------- diff --git a/azext_edge/tests/edge/orchestration/resources/connector/opcua/__init__.py b/azext_edge/tests/edge/orchestration/resources/connector/opcua/__init__.py new file mode 100644 index 000000000..ced5cdf5b --- /dev/null +++ b/azext_edge/tests/edge/orchestration/resources/connector/opcua/__init__.py @@ -0,0 +1,5 @@ +# coding=utf-8 +# ---------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License file in the project root for license information. 
+# ---------------------------------------------------------------------------------------------- diff --git a/azext_edge/tests/edge/orchestration/resources/connector/opcua/conftest.py b/azext_edge/tests/edge/orchestration/resources/connector/opcua/conftest.py new file mode 100644 index 000000000..f8e90c23f --- /dev/null +++ b/azext_edge/tests/edge/orchestration/resources/connector/opcua/conftest.py @@ -0,0 +1,180 @@ +# coding=utf-8 +# ---------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License file in the project root for license information. +# ---------------------------------------------------------------------------------------------- + +from typing import List, Optional +from unittest.mock import Mock +import pytest + +import responses +from azext_edge.edge.providers.orchestration.resources.connector.opcua.certs import OPCUA_SPC_NAME +from azext_edge.tests.edge.orchestration.resources.conftest import get_base_endpoint, get_mock_resource +from azext_edge.tests.generators import generate_random_string + + +@pytest.fixture +def mocked_logger(mocker): + patched = mocker.patch( + "azext_edge.edge.providers.orchestration.resources.connector.opcua.certs.logger", + ) + yield patched + + +@pytest.fixture +def mocked_get_resource_client(mocker): + patched = mocker.patch("azext_edge.edge.util.queryable") + yield patched().get_resource_client + + +@pytest.fixture +def mocked_resource_map(mocker): + patched = mocker.patch( + "azext_edge.edge.providers.orchestration.resources.connector.opcua.certs.Instances", + ) + yield patched().get_resource_map + + +@pytest.fixture +def mocked_sleep(mocker): + patched = mocker.patch("azext_edge.edge.util.az_client.sleep", return_value=None) + yield patched + + +def get_spc_endpoint(spc_name: str, resource_group_name: str) -> str: + resource_path = "/azureKeyVaultSecretProviderClasses" + if spc_name: + resource_path += f"/{spc_name}" + return get_base_endpoint( + resource_group_name=resource_group_name, + resource_path=resource_path, + resource_provider="Microsoft.SecretSyncController", + api_version="2024-08-21-preview", + ) + + +def get_secretsync_endpoint(secretsync_name: str, resource_group_name: str) -> str: + resource_path = "/secretSyncs" + if secretsync_name: + resource_path += f"/{secretsync_name}" + return get_base_endpoint( + resource_group_name=resource_group_name, + resource_path=resource_path, + resource_provider="Microsoft.SecretSyncController", + api_version="2024-08-21-preview", + ) + + +def get_secret_endpoint(keyvault_name: str, secret_name: Optional[str] = None) -> str: + resource_path = "/secrets" + if secret_name: + resource_path += f"/{secret_name}" + + return f"https://{keyvault_name}.vault.azure.net{resource_path}?api-version=7.4" + + +def get_mock_spc_record(spc_name: str, resource_group_name: str, objects: Optional[str] = None) -> dict: + objects = objects or "" + return get_mock_resource( + name=spc_name, + resource_path=f"/azureKeyVaultSecretProviderClasses/{spc_name}", + properties={ + "provisioningState": "Succeeded", + "clientId": generate_random_string(), + "keyvaultName": "mock-keyvault", + "objects": objects, + "tenantId": generate_random_string(), + }, + resource_group_name=resource_group_name, + qualified_type="Microsoft.SecretSyncController/AzureKeyVaultSecretProviderClasses", + ) + + +def get_mock_secretsync_record(secretsync_name: str, resource_group_name: str, objects: 
Optional[str] = None) -> dict: + objects = objects or [] + return get_mock_resource( + name=secretsync_name, + resource_path=f"/secretSyncs/{secretsync_name}", + properties={ + "provisioningState": "Succeeded", + "kubernetesSecretType": "Opaque", + "secretProviderClassName": "opc-ua-connector", + "serviceAccountName": "aio-ssc-sa", + "objectSecretMapping": objects, + }, + resource_group_name=resource_group_name, + qualified_type="Microsoft.SecretSyncController/secretSyncs", + ) + + +def setup_mock_common_responses( + mocked_responses: responses, + spc: dict, + secretsync: dict, + opcua_secretsync_name: str, + rg_name: str, + secret_name: str, +): + # get secrets + mocked_responses.add( + method=responses.GET, + url=get_secret_endpoint(keyvault_name="mock-keyvault"), + json={ + "value": [ + { + "id": "https://mock-keyvault.vault.azure.net/secrets/mock-secret", + } + ] + }, + status=200, + content_type="application/json", + ) + + # set secret + mocked_responses.add( + method=responses.PUT, + url=get_secret_endpoint(keyvault_name="mock-keyvault", secret_name=secret_name), + json={}, + status=200, + content_type="application/json", + ) + + # get opcua spc + mocked_responses.add( + method=responses.GET, + url=get_spc_endpoint(spc_name=OPCUA_SPC_NAME, resource_group_name=rg_name), + json=spc, + status=200, + content_type="application/json", + ) + + # set opcua spc + mocked_responses.add( + method=responses.PUT, + url=get_spc_endpoint(spc_name=OPCUA_SPC_NAME, resource_group_name=rg_name), + json={}, + status=200, + content_type="application/json", + ) + + # get opcua secretsync + mocked_responses.add( + method=responses.GET, + url=get_secretsync_endpoint(secretsync_name=opcua_secretsync_name, resource_group_name=rg_name), + json=secretsync, + status=200, + content_type="application/json", + ) + + +def assemble_resource_map_mock( + resource_map_mock: Mock, + extension: Optional[dict], + custom_locations: Optional[List[dict]], + resources: Optional[List[dict]], +): + resource_map_mock().custom_locations = custom_locations + resource_map_mock().get_resources.return_value = resources + resource_map_mock().connected_cluster.get_extensions_by_type.return_value = extension + resource_map_mock().connected_cluster.get_aio_resources.return_value = resources diff --git a/azext_edge/tests/edge/orchestration/resources/connector/opcua/test_opcua_certs_client_unit.py b/azext_edge/tests/edge/orchestration/resources/connector/opcua/test_opcua_certs_client_unit.py new file mode 100644 index 000000000..548d32b5d --- /dev/null +++ b/azext_edge/tests/edge/orchestration/resources/connector/opcua/test_opcua_certs_client_unit.py @@ -0,0 +1,315 @@ +# coding=utf-8 +# ---------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License file in the project root for license information. 
+# ---------------------------------------------------------------------------------------------- + +from unittest.mock import Mock +import pytest + +import responses +from azext_edge.edge.commands_connector import ( + add_connector_opcua_client, +) +from azext_edge.edge.providers.orchestration.resources.connector.opcua.certs import ( + OPCUA_CLIENT_CERT_SECRET_SYNC_NAME, + OPCUA_SPC_NAME, +) +from azext_edge.edge.providers.orchestration.work import IOT_OPS_EXTENSION_TYPE +from .conftest import ( + assemble_resource_map_mock, + get_mock_spc_record, + get_mock_secretsync_record, + get_secret_endpoint, + get_secretsync_endpoint, + get_spc_endpoint, + setup_mock_common_responses, +) +from azext_edge.tests.generators import generate_random_string +from azext_edge.tests.helpers import generate_ops_resource + + +@pytest.mark.parametrize( + "expected_resources_map, client_app_spc, client_app_secretsync," + "public_file_name, private_file_name, expected_secret_sync", + [ + ( + { + "resources": [get_mock_spc_record(spc_name="default-spc", resource_group_name="mock-rg")], + "resource sync rules": [generate_ops_resource()], + "custom locations": [generate_ops_resource()], + "extension": {IOT_OPS_EXTENSION_TYPE: {"id": "aio-ext-id", "name": "aio-ext-name", "properties": {}}}, + "meta": { + "expected_total": 4, + "resource_batches": 1, + }, + }, + get_mock_spc_record(spc_name=OPCUA_CLIENT_CERT_SECRET_SYNC_NAME, resource_group_name="mock-rg"), + get_mock_secretsync_record( + secretsync_name=OPCUA_CLIENT_CERT_SECRET_SYNC_NAME, resource_group_name="mock-rg" + ), + "/fake/path/certificate.der", + "/fake/path/certificate.pem", + get_mock_secretsync_record( + secretsync_name=OPCUA_CLIENT_CERT_SECRET_SYNC_NAME, + resource_group_name="mock-rg", + objects="new-secret", + ), + ), + ], +) +def test_client_add( + mocker, + mocked_cmd, + mocked_sleep: Mock, + mocked_logger: Mock, + expected_resources_map: dict, + client_app_spc: dict, + client_app_secretsync: dict, + public_file_name: str, + private_file_name: str, + expected_secret_sync: dict, + mocked_resource_map: Mock, + mocked_responses: responses, +): + file_content = b"\x00\x01\x02\x03" + instance_name = generate_random_string() + rg_name = "mock-rg" + + assemble_resource_map_mock( + resource_map_mock=mocked_resource_map, + extension=expected_resources_map["extension"], + custom_locations=expected_resources_map["custom locations"], + resources=expected_resources_map["resources"], + ) + mocked_get_resource_client: Mock = mocker.patch( + "azext_edge.edge.util.queryable.get_resource_client", + ) + mocked_get_resource_client().resources.get_by_id.return_value = {"id": "mock-id"} + mocker.patch( + "azext_edge.edge.providers.orchestration.resources.connector.opcua.certs.read_file_content", + return_value=file_content, + ) + + if expected_resources_map["resources"]: + # get default spc + mocked_responses.add( + method=responses.GET, + url=get_spc_endpoint(spc_name="default-spc", resource_group_name=rg_name), + json=expected_resources_map["resources"][0], + status=200, + content_type="application/json", + ) + + setup_mock_common_responses( + mocked_responses=mocked_responses, + spc=client_app_spc, + secretsync=client_app_secretsync, + opcua_secretsync_name=OPCUA_CLIENT_CERT_SECRET_SYNC_NAME, + rg_name=rg_name, + secret_name="certificate-der", + ) + + # set secret + mocked_responses.add( + method=responses.PUT, + url=get_secret_endpoint(keyvault_name="mock-keyvault", secret_name="certificate-pem"), + json={}, + status=200, + content_type="application/json", + ) + + # 
set opcua secretsync + mocked_responses.add( + method=responses.PUT, + url=get_secretsync_endpoint( + secretsync_name=OPCUA_CLIENT_CERT_SECRET_SYNC_NAME, resource_group_name=rg_name + ), + json=expected_secret_sync, + status=200, + content_type="application/json", + ) + + result = add_connector_opcua_client( + cmd=mocked_cmd, + instance_name=instance_name, + resource_group=rg_name, + public_key_file=public_file_name, + private_key_file=private_file_name, + application_uri="uri", + subject_name="subjectname", + ) + + if result: + if not client_app_spc: + assert ( + mocked_logger.warning.call_args[0][0] == f"Azure Key Vault Secret Provider Class {OPCUA_SPC_NAME} " + "not found, creating new one..." + ) + + if not client_app_secretsync: + assert ( + mocked_logger.warning.call_args[0][0] == f"Secret Sync {OPCUA_CLIENT_CERT_SECRET_SYNC_NAME} " + "not found, creating new one..." + ) + mocked_resource_map().connected_cluster.get_extensions_by_type.assert_called_once_with( + "microsoft.iotoperations" + ) + mocked_resource_map().connected_cluster.update_aio_extension.assert_called_once_with( + extension_name=expected_resources_map["extension"][IOT_OPS_EXTENSION_TYPE]["name"], + properties={ + "configurationSettings": { + "connectors.values.securityPki.applicationCert": OPCUA_CLIENT_CERT_SECRET_SYNC_NAME, + "connectors.values.securityPki.subjectName": "subjectname", + "connectors.values.securityPki.applicationUri": "uri", + } + }, + ) + + +@pytest.mark.parametrize( + "expected_resources_map, client_app_spc, client_app_secretsync," + "public_file_name, private_file_name, expected_error", + [ + # no default spc + ( + { + "resources": None, + "resource sync rules": None, + "custom locations": None, + "extension": None, + "meta": { + "expected_total": 0, + }, + }, + {}, + {}, + "/fake/path/certificate.der", + "/fake/path/certificate.pem", + "Please enable secret sync before adding certificate.", + ), + # no aio extension + ( + { + "resources": [get_mock_spc_record(spc_name="default-spc", resource_group_name="mock-rg")], + "resource sync rules": [generate_ops_resource()], + "custom locations": [generate_ops_resource()], + "extension": {}, + "meta": { + "expected_total": 4, + "resource_batches": 1, + }, + }, + get_mock_spc_record(spc_name=OPCUA_CLIENT_CERT_SECRET_SYNC_NAME, resource_group_name="mock-rg"), + get_mock_secretsync_record( + secretsync_name=OPCUA_CLIENT_CERT_SECRET_SYNC_NAME, resource_group_name="mock-rg" + ), + "/fake/path/certificate.der", + "/fake/path/certificate.pem", + "IoT Operations extension not found.", + ), + # file names not matching + ( + { + "resources": [get_mock_spc_record(spc_name="default-spc", resource_group_name="mock-rg")], + "resource sync rules": [generate_ops_resource()], + "custom locations": [generate_ops_resource()], + "extension": {IOT_OPS_EXTENSION_TYPE: {"id": "aio-ext-id", "name": "aio-ext-name", "properties": {}}}, + "meta": { + "expected_total": 4, + "resource_batches": 1, + }, + }, + {}, + {}, + "/fake/path/pubkey.der", + "/fake/path/prikey.pem", + "Public key file pubkey and private key file prikey must match.", + ), + ], +) +def test_client_add_errors( + mocker, + mocked_cmd, + mocked_sleep: Mock, + mocked_logger: Mock, + expected_resources_map: dict, + client_app_spc: dict, + client_app_secretsync: dict, + public_file_name: str, + private_file_name: str, + expected_error: str, + mocked_resource_map: Mock, + mocked_responses: responses, +): + file_content = b"\x00\x01\x02\x03" + instance_name = "mock-instance" + rg_name = "mock-rg" + + 
assemble_resource_map_mock( + resource_map_mock=mocked_resource_map, + extension=expected_resources_map["extension"], + custom_locations=expected_resources_map["custom locations"], + resources=expected_resources_map["resources"], + ) + mocked_get_resource_client: Mock = mocker.patch( + "azext_edge.edge.util.queryable.get_resource_client", + ) + mocked_get_resource_client().resources.get_by_id.return_value = {"id": "mock-id"} + mocker.patch( + "azext_edge.edge.providers.orchestration.resources.connector.opcua.certs.read_file_content", + return_value=file_content, + ) + + if expected_resources_map["resources"]: + # get default spc + mocked_responses.add( + method=responses.GET, + url=get_spc_endpoint(spc_name="default-spc", resource_group_name=rg_name), + json=expected_resources_map["resources"][0], + status=200, + content_type="application/json", + ) + + if client_app_spc: + setup_mock_common_responses( + mocked_responses=mocked_responses, + spc=client_app_spc, + secretsync=client_app_secretsync, + opcua_secretsync_name=OPCUA_CLIENT_CERT_SECRET_SYNC_NAME, + rg_name=rg_name, + secret_name="certificate-der", + ) + + # set secret + mocked_responses.add( + method=responses.PUT, + url=get_secret_endpoint(keyvault_name="mock-keyvault", secret_name="certificate-pem"), + json={}, + status=200, + content_type="application/json", + ) + + # set opcua secretsync + mocked_responses.add( + method=responses.PUT, + url=get_secretsync_endpoint( + secretsync_name=OPCUA_CLIENT_CERT_SECRET_SYNC_NAME, resource_group_name=rg_name + ), + json=client_app_secretsync, + status=200, + content_type="application/json", + ) + + with pytest.raises(Exception) as e: + add_connector_opcua_client( + cmd=mocked_cmd, + instance_name=instance_name, + resource_group=rg_name, + public_key_file=public_file_name, + private_key_file=private_file_name, + application_uri="uri", + subject_name="subjectname", + ) + + assert expected_error in e.value.args[0] diff --git a/azext_edge/tests/edge/orchestration/resources/connector/opcua/test_opcua_certs_issuer_unit.py b/azext_edge/tests/edge/orchestration/resources/connector/opcua/test_opcua_certs_issuer_unit.py new file mode 100644 index 000000000..994f9eab0 --- /dev/null +++ b/azext_edge/tests/edge/orchestration/resources/connector/opcua/test_opcua_certs_issuer_unit.py @@ -0,0 +1,403 @@ +# coding=utf-8 +# ---------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License file in the project root for license information. 
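The issuer tests that follow center on one rule: a `.crl` may only be added when a certificate with the same base name (`.der` or `.crt`) is already mapped in the secret sync. A condensed, hypothetical restatement of that matching check, mirroring the logic the tests reproduce from `OpcUACerts.issuer_add`:

    import os
    from typing import Dict, List

    def crl_has_companion_cert(crl_path: str, object_secret_mapping: List[Dict[str, str]]) -> bool:
        # The revocation list is accepted only if a .der or .crt with the same
        # base name is already present as a targetKey in objectSecretMapping.
        file_name = os.path.basename(crl_path)
        candidates = {file_name.replace(".crl", ".der"), file_name.replace(".crl", ".crt")}
        return any(mapping.get("targetKey") in candidates for mapping in object_secret_mapping)

    # Matches the parametrized "adding .crl with corresponding .der or crt" case below.
    assert crl_has_companion_cert(
        "/fake/path/certificate.crl",
        [{"sourcePath": "secret1", "targetKey": "certificate.der"}],
    )
    assert not crl_has_companion_cert("/fake/path/certificate2.crl", [])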
+# ---------------------------------------------------------------------------------------------- + +import os +from unittest.mock import Mock +import pytest + +import responses +from azext_edge.edge.commands_connector import add_connector_opcua_issuer +from azext_edge.edge.providers.orchestration.resources.connector.opcua.certs import ( + OPCUA_ISSUER_LIST_SECRET_SYNC_NAME, + OPCUA_SPC_NAME, +) +from azext_edge.tests.edge.orchestration.resources.connector.opcua.conftest import ( + get_mock_spc_record, + get_mock_secretsync_record, + get_secret_endpoint, + get_secretsync_endpoint, + get_spc_endpoint, +) +from azext_edge.tests.generators import generate_random_string +from azext_edge.tests.helpers import generate_ops_resource + + +@pytest.mark.parametrize( + "expected_resources_map, issuer_list_spc, issuer_list_secretsync, file_name, secret_name, expected_secret_sync", + [ + ( + { + "resources": [get_mock_spc_record(spc_name="default-spc", resource_group_name="mock-rg")], + "resource sync rules": [generate_ops_resource()], + "custom locations": [generate_ops_resource()], + "extensions": [generate_ops_resource()], + "meta": { + "expected_total": 4, + "resource_batches": 1, + }, + }, + get_mock_spc_record(spc_name=OPCUA_ISSUER_LIST_SECRET_SYNC_NAME, resource_group_name="mock-rg"), + get_mock_secretsync_record( + secretsync_name=OPCUA_ISSUER_LIST_SECRET_SYNC_NAME, resource_group_name="mock-rg" + ), + "/fake/path/certificate.der", + "new-secret", + get_mock_secretsync_record( + secretsync_name=OPCUA_ISSUER_LIST_SECRET_SYNC_NAME, + resource_group_name="mock-rg", + objects="new-secret", + ), + ), + # adding .crl with corresponding .der or crt + ( + { + "resources": [get_mock_spc_record(spc_name="default-spc", resource_group_name="mock-rg")], + "resource sync rules": [generate_ops_resource()], + "custom locations": [generate_ops_resource()], + "extensions": [generate_ops_resource()], + "meta": { + "expected_total": 4, + "resource_batches": 1, + }, + }, + get_mock_spc_record(spc_name=OPCUA_ISSUER_LIST_SECRET_SYNC_NAME, resource_group_name="mock-rg"), + get_mock_secretsync_record( + secretsync_name=OPCUA_ISSUER_LIST_SECRET_SYNC_NAME, + resource_group_name="mock-rg", + objects=[ + { + "sourcePath": "secret1", + "targetKey": "certificate.der", + } + ], + ), + "/fake/path/certificate.crl", + "new-secret", + get_mock_secretsync_record( + secretsync_name=OPCUA_ISSUER_LIST_SECRET_SYNC_NAME, + resource_group_name="mock-rg", + objects="new-secret", + ), + ), + ], +) +def test_issuer_add( + mocker, + mocked_cmd, + mocked_logger: Mock, + mocked_sleep: Mock, + expected_resources_map: dict, + issuer_list_spc: dict, + issuer_list_secretsync: dict, + file_name: str, + secret_name: str, + expected_secret_sync: dict, + mocked_responses: responses, +): + file_content = b"\x00\x01\x02\x03" + instance_name = generate_random_string() + rg_name = "mock-rg" + + mocker.patch( + "azext_edge.edge.providers.orchestration.resources.connector.opcua.certs.OpcUACerts._get_cl_resources", + return_value=expected_resources_map["resources"], + ) + mocker.patch( + "azext_edge.edge.providers.orchestration.resources.connector.opcua.certs.read_file_content", + return_value=file_content, + ) + + if expected_resources_map["resources"]: + # get default spc + mocked_responses.add( + method=responses.GET, + url=get_spc_endpoint(spc_name="default-spc", resource_group_name=rg_name), + json=expected_resources_map["resources"][0], + status=200, + content_type="application/json", + ) + + # get opcua secretsync + mocked_responses.add( + 
method=responses.GET, + url=get_secretsync_endpoint( + secretsync_name=OPCUA_ISSUER_LIST_SECRET_SYNC_NAME, resource_group_name=rg_name + ), + json=issuer_list_secretsync, + status=200, + content_type="application/json", + ) + + matched_names = [] + if file_name.endswith("crl") and issuer_list_secretsync: + file_name = os.path.basename(file_name) + possible_file_names = [file_name.replace(".crl", ".der"), file_name.replace(".crl", ".crt")] + matched_names = [ + mapping["targetKey"] + for mapping in issuer_list_secretsync["properties"]["objectSecretMapping"] + if mapping["targetKey"] in possible_file_names + ] + + if not (file_name.endswith("crl") and not matched_names): + # get secrets + mocked_responses.add( + method=responses.GET, + url=get_secret_endpoint(keyvault_name="mock-keyvault"), + json={ + "value": [ + { + "id": "https://mock-keyvault.vault.azure.net/secrets/mock-secret", + } + ] + }, + status=200, + content_type="application/json", + ) + + # set secret + mocked_responses.add( + method=responses.PUT, + url=get_secret_endpoint(keyvault_name="mock-keyvault", secret_name=secret_name), + json={}, + status=200, + content_type="application/json", + ) + + # get opcua spc + mocked_responses.add( + method=responses.GET, + url=get_spc_endpoint(spc_name=OPCUA_SPC_NAME, resource_group_name=rg_name), + json=issuer_list_spc, + status=200, + content_type="application/json", + ) + + if issuer_list_spc: + # set opcua spc + mocked_responses.add( + method=responses.PUT, + url=get_spc_endpoint(spc_name=OPCUA_SPC_NAME, resource_group_name=rg_name), + json={}, + status=200, + content_type="application/json", + ) + + if issuer_list_secretsync: + # set opcua secretsync + mocked_responses.add( + method=responses.PUT, + url=get_secretsync_endpoint( + secretsync_name=OPCUA_ISSUER_LIST_SECRET_SYNC_NAME, resource_group_name=rg_name + ), + json=expected_secret_sync, + status=200, + content_type="application/json", + ) + + result = add_connector_opcua_issuer( + cmd=mocked_cmd, + instance_name=instance_name, + resource_group=rg_name, + file=file_name, + secret_name=secret_name, + ) + + if result: + if not issuer_list_spc: + assert ( + mocked_logger.warning.call_args[0][0] == f"Azure Key Vault Secret Provider Class {OPCUA_SPC_NAME} " + "not found, creating new one..." + ) + return + + if not issuer_list_secretsync: + assert ( + mocked_logger.warning.call_args[0][0] == f"Secret Sync {OPCUA_ISSUER_LIST_SECRET_SYNC_NAME} " + "not found, creating new one..." 
+ ) + return + assert result == expected_secret_sync + + +@pytest.mark.parametrize( + "expected_resources_map, issuer_list_spc, issuer_list_secretsync, file_name, secret_name, expected_error", + [ + ( + { + "resources": None, + "resource sync rules": None, + "custom locations": None, + "extensions": None, + "meta": { + "expected_total": 0, + }, + }, + {}, + {}, + "/fake/path/certificate1.crt", + None, + "Please enable secret sync before adding certificate.", + ), + # adding .crl without corresponding .der or crt + ( + { + "resources": [get_mock_spc_record(spc_name="default-spc", resource_group_name="mock-rg")], + "resource sync rules": [generate_ops_resource()], + "custom locations": [generate_ops_resource()], + "extensions": [generate_ops_resource()], + "meta": { + "expected_total": 4, + "resource_batches": 1, + }, + }, + get_mock_spc_record(spc_name=OPCUA_ISSUER_LIST_SECRET_SYNC_NAME, resource_group_name="mock-rg"), + get_mock_secretsync_record( + secretsync_name=OPCUA_ISSUER_LIST_SECRET_SYNC_NAME, resource_group_name="mock-rg" + ), + "/fake/path/certificate2.crl", + "new-secret", + "Cannot add .crl certificate2.crl without corresponding .crt or .der file.", + ), + # duplicate targetKey in objectSecretMapping + ( + { + "resources": [get_mock_spc_record(spc_name="default-spc", resource_group_name="mock-rg")], + "resource sync rules": [generate_ops_resource()], + "custom locations": [generate_ops_resource()], + "extensions": [generate_ops_resource()], + "meta": { + "expected_total": 4, + "resource_batches": 1, + }, + }, + get_mock_spc_record(spc_name=OPCUA_ISSUER_LIST_SECRET_SYNC_NAME, resource_group_name="mock-rg"), + get_mock_secretsync_record( + secretsync_name=OPCUA_ISSUER_LIST_SECRET_SYNC_NAME, + resource_group_name="mock-rg", + objects=[ + { + "targetKey": "certificate3.der", + }, + ], + ), + "/fake/path/certificate3.der", + "new-secret", + "Cannot have duplicate targetKey in objectSecretMapping.", + ), + ], +) +def test_issuer_add_errors( + mocker, + mocked_cmd, + mocked_logger: Mock, + mocked_sleep: Mock, + expected_resources_map: dict, + issuer_list_spc: dict, + issuer_list_secretsync: dict, + file_name: str, + secret_name: str, + expected_error: str, + mocked_responses: responses, +): + file_content = b"\x00\x01\x02\x03" + instance_name = generate_random_string() + rg_name = "mock-rg" + + mocker.patch( + "azext_edge.edge.providers.orchestration.resources.connector.opcua.certs.OpcUACerts._get_cl_resources", + return_value=expected_resources_map["resources"], + ) + mocker.patch( + "azext_edge.edge.providers.orchestration.resources.connector.opcua.certs.read_file_content", + return_value=file_content, + ) + + if expected_resources_map["resources"]: + # get default spc + mocked_responses.add( + method=responses.GET, + url=get_spc_endpoint(spc_name="default-spc", resource_group_name=rg_name), + json=expected_resources_map["resources"][0], + status=200, + content_type="application/json", + ) + + # get opcua secretsync + mocked_responses.add( + method=responses.GET, + url=get_secretsync_endpoint( + secretsync_name=OPCUA_ISSUER_LIST_SECRET_SYNC_NAME, resource_group_name=rg_name + ), + json=issuer_list_secretsync, + status=200, + content_type="application/json", + ) + + if not file_name.endswith("crl"): + # get secrets + mocked_responses.add( + method=responses.GET, + url=get_secret_endpoint(keyvault_name="mock-keyvault"), + json={ + "value": [ + { + "id": "https://mock-keyvault.vault.azure.net/secrets/mock-secret", + } + ] + }, + status=200, + content_type="application/json", + ) + + # 
set secret + mocked_responses.add( + method=responses.PUT, + url=get_secret_endpoint(keyvault_name="mock-keyvault", secret_name=secret_name), + json={}, + status=200, + content_type="application/json", + ) + + # get opcua spc + mocked_responses.add( + method=responses.GET, + url=get_spc_endpoint(spc_name=OPCUA_SPC_NAME, resource_group_name=rg_name), + json=issuer_list_spc, + status=200, + content_type="application/json", + ) + + # set opcua spc + mocked_responses.add( + method=responses.PUT, + url=get_spc_endpoint(spc_name=OPCUA_SPC_NAME, resource_group_name=rg_name), + json={}, + status=200, + content_type="application/json", + ) + + if not file_name.endswith("der"): + # set opcua secretsync + mocked_responses.add( + method=responses.PUT, + url=get_secretsync_endpoint( + secretsync_name=OPCUA_ISSUER_LIST_SECRET_SYNC_NAME, resource_group_name=rg_name + ), + json={}, + status=200, + content_type="application/json", + ) + + with pytest.raises(Exception) as e: + add_connector_opcua_issuer( + cmd=mocked_cmd, + instance_name=instance_name, + resource_group=rg_name, + file=file_name, + secret_name=secret_name, + ) + + assert expected_error in e.value.args[0] diff --git a/azext_edge/tests/edge/orchestration/resources/connector/opcua/test_opcua_certs_trust_unit.py b/azext_edge/tests/edge/orchestration/resources/connector/opcua/test_opcua_certs_trust_unit.py new file mode 100644 index 000000000..33541bfd5 --- /dev/null +++ b/azext_edge/tests/edge/orchestration/resources/connector/opcua/test_opcua_certs_trust_unit.py @@ -0,0 +1,247 @@ +# coding=utf-8 +# ---------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License file in the project root for license information. 
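The trust-list tests that follow exercise a second guard: a file whose name already appears as a `targetKey` in the secret sync must be rejected. A schematic sketch of that guard as a hypothetical helper; the CLI raises its own error type, but with the same message the error cases below assert:

    import os
    from typing import Dict, List

    def ensure_unique_target_key(file_path: str, object_secret_mapping: List[Dict[str, str]]) -> None:
        # Reject a certificate whose file name collides with an existing targetKey.
        target_key = os.path.basename(file_path)
        if any(mapping.get("targetKey") == target_key for mapping in object_secret_mapping):
            raise ValueError("Cannot have duplicate targetKey in objectSecretMapping.")

    # A distinct name passes; re-adding "certificate.der" would raise.
    ensure_unique_target_key("/fake/path/new-cert.der", [{"targetKey": "certificate.der"}])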
+# ---------------------------------------------------------------------------------------------- + +import os +from unittest.mock import Mock +import pytest + +import responses +from azext_edge.edge.commands_connector import add_connector_opcua_trust +from azext_edge.edge.providers.orchestration.resources.connector.opcua.certs import ( + OPCUA_SPC_NAME, + OPCUA_TRUST_LIST_SECRET_SYNC_NAME, +) +from azext_edge.tests.edge.orchestration.resources.connector.opcua.conftest import ( + get_mock_spc_record, + get_mock_secretsync_record, + get_secretsync_endpoint, + get_spc_endpoint, + setup_mock_common_responses, +) +from azext_edge.tests.generators import generate_random_string +from azext_edge.tests.helpers import generate_ops_resource + + +@pytest.mark.parametrize( + "expected_resources_map, trust_list_spc, trust_list_secretsync, file_name, secret_name, expected_secret_sync", + [ + ( + { + "resources": [get_mock_spc_record(spc_name="default-spc", resource_group_name="mock-rg")], + "resource sync rules": [generate_ops_resource()], + "custom locations": [generate_ops_resource()], + "extensions": [generate_ops_resource()], + "meta": { + "expected_total": 4, + "resource_batches": 1, + }, + }, + get_mock_spc_record(spc_name=OPCUA_TRUST_LIST_SECRET_SYNC_NAME, resource_group_name="mock-rg"), + get_mock_secretsync_record( + secretsync_name=OPCUA_TRUST_LIST_SECRET_SYNC_NAME, resource_group_name="mock-rg" + ), + "/fake/path/certificate.der", + "new-secret", + get_mock_secretsync_record( + secretsync_name=OPCUA_TRUST_LIST_SECRET_SYNC_NAME, + resource_group_name="mock-rg", + objects="new-secret", + ), + ), + ], +) +def test_trust_add( + mocker, + mocked_cmd, + mocked_logger: Mock, + mocked_sleep: Mock, + expected_resources_map: dict, + trust_list_spc: dict, + trust_list_secretsync: dict, + file_name: str, + secret_name: str, + expected_secret_sync: dict, + mocked_responses: responses, +): + file_content = b"\x00\x01\x02\x03" + instance_name = generate_random_string() + rg_name = "mock-rg" + + mocker.patch( + "azext_edge.edge.providers.orchestration.resources.connector.opcua.certs.OpcUACerts._get_cl_resources", + return_value=expected_resources_map["resources"], + ) + mocker.patch( + "azext_edge.edge.providers.orchestration.resources.connector.opcua.certs.read_file_content", + return_value=file_content, + ) + + if expected_resources_map["resources"]: + # get default spc + mocked_responses.add( + method=responses.GET, + url=get_spc_endpoint(spc_name="default-spc", resource_group_name=rg_name), + json=expected_resources_map["resources"][0], + status=200, + content_type="application/json", + ) + + if trust_list_spc: + setup_mock_common_responses( + mocked_responses=mocked_responses, + spc=trust_list_spc, + secretsync=trust_list_secretsync, + opcua_secretsync_name=OPCUA_TRUST_LIST_SECRET_SYNC_NAME, + rg_name=rg_name, + secret_name=secret_name, + ) + + matched_target_key = False + mapping = trust_list_secretsync["properties"]["objectSecretMapping"] + + if mapping: + matched_target_key = mapping[0]["targetKey"] == os.path.basename(file_name) + + if not matched_target_key: + # set opcua secretsync + mocked_responses.add( + method=responses.PUT, + url=get_secretsync_endpoint( + secretsync_name=OPCUA_TRUST_LIST_SECRET_SYNC_NAME, resource_group_name=rg_name + ), + json=expected_secret_sync, + status=200, + content_type="application/json", + ) + + result = add_connector_opcua_trust( + cmd=mocked_cmd, + instance_name=instance_name, + resource_group=rg_name, + file=file_name, + secret_name=secret_name, + ) + + if 
result: + if not trust_list_spc: + assert ( + mocked_logger.warning.call_args[0][0] == f"Azure Key Vault Secret Provider Class {OPCUA_SPC_NAME} " + "not found, creating new one..." + ) + return + + if not trust_list_secretsync: + assert ( + mocked_logger.warning.call_args[0][0] == f"Secret Sync {OPCUA_TRUST_LIST_SECRET_SYNC_NAME} " + "not found, creating new one..." + ) + return + assert result == expected_secret_sync + + +@pytest.mark.parametrize( + "expected_resources_map, trust_list_spc, trust_list_secretsync, file_name, secret_name, expected_error", + [ + ( + { + "resources": None, + "resource sync rules": None, + "custom locations": None, + "extensions": None, + "meta": { + "expected_total": 0, + }, + }, + {}, + {}, + "/fake/path/certificate1.crt", + None, + "Please enable secret sync before adding certificate.", + ), + # duplicate target key + ( + { + "resources": [get_mock_spc_record(spc_name="default-spc", resource_group_name="mock-rg")], + "resource sync rules": [generate_ops_resource()], + "custom locations": [generate_ops_resource()], + "extensions": [generate_ops_resource()], + "meta": { + "expected_total": 4, + "resource_batches": 1, + }, + }, + get_mock_spc_record(spc_name=OPCUA_TRUST_LIST_SECRET_SYNC_NAME, resource_group_name="mock-rg"), + get_mock_secretsync_record( + secretsync_name=OPCUA_TRUST_LIST_SECRET_SYNC_NAME, + resource_group_name="mock-rg", + objects=[ + { + "sourcePath": "secret1", + "targetKey": "certificate.der", + } + ], + ), + "/fake/path/certificate.der", + "new-secret", + "Cannot have duplicate targetKey in objectSecretMapping.", + ), + ], +) +def test_trust_add_error( + mocker, + mocked_cmd, + mocked_logger: Mock, + mocked_sleep: Mock, + expected_resources_map: dict, + trust_list_spc: dict, + trust_list_secretsync: dict, + file_name: str, + secret_name: str, + expected_error: str, + mocked_responses: responses, +): + file_content = b"\x00\x01\x02\x03" + instance_name = generate_random_string() + rg_name = "mock-rg" + + mocker.patch( + "azext_edge.edge.providers.orchestration.resources.connector.opcua.certs.OpcUACerts._get_cl_resources", + return_value=expected_resources_map["resources"], + ) + mocker.patch( + "azext_edge.edge.providers.orchestration.resources.connector.opcua.certs.read_file_content", + return_value=file_content, + ) + + if expected_resources_map["resources"]: + # get default spc + mocked_responses.add( + method=responses.GET, + url=get_spc_endpoint(spc_name="default-spc", resource_group_name=rg_name), + json=expected_resources_map["resources"][0], + status=200, + content_type="application/json", + ) + + if trust_list_spc: + setup_mock_common_responses( + mocked_responses=mocked_responses, + spc=trust_list_spc, + secretsync=trust_list_secretsync, + opcua_secretsync_name=OPCUA_TRUST_LIST_SECRET_SYNC_NAME, + rg_name=rg_name, + secret_name=secret_name, + ) + + with pytest.raises(Exception) as e: + add_connector_opcua_trust( + cmd=mocked_cmd, + instance_name=instance_name, + resource_group=rg_name, + file=file_name, + secret_name=secret_name, + ) + assert expected_error in e.value.args[0] diff --git a/azext_edge/tests/edge/orchestration/test_deletion_unit.py b/azext_edge/tests/edge/orchestration/test_deletion_unit.py index 2ca77ffc9..c9fddb63d 100644 --- a/azext_edge/tests/edge/orchestration/test_deletion_unit.py +++ b/azext_edge/tests/edge/orchestration/test_deletion_unit.py @@ -11,6 +11,7 @@ from azext_edge.edge.providers.orchestration.deletion import IoTOperationsResource from azext_edge.edge.providers.orchestration.work import 
IOT_OPS_EXTENSION_TYPE +from azext_edge.tests.helpers import generate_ops_resource from ...generators import generate_random_string @@ -62,20 +63,6 @@ def spy_deletion_manager(mocker): } -def _generate_ops_resource(segments: int = 1) -> IoTOperationsResource: - resource_id = "" - for _ in range(segments): - resource_id = f"{resource_id}/{generate_random_string()}" - - resource = IoTOperationsResource( - resource_id=resource_id, - display_name=resource_id.split("/")[-1], - api_version=generate_random_string(), - ) - - return resource - - def _assemble_resource_map_mock( resource_map_mock: Mock, extensions: Optional[List[dict]], @@ -91,11 +78,7 @@ def _assemble_resource_map_mock( IOT_OPS_EXTENSION_TYPE: {"id": "aio-ext-id"} } resource_map_mock().extensions.append( - IoTOperationsResource( - resource_id="aio-ext-id", - display_name="aio-extension", - api_version="aio-ext-api" - ) + IoTOperationsResource(resource_id="aio-ext-id", display_name="aio-extension", api_version="aio-ext-api") ) @@ -113,11 +96,11 @@ def _assemble_resource_map_mock( }, { "resources": [ - _generate_ops_resource(4), + generate_ops_resource(4), ], - "resource sync rules": [_generate_ops_resource()], - "custom locations": [_generate_ops_resource()], - "extensions": [_generate_ops_resource()], + "resource sync rules": [generate_ops_resource()], + "custom locations": [generate_ops_resource()], + "extensions": [generate_ops_resource()], "meta": { "expected_total": 4, "resource_batches": 1, @@ -125,14 +108,14 @@ def _assemble_resource_map_mock( }, { "resources": [ - _generate_ops_resource(4), - _generate_ops_resource(4), - _generate_ops_resource(3), - _generate_ops_resource(1), + generate_ops_resource(4), + generate_ops_resource(4), + generate_ops_resource(3), + generate_ops_resource(1), ], "resource sync rules": [], - "custom locations": [_generate_ops_resource()], - "extensions": [_generate_ops_resource(), _generate_ops_resource()], + "custom locations": [generate_ops_resource()], + "extensions": [generate_ops_resource(), generate_ops_resource()], "meta": { "expected_total": 7, "resource_batches": 3, @@ -198,10 +181,10 @@ def test_batch_resources( }, }, { - "resources": [_generate_ops_resource(4), _generate_ops_resource(2)], - "resource sync rules": [_generate_ops_resource(), _generate_ops_resource()], - "custom locations": [_generate_ops_resource()], - "extensions": [_generate_ops_resource()], + "resources": [generate_ops_resource(4), generate_ops_resource(2)], + "resource sync rules": [generate_ops_resource(), generate_ops_resource()], + "custom locations": [generate_ops_resource()], + "extensions": [generate_ops_resource()], "meta": { "expected_total": 5, "resource_batches": 2, @@ -210,10 +193,10 @@ def test_batch_resources( }, # Currently no associated custom location means no non-extensions get deleted { - "resources": [_generate_ops_resource(4), _generate_ops_resource(2)], - "resource sync rules": [_generate_ops_resource()], + "resources": [generate_ops_resource(4), generate_ops_resource(2)], + "resource sync rules": [generate_ops_resource()], "custom locations": [], - "extensions": [_generate_ops_resource()], + "extensions": [generate_ops_resource()], "meta": { "expected_total": 4, "resource_batches": 2, @@ -224,7 +207,7 @@ def test_batch_resources( "resources": [], "resource sync rules": [], "custom locations": [], - "extensions": [_generate_ops_resource()], + "extensions": [generate_ops_resource()], "meta": { "expected_total": 1, "resource_batches": 0, diff --git a/azext_edge/tests/helpers.py 
b/azext_edge/tests/helpers.py
index 2a163142e..add7040d9 100644
--- a/azext_edge/tests/helpers.py
+++ b/azext_edge/tests/helpers.py
@@ -14,6 +14,8 @@ import pytest
 
 from azext_edge.edge.providers.edge_api.base import EdgeResourceApi
+from azext_edge.edge.providers.orchestration.resource_map import IoTOperationsResource
+from azext_edge.tests.generators import generate_random_string
 
 logger = get_logger(__name__)
 
@@ -188,3 +190,17 @@ def sort_kubectl_items_by_namespace(
         if include_all:
             sorted_items["_all_"][name] = item
     return sorted_items
+
+
+def generate_ops_resource(segments: int = 1) -> IoTOperationsResource:
+    resource_id = ""
+    for _ in range(segments):
+        resource_id = f"{resource_id}/{generate_random_string()}"
+
+    resource = IoTOperationsResource(
+        resource_id=resource_id,
+        display_name=resource_id.split("/")[-1],
+        api_version=generate_random_string(),
+    )
+
+    return resource

From 7f78065ae26ad7d360001716436789d9bb682b0f Mon Sep 17 00:00:00 2001
From: Ryan K
Date: Thu, 24 Oct 2024 09:08:25 -0700
Subject: [PATCH 15/26] fix: fix int test assertion casing in service response (#422)

---
 azext_edge/tests/edge/init/int/test_init_int.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/azext_edge/tests/edge/init/int/test_init_int.py b/azext_edge/tests/edge/init/int/test_init_int.py
index bf4445bf2..6bb0a4a34 100644
--- a/azext_edge/tests/edge/init/int/test_init_int.py
+++ b/azext_edge/tests/edge/init/int/test_init_int.py
@@ -226,7 +226,7 @@ def assert_aio_instance(
     instance_props = instance_show["properties"]
 
     assert instance_props.get("description") == description
-    assert instance_props["schemaRegistryRef"] == {"resource_id": schema_registry_id}
+    assert instance_props["schemaRegistryRef"] == {"resourceId": schema_registry_id}
 
     tree = run(f"az iot ops show -n {instance_name} -g {resource_group} --tree")
     # no resource sync rules if disable rsync rules

From a561fd69d0d7b061be84d08cd64befa223ca57c1 Mon Sep 17 00:00:00 2001
From: Elsie4ever <3467996@gmail.com>
Date: Thu, 24 Oct 2024 09:14:28 -0700
Subject: [PATCH 16/26] fix: remove prompts in `az iot ops connector opcua` (#423)

* Remove prompt in cert management

---------

Co-authored-by: Elsie Ju
---
 azext_edge/edge/_help.py                      | 11 +++
 azext_edge/edge/commands_connector.py         |  4 +
 azext_edge/edge/params.py                     | 12 +++
 .../resources/connector/opcua/certs.py        | 38 +++++----
 .../resources/connector/opcua/conftest.py     | 71 ++++++++--------
 .../opcua/test_opcua_certs_issuer_unit.py     | 83 ++++++++++++-------
 .../opcua/test_opcua_certs_trust_unit.py      | 22 +++++
 7 files changed, 160 insertions(+), 81 deletions(-)

diff --git a/azext_edge/edge/_help.py b/azext_edge/edge/_help.py
index 5c5e73b19..2eddabcb7 100644
--- a/azext_edge/edge/_help.py
+++ b/azext_edge/edge/_help.py
@@ -1425,6 +1425,17 @@ def load_iotops_help():
         az iot ops connector opcua client add --instance instance --resource-group instanceresourcegroup
         --public-key-file "newopc.der" --private-key-file "newopc.pem" --subject-name "aio-opc-opcuabroker"
         --application-uri "urn:microsoft.com:aio:opc:opcuabroker"
+      - name: Add a client certificate with custom public and private key secret name.
+ text: > + az iot ops connector opcua client add + --instance instance + --resource-group instanceresourcegroup + --public-key-file "newopc.der" + --private-key-file "newopc.pem" + --public-key-secret public-secret-name + --private-key-secret private-secret-name + --subject-name "aio-opc-opcuabroker" + --application-uri "urn:microsoft.com:aio:opc:opcuabroker" """ helps[ diff --git a/azext_edge/edge/commands_connector.py b/azext_edge/edge/commands_connector.py index 317ffc242..69f11c224 100644 --- a/azext_edge/edge/commands_connector.py +++ b/azext_edge/edge/commands_connector.py @@ -46,6 +46,8 @@ def add_connector_opcua_client( private_key_file: str, subject_name: str, application_uri: str, + public_key_secret_name: Optional[str] = None, + private_key_secret_name: Optional[str] = None, ) -> dict: return OpcUACerts(cmd).client_add( instance_name=instance_name, @@ -54,4 +56,6 @@ def add_connector_opcua_client( private_key_file=private_key_file, subject_name=subject_name, application_uri=application_uri, + public_key_secret_name=public_key_secret_name, + private_key_secret_name=private_key_secret_name, ) diff --git a/azext_edge/edge/params.py b/azext_edge/edge/params.py index 4dc525542..8320372a7 100644 --- a/azext_edge/edge/params.py +++ b/azext_edge/edge/params.py @@ -1332,6 +1332,18 @@ def load_iotops_arguments(self, _): options_list=["--application-uri", "--au"], help="The application instance URI embedded in the application instance.", ) + context.argument( + "public_key_secret_name", + options_list=["--public-key-secret", "--pks"], + help="Public key secret name in the Key Vault. If not provided, the " + "certificate file name will be used to generate the secret name.", + ) + context.argument( + "private_key_secret_name", + options_list=["--private-key-secret", "--prks"], + help="Private key secret name in the Key Vault. 
If not provided, the " + "certificate file name will be used to generate the secret name.", + ) with self.argument_context("iot ops schema version") as context: context.argument( diff --git a/azext_edge/edge/providers/orchestration/resources/connector/opcua/certs.py b/azext_edge/edge/providers/orchestration/resources/connector/opcua/certs.py index f4e2c8130..69730f7ef 100644 --- a/azext_edge/edge/providers/orchestration/resources/connector/opcua/certs.py +++ b/azext_edge/edge/providers/orchestration/resources/connector/opcua/certs.py @@ -71,7 +71,7 @@ def trust_add(self, instance_name: str, resource_group: str, file: str, secret_n secret_name = secret_name if secret_name else file_name.replace(".", "-") # iterate over secrets to check if secret with same name exists - secret_name = self._check_and_update_secret_name(secrets, secret_name, spc_keyvault_name) + secret_name = self._check_secret_name(secrets, secret_name, spc_keyvault_name, "secret") self._upload_to_key_vault(secret_name, file, cert_extension) # check if there is a spc called "opc-ua-connector", if not create one @@ -160,7 +160,7 @@ def issuer_add( secret_name = secret_name if secret_name else file_name.replace(".", "-") # iterate over secrets to check if secret with same name exists - secret_name = self._check_and_update_secret_name(secrets, secret_name, spc_keyvault_name) + secret_name = self._check_secret_name(secrets, secret_name, spc_keyvault_name, "secret") self._upload_to_key_vault(secret_name, file, cert_extension) # check if there is a spc called "opc-ua-connector", if not create one @@ -197,6 +197,8 @@ def client_add( private_key_file: str, subject_name: str, application_uri: str, + public_key_secret_name: Optional[str] = None, + private_key_secret_name: Optional[str] = None, ) -> dict: # inform user if the provided cert was issued by a CA, the CA cert must be added to the issuers list. logger.warning("Please ensure the certificate must be added to the issuers list if it was issued by a CA. 
") @@ -242,11 +244,22 @@ def client_add( file_name = os.path.basename(file) file_name_info = os.path.splitext(file_name) cert_extension = file_name_info[1].replace(".", "") - cert_name = file_name_info[0].replace(".", "-") - secret_name = f"{cert_name}-{cert_extension}" + secret_name = f"{file_name_info[0]}-{cert_extension}" + + file_type_map = { + public_key_file: ( + "public-key-secret", public_key_secret_name if public_key_secret_name else secret_name + ), + private_key_file: ( + "private-key-secret", private_key_secret_name if private_key_secret_name else secret_name + ) + } - # iterate over secrets to check if secret with same name exists - secret_name = self._check_and_update_secret_name(secrets, secret_name, spc_keyvault_name) + # Iterate over secrets to check if a secret with the same name exists + if file in file_type_map: + flag, secret_name = file_type_map[file] + secret_name = secret_name.replace(".", "-") + secret_name = self._check_secret_name(secrets, secret_name, spc_keyvault_name, flag) self._upload_to_key_vault(secret_name, file, cert_extension) secrets_to_add.append((secret_name, file_name)) @@ -330,23 +343,16 @@ def _find_existing_spc(self, instance_name: str, cl_resources: List[dict]) -> di return secretsync_spc - def _check_and_update_secret_name(self, secrets: PageIterator, secret_name: str, spc_keyvault_name: str) -> str: - from rich.prompt import Confirm, Prompt + def _check_secret_name(self, secrets: PageIterator, secret_name: str, spc_keyvault_name: str, flag: str) -> str: new_secret_name = secret_name for secret in secrets: if secret.id.endswith(secret_name): - # Prompt user to decide on overwriting the secret - overwrite_secret = Confirm.ask( + raise InvalidArgumentValueError( f"Secret with name {secret_name} already exists in keyvault {spc_keyvault_name}. " - "Do you want to overwrite the secret name?", + f"Please provide a different name via --{flag}." 
) - if overwrite_secret: - new_secret_name = Prompt.ask("Please enter the new secret name") - - return new_secret_name - return new_secret_name def _upload_to_key_vault(self, secret_name: str, file_path: str, cert_extension: str): diff --git a/azext_edge/tests/edge/orchestration/resources/connector/opcua/conftest.py b/azext_edge/tests/edge/orchestration/resources/connector/opcua/conftest.py index f8e90c23f..1bc47570a 100644 --- a/azext_edge/tests/edge/orchestration/resources/connector/opcua/conftest.py +++ b/azext_edge/tests/edge/orchestration/resources/connector/opcua/conftest.py @@ -131,41 +131,42 @@ def setup_mock_common_responses( content_type="application/json", ) - # set secret - mocked_responses.add( - method=responses.PUT, - url=get_secret_endpoint(keyvault_name="mock-keyvault", secret_name=secret_name), - json={}, - status=200, - content_type="application/json", - ) - - # get opcua spc - mocked_responses.add( - method=responses.GET, - url=get_spc_endpoint(spc_name=OPCUA_SPC_NAME, resource_group_name=rg_name), - json=spc, - status=200, - content_type="application/json", - ) - - # set opcua spc - mocked_responses.add( - method=responses.PUT, - url=get_spc_endpoint(spc_name=OPCUA_SPC_NAME, resource_group_name=rg_name), - json={}, - status=200, - content_type="application/json", - ) - - # get opcua secretsync - mocked_responses.add( - method=responses.GET, - url=get_secretsync_endpoint(secretsync_name=opcua_secretsync_name, resource_group_name=rg_name), - json=secretsync, - status=200, - content_type="application/json", - ) + if secret_name != "mock-secret": + # set secret + mocked_responses.add( + method=responses.PUT, + url=get_secret_endpoint(keyvault_name="mock-keyvault", secret_name=secret_name), + json={}, + status=200, + content_type="application/json", + ) + + # get opcua spc + mocked_responses.add( + method=responses.GET, + url=get_spc_endpoint(spc_name=OPCUA_SPC_NAME, resource_group_name=rg_name), + json=spc, + status=200, + content_type="application/json", + ) + + # set opcua spc + mocked_responses.add( + method=responses.PUT, + url=get_spc_endpoint(spc_name=OPCUA_SPC_NAME, resource_group_name=rg_name), + json={}, + status=200, + content_type="application/json", + ) + + # get opcua secretsync + mocked_responses.add( + method=responses.GET, + url=get_secretsync_endpoint(secretsync_name=opcua_secretsync_name, resource_group_name=rg_name), + json=secretsync, + status=200, + content_type="application/json", + ) def assemble_resource_map_mock( diff --git a/azext_edge/tests/edge/orchestration/resources/connector/opcua/test_opcua_certs_issuer_unit.py b/azext_edge/tests/edge/orchestration/resources/connector/opcua/test_opcua_certs_issuer_unit.py index 994f9eab0..ede0373df 100644 --- a/azext_edge/tests/edge/orchestration/resources/connector/opcua/test_opcua_certs_issuer_unit.py +++ b/azext_edge/tests/edge/orchestration/resources/connector/opcua/test_opcua_certs_issuer_unit.py @@ -287,6 +287,28 @@ def test_issuer_add( "new-secret", "Cannot have duplicate targetKey in objectSecretMapping.", ), + # secret existed + ( + { + "resources": [get_mock_spc_record(spc_name="default-spc", resource_group_name="mock-rg")], + "resource sync rules": [generate_ops_resource()], + "custom locations": [generate_ops_resource()], + "extensions": [generate_ops_resource()], + "meta": { + "expected_total": 4, + "resource_batches": 1, + }, + }, + get_mock_spc_record(spc_name=OPCUA_ISSUER_LIST_SECRET_SYNC_NAME, resource_group_name="mock-rg"), + get_mock_secretsync_record( + 
secretsync_name=OPCUA_ISSUER_LIST_SECRET_SYNC_NAME, + resource_group_name="mock-rg", + ), + "/fake/path/certificate.der", + "mock-secret", + "Secret with name mock-secret already exists in keyvault mock-keyvault. " + "Please provide a different name via --secret.", + ), ], ) def test_issuer_add_errors( @@ -352,45 +374,46 @@ def test_issuer_add_errors( content_type="application/json", ) - # set secret - mocked_responses.add( - method=responses.PUT, - url=get_secret_endpoint(keyvault_name="mock-keyvault", secret_name=secret_name), - json={}, - status=200, - content_type="application/json", - ) - - # get opcua spc - mocked_responses.add( - method=responses.GET, - url=get_spc_endpoint(spc_name=OPCUA_SPC_NAME, resource_group_name=rg_name), - json=issuer_list_spc, - status=200, - content_type="application/json", - ) + if secret_name != "mock-secret": + # set secret + mocked_responses.add( + method=responses.PUT, + url=get_secret_endpoint(keyvault_name="mock-keyvault", secret_name=secret_name), + json={}, + status=200, + content_type="application/json", + ) - # set opcua spc - mocked_responses.add( - method=responses.PUT, - url=get_spc_endpoint(spc_name=OPCUA_SPC_NAME, resource_group_name=rg_name), - json={}, - status=200, - content_type="application/json", - ) + # get opcua spc + mocked_responses.add( + method=responses.GET, + url=get_spc_endpoint(spc_name=OPCUA_SPC_NAME, resource_group_name=rg_name), + json=issuer_list_spc, + status=200, + content_type="application/json", + ) - if not file_name.endswith("der"): - # set opcua secretsync + # set opcua spc mocked_responses.add( method=responses.PUT, - url=get_secretsync_endpoint( - secretsync_name=OPCUA_ISSUER_LIST_SECRET_SYNC_NAME, resource_group_name=rg_name - ), + url=get_spc_endpoint(spc_name=OPCUA_SPC_NAME, resource_group_name=rg_name), json={}, status=200, content_type="application/json", ) + if not file_name.endswith("der"): + # set opcua secretsync + mocked_responses.add( + method=responses.PUT, + url=get_secretsync_endpoint( + secretsync_name=OPCUA_ISSUER_LIST_SECRET_SYNC_NAME, resource_group_name=rg_name + ), + json={}, + status=200, + content_type="application/json", + ) + with pytest.raises(Exception) as e: add_connector_opcua_issuer( cmd=mocked_cmd, diff --git a/azext_edge/tests/edge/orchestration/resources/connector/opcua/test_opcua_certs_trust_unit.py b/azext_edge/tests/edge/orchestration/resources/connector/opcua/test_opcua_certs_trust_unit.py index 33541bfd5..863314228 100644 --- a/azext_edge/tests/edge/orchestration/resources/connector/opcua/test_opcua_certs_trust_unit.py +++ b/azext_edge/tests/edge/orchestration/resources/connector/opcua/test_opcua_certs_trust_unit.py @@ -161,6 +161,28 @@ def test_trust_add( None, "Please enable secret sync before adding certificate.", ), + # secret existed + ( + { + "resources": [get_mock_spc_record(spc_name="default-spc", resource_group_name="mock-rg")], + "resource sync rules": [generate_ops_resource()], + "custom locations": [generate_ops_resource()], + "extensions": [generate_ops_resource()], + "meta": { + "expected_total": 4, + "resource_batches": 1, + }, + }, + get_mock_spc_record(spc_name=OPCUA_TRUST_LIST_SECRET_SYNC_NAME, resource_group_name="mock-rg"), + get_mock_secretsync_record( + secretsync_name=OPCUA_TRUST_LIST_SECRET_SYNC_NAME, + resource_group_name="mock-rg", + ), + "/fake/path/certificate.der", + "mock-secret", + "Secret with name mock-secret already exists in keyvault mock-keyvault. 
" + "Please provide a different name via --secret.", + ), # duplicate target key ( { From 122620e6a8ad64a74a856e31d95992143a8f4791 Mon Sep 17 00:00:00 2001 From: Elsie4ever <3467996@gmail.com> Date: Fri, 25 Oct 2024 12:08:25 -0700 Subject: [PATCH 17/26] fix: cert management look up existing resource use cl + rg instead of just rg (#424) --- .../resources/connector/opcua/certs.py | 116 ++++++++++-------- .../opcua/test_opcua_certs_client_unit.py | 21 +++- .../opcua/test_opcua_certs_issuer_unit.py | 40 +++++- .../opcua/test_opcua_certs_trust_unit.py | 24 +++- 4 files changed, 140 insertions(+), 61 deletions(-) diff --git a/azext_edge/edge/providers/orchestration/resources/connector/opcua/certs.py b/azext_edge/edge/providers/orchestration/resources/connector/opcua/certs.py index 69730f7ef..66695d34c 100644 --- a/azext_edge/edge/providers/orchestration/resources/connector/opcua/certs.py +++ b/azext_edge/edge/providers/orchestration/resources/connector/opcua/certs.py @@ -30,6 +30,8 @@ console = Console() +SPC_RESOURCE_TYPE = "microsoft.secretsynccontroller/azurekeyvaultsecretproviderclasses" +SECRET_SYNC_RESOURCE_TYPE = "microsoft.secretsynccontroller/secretsyncs" OPCUA_SPC_NAME = "opc-ua-connector" OPCUA_TRUST_LIST_SECRET_SYNC_NAME = "aio-opc-ua-broker-trust-list" OPCUA_ISSUER_LIST_SECRET_SYNC_NAME = "aio-opc-ua-broker-issuer-list" @@ -75,13 +77,11 @@ def trust_add(self, instance_name: str, resource_group: str, file: str, secret_n self._upload_to_key_vault(secret_name, file, cert_extension) # check if there is a spc called "opc-ua-connector", if not create one - try: - opcua_spc = self.ssc_mgmt_client.azure_key_vault_secret_provider_classes.get( - resource_group_name=resource_group, - azure_key_vault_secret_provider_class_name=OPCUA_SPC_NAME, - ) - except ResourceNotFoundError: - opcua_spc = {} + opcua_spc = self._find_resource_from_cl_resources( + cl_resources=cl_resources, + resource_type=SPC_RESOURCE_TYPE, + resource_name=OPCUA_SPC_NAME, + ) self._add_secrets_to_spc( secrets=[secret_name], @@ -93,13 +93,11 @@ def trust_add(self, instance_name: str, resource_group: str, file: str, secret_n ) # check if there is a secret sync called "aio-opc-ua-broker-trust-list ", if not create one - try: - opcua_secret_sync = self.ssc_mgmt_client.secret_syncs.get( - resource_group_name=resource_group, - secret_sync_name=OPCUA_TRUST_LIST_SECRET_SYNC_NAME, - ) - except ResourceNotFoundError: - opcua_secret_sync = {} + opcua_secret_sync = self._find_resource_from_cl_resources( + cl_resources=cl_resources, + resource_type=SECRET_SYNC_RESOURCE_TYPE, + resource_name=OPCUA_TRUST_LIST_SECRET_SYNC_NAME, + ) return self._add_secrets_to_secret_sync( secrets=[(secret_name, file_name)], @@ -135,13 +133,11 @@ def issuer_add( # get cert name by removing extension cert_name = os.path.splitext(file_name)[0] - try: - opcua_secret_sync = self.ssc_mgmt_client.secret_syncs.get( - resource_group_name=resource_group, - secret_sync_name=OPCUA_ISSUER_LIST_SECRET_SYNC_NAME, - ) - except ResourceNotFoundError: - opcua_secret_sync = {} + opcua_secret_sync = self._find_resource_from_cl_resources( + cl_resources=cl_resources, + resource_type=SECRET_SYNC_RESOURCE_TYPE, + resource_name=OPCUA_ISSUER_LIST_SECRET_SYNC_NAME, + ) if cert_extension == ".crl": matched_names = [] @@ -164,13 +160,11 @@ def issuer_add( self._upload_to_key_vault(secret_name, file, cert_extension) # check if there is a spc called "opc-ua-connector", if not create one - try: - opcua_spc = self.ssc_mgmt_client.azure_key_vault_secret_provider_classes.get( - 
resource_group_name=resource_group, - azure_key_vault_secret_provider_class_name=OPCUA_SPC_NAME, - ) - except ResourceNotFoundError: - opcua_spc = {} + opcua_spc = self._find_resource_from_cl_resources( + cl_resources=cl_resources, + resource_type=SPC_RESOURCE_TYPE, + resource_name=OPCUA_SPC_NAME, + ) self._add_secrets_to_spc( secrets=[secret_name], @@ -222,22 +216,18 @@ def client_add( secrets: PageIterator = self.keyvault_client.list_properties_of_secrets() # check if there is a spc called "opc-ua-connector", if not create one - try: - opcua_spc = self.ssc_mgmt_client.azure_key_vault_secret_provider_classes.get( - resource_group_name=resource_group, - azure_key_vault_secret_provider_class_name=OPCUA_SPC_NAME, - ) - except ResourceNotFoundError: - opcua_spc = {} + opcua_spc = self._find_resource_from_cl_resources( + cl_resources=cl_resources, + resource_type=SPC_RESOURCE_TYPE, + resource_name=OPCUA_SPC_NAME, + ) # check if there is a secret sync called "aio-opc-ua-broker-client-certificate", if not create one - try: - opcua_secret_sync = self.ssc_mgmt_client.secret_syncs.get( - resource_group_name=resource_group, - secret_sync_name=OPCUA_CLIENT_CERT_SECRET_SYNC_NAME, - ) - except ResourceNotFoundError: - opcua_secret_sync = {} + opcua_secret_sync = self._find_resource_from_cl_resources( + cl_resources=cl_resources, + resource_type=SECRET_SYNC_RESOURCE_TYPE, + resource_name=OPCUA_CLIENT_CERT_SECRET_SYNC_NAME, + ) secrets_to_add = [] for file in [public_key_file, private_key_file]: @@ -326,14 +316,10 @@ def _find_existing_spc(self, instance_name: str, cl_resources: List[dict]) -> di secretsync_spc = None if cl_resources: - for resource in cl_resources: - if resource["type"].lower() == "microsoft.secretsynccontroller/azurekeyvaultsecretproviderclasses": - resource_id_container = parse_resource_id(resource["id"]) - secretsync_spc = self.ssc_mgmt_client.azure_key_vault_secret_provider_classes.get( - resource_group_name=resource_id_container.resource_group_name, - azure_key_vault_secret_provider_class_name=resource_id_container.resource_name, - ) - break + secretsync_spc = self._find_resource_from_cl_resources( + cl_resources=cl_resources, + resource_type=SPC_RESOURCE_TYPE, + ) if not secretsync_spc: raise ResourceNotFoundError( @@ -343,8 +329,36 @@ def _find_existing_spc(self, instance_name: str, cl_resources: List[dict]) -> di return secretsync_spc - def _check_secret_name(self, secrets: PageIterator, secret_name: str, spc_keyvault_name: str, flag: str) -> str: + # TODO: consider moving under instance as common method + def _find_resource_from_cl_resources( + self, + cl_resources: List[dict], + resource_type: str, + resource_name: Optional[str] = None, + ) -> dict: + for resource in cl_resources: + resource_id_container = parse_resource_id(resource["id"]) + cl_resource_name = resource_id_container.resource_name + # Ensure both type and name (if specified) match the resource + is_name_matched = resource_name is None or cl_resource_name == resource_name + is_type_matched = resource["type"].lower() == resource_type + + if is_type_matched and is_name_matched: + if resource_type == SPC_RESOURCE_TYPE: + return self.ssc_mgmt_client.azure_key_vault_secret_provider_classes.get( + resource_group_name=resource_id_container.resource_group_name, + azure_key_vault_secret_provider_class_name=cl_resource_name, + ) + elif resource_type == SECRET_SYNC_RESOURCE_TYPE: + return self.ssc_mgmt_client.secret_syncs.get( + resource_group_name=resource_id_container.resource_group_name, + 
secret_sync_name=cl_resource_name, + ) + + return {} + + def _check_secret_name(self, secrets: PageIterator, secret_name: str, spc_keyvault_name: str, flag: str) -> str: new_secret_name = secret_name for secret in secrets: if secret.id.endswith(secret_name): diff --git a/azext_edge/tests/edge/orchestration/resources/connector/opcua/test_opcua_certs_client_unit.py b/azext_edge/tests/edge/orchestration/resources/connector/opcua/test_opcua_certs_client_unit.py index 548d32b5d..4b6bb791c 100644 --- a/azext_edge/tests/edge/orchestration/resources/connector/opcua/test_opcua_certs_client_unit.py +++ b/azext_edge/tests/edge/orchestration/resources/connector/opcua/test_opcua_certs_client_unit.py @@ -35,7 +35,13 @@ [ ( { - "resources": [get_mock_spc_record(spc_name="default-spc", resource_group_name="mock-rg")], + "resources": [ + get_mock_spc_record(spc_name="default-spc", resource_group_name="mock-rg"), + get_mock_spc_record(spc_name=OPCUA_SPC_NAME, resource_group_name="mock-rg"), + get_mock_secretsync_record( + secretsync_name=OPCUA_CLIENT_CERT_SECRET_SYNC_NAME, resource_group_name="mock-rg" + ), + ], "resource sync rules": [generate_ops_resource()], "custom locations": [generate_ops_resource()], "extension": {IOT_OPS_EXTENSION_TYPE: {"id": "aio-ext-id", "name": "aio-ext-name", "properties": {}}}, @@ -140,6 +146,11 @@ def test_client_add( subject_name="subjectname", ) + assert ( + mocked_logger.warning.call_args[0][0] == "Please ensure the certificate must be added " + "to the issuers list if it was issued by a CA. " + ) + if result: if not client_app_spc: assert ( @@ -191,7 +202,13 @@ def test_client_add( # no aio extension ( { - "resources": [get_mock_spc_record(spc_name="default-spc", resource_group_name="mock-rg")], + "resources": [ + get_mock_spc_record(spc_name="default-spc", resource_group_name="mock-rg"), + get_mock_spc_record(spc_name=OPCUA_SPC_NAME, resource_group_name="mock-rg"), + get_mock_secretsync_record( + secretsync_name=OPCUA_CLIENT_CERT_SECRET_SYNC_NAME, resource_group_name="mock-rg" + ), + ], "resource sync rules": [generate_ops_resource()], "custom locations": [generate_ops_resource()], "extension": {}, diff --git a/azext_edge/tests/edge/orchestration/resources/connector/opcua/test_opcua_certs_issuer_unit.py b/azext_edge/tests/edge/orchestration/resources/connector/opcua/test_opcua_certs_issuer_unit.py index ede0373df..d614200c0 100644 --- a/azext_edge/tests/edge/orchestration/resources/connector/opcua/test_opcua_certs_issuer_unit.py +++ b/azext_edge/tests/edge/orchestration/resources/connector/opcua/test_opcua_certs_issuer_unit.py @@ -30,7 +30,13 @@ [ ( { - "resources": [get_mock_spc_record(spc_name="default-spc", resource_group_name="mock-rg")], + "resources": [ + get_mock_spc_record(spc_name="default-spc", resource_group_name="mock-rg"), + get_mock_spc_record(spc_name=OPCUA_SPC_NAME, resource_group_name="mock-rg"), + get_mock_secretsync_record( + secretsync_name=OPCUA_ISSUER_LIST_SECRET_SYNC_NAME, resource_group_name="mock-rg" + ), + ], "resource sync rules": [generate_ops_resource()], "custom locations": [generate_ops_resource()], "extensions": [generate_ops_resource()], @@ -54,7 +60,13 @@ # adding .crl with corresponding .der or crt ( { - "resources": [get_mock_spc_record(spc_name="default-spc", resource_group_name="mock-rg")], + "resources": [ + get_mock_spc_record(spc_name="default-spc", resource_group_name="mock-rg"), + get_mock_spc_record(spc_name=OPCUA_SPC_NAME, resource_group_name="mock-rg"), + get_mock_secretsync_record( + 
secretsync_name=OPCUA_ISSUER_LIST_SECRET_SYNC_NAME, resource_group_name="mock-rg" + ), + ], "resource sync rules": [generate_ops_resource()], "custom locations": [generate_ops_resource()], "extensions": [generate_ops_resource()], @@ -244,7 +256,13 @@ def test_issuer_add( # adding .crl without corresponding .der or crt ( { - "resources": [get_mock_spc_record(spc_name="default-spc", resource_group_name="mock-rg")], + "resources": [ + get_mock_spc_record(spc_name="default-spc", resource_group_name="mock-rg"), + get_mock_spc_record(spc_name=OPCUA_SPC_NAME, resource_group_name="mock-rg"), + get_mock_secretsync_record( + secretsync_name=OPCUA_ISSUER_LIST_SECRET_SYNC_NAME, resource_group_name="mock-rg" + ), + ], "resource sync rules": [generate_ops_resource()], "custom locations": [generate_ops_resource()], "extensions": [generate_ops_resource()], @@ -264,7 +282,13 @@ def test_issuer_add( # duplicate targetKey in objectSecretMapping ( { - "resources": [get_mock_spc_record(spc_name="default-spc", resource_group_name="mock-rg")], + "resources": [ + get_mock_spc_record(spc_name="default-spc", resource_group_name="mock-rg"), + get_mock_spc_record(spc_name=OPCUA_SPC_NAME, resource_group_name="mock-rg"), + get_mock_secretsync_record( + secretsync_name=OPCUA_ISSUER_LIST_SECRET_SYNC_NAME, resource_group_name="mock-rg" + ), + ], "resource sync rules": [generate_ops_resource()], "custom locations": [generate_ops_resource()], "extensions": [generate_ops_resource()], @@ -290,7 +314,13 @@ def test_issuer_add( # secret existed ( { - "resources": [get_mock_spc_record(spc_name="default-spc", resource_group_name="mock-rg")], + "resources": [ + get_mock_spc_record(spc_name="default-spc", resource_group_name="mock-rg"), + get_mock_spc_record(spc_name=OPCUA_SPC_NAME, resource_group_name="mock-rg"), + get_mock_secretsync_record( + secretsync_name=OPCUA_ISSUER_LIST_SECRET_SYNC_NAME, resource_group_name="mock-rg" + ), + ], "resource sync rules": [generate_ops_resource()], "custom locations": [generate_ops_resource()], "extensions": [generate_ops_resource()], diff --git a/azext_edge/tests/edge/orchestration/resources/connector/opcua/test_opcua_certs_trust_unit.py b/azext_edge/tests/edge/orchestration/resources/connector/opcua/test_opcua_certs_trust_unit.py index 863314228..e9c70fcf6 100644 --- a/azext_edge/tests/edge/orchestration/resources/connector/opcua/test_opcua_certs_trust_unit.py +++ b/azext_edge/tests/edge/orchestration/resources/connector/opcua/test_opcua_certs_trust_unit.py @@ -30,7 +30,13 @@ [ ( { - "resources": [get_mock_spc_record(spc_name="default-spc", resource_group_name="mock-rg")], + "resources": [ + get_mock_spc_record(spc_name="default-spc", resource_group_name="mock-rg"), + get_mock_spc_record(spc_name=OPCUA_SPC_NAME, resource_group_name="mock-rg"), + get_mock_secretsync_record( + secretsync_name=OPCUA_TRUST_LIST_SECRET_SYNC_NAME, resource_group_name="mock-rg" + ), + ], "resource sync rules": [generate_ops_resource()], "custom locations": [generate_ops_resource()], "extensions": [generate_ops_resource()], @@ -164,7 +170,13 @@ def test_trust_add( # secret existed ( { - "resources": [get_mock_spc_record(spc_name="default-spc", resource_group_name="mock-rg")], + "resources": [ + get_mock_spc_record(spc_name="default-spc", resource_group_name="mock-rg"), + get_mock_spc_record(spc_name=OPCUA_SPC_NAME, resource_group_name="mock-rg"), + get_mock_secretsync_record( + secretsync_name=OPCUA_TRUST_LIST_SECRET_SYNC_NAME, resource_group_name="mock-rg" + ), + ], "resource sync rules": [generate_ops_resource()], 
"custom locations": [generate_ops_resource()], "extensions": [generate_ops_resource()], @@ -186,7 +198,13 @@ def test_trust_add( # duplicate target key ( { - "resources": [get_mock_spc_record(spc_name="default-spc", resource_group_name="mock-rg")], + "resources": [ + get_mock_spc_record(spc_name="default-spc", resource_group_name="mock-rg"), + get_mock_spc_record(spc_name=OPCUA_SPC_NAME, resource_group_name="mock-rg"), + get_mock_secretsync_record( + secretsync_name=OPCUA_TRUST_LIST_SECRET_SYNC_NAME, resource_group_name="mock-rg" + ), + ], "resource sync rules": [generate_ops_resource()], "custom locations": [generate_ops_resource()], "extensions": [generate_ops_resource()], From a2f678321860fdbcb81b3d621272327a9876d246 Mon Sep 17 00:00:00 2001 From: Victoria Litvinova <73560279+vilit1@users.noreply.github.com> Date: Fri, 25 Oct 2024 16:45:18 -0700 Subject: [PATCH 18/26] fix: changes for `az iot ops upgrade` display text (#425) --- azext_edge/edge/providers/orchestration/upgrade.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/azext_edge/edge/providers/orchestration/upgrade.py b/azext_edge/edge/providers/orchestration/upgrade.py index 368125cbc..9c491e9b3 100644 --- a/azext_edge/edge/providers/orchestration/upgrade.py +++ b/azext_edge/edge/providers/orchestration/upgrade.py @@ -112,7 +112,7 @@ def do_work(self, confirm_yes: Optional[bool] = None): print() print("[yellow]Upgrading may fail and require you to delete and re-create your cluster.[/yellow]") - should_bail = not should_continue_prompt(confirm_yes=confirm_yes) + should_bail = not should_continue_prompt(confirm_yes=confirm_yes, context="Upgrade") if should_bail: return @@ -155,8 +155,9 @@ def _check_extensions(self) -> str: "properties" : { "autoUpgradeMinorVersion": "false", "releaseTrain": train_map[extension_key], - "version": version_map[extension_key] - } + "version": version_map[extension_key], + }, + "currentVersion": current_version } if extension_type == "microsoft.openservicemesh": @@ -186,8 +187,9 @@ def _check_extensions(self) -> str: # text to print (ordered) display_desc = "[dim]" for extension, update in self.extensions_to_update.items(): - version = update["properties"]["version"] - display_desc += f"• {extension}: {version}\n" + new_version = update["properties"]["version"] + old_version = update.pop("currentVersion") + display_desc += f"• {extension}: {old_version} -> {new_version}\n" return display_desc[:-1] + "" def _get_resource_map(self) -> IoTOperationsResourceMap: From 281131917e5ce68b2ef9da4d69e7cf2521928618 Mon Sep 17 00:00:00 2001 From: Ryan K Date: Mon, 28 Oct 2024 11:00:27 -0700 Subject: [PATCH 19/26] chore: template update to use AIO extension 0.8.30 and version update to 0.8.0a2 (#428) --- azext_edge/constants.py | 2 +- azext_edge/edge/providers/orchestration/template.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/azext_edge/constants.py b/azext_edge/constants.py index 91b2532d5..bda3409de 100644 --- a/azext_edge/constants.py +++ b/azext_edge/constants.py @@ -7,7 +7,7 @@ import os -VERSION = "0.8.0a1" +VERSION = "0.8.0a2" EXTENSION_NAME = "azure-iot-ops" EXTENSION_ROOT = os.path.dirname(os.path.abspath(__file__)) USER_AGENT = "IotOperationsCliExtension/{}".format(VERSION) diff --git a/azext_edge/edge/providers/orchestration/template.py b/azext_edge/edge/providers/orchestration/template.py index 52e405bc4..7cf4b8b25 100644 --- a/azext_edge/edge/providers/orchestration/template.py +++ b/azext_edge/edge/providers/orchestration/template.py @@ 
-358,7 +358,7 @@ def copy(self) -> "TemplateBlueprint": "languageVersion": "2.0", "contentVersion": "1.0.0.0", "metadata": { - "_generator": {"name": "bicep", "version": "0.30.23.60470", "templateHash": "12344544595454159338"} + "_generator": {"name": "bicep", "version": "0.30.23.60470", "templateHash": "4796673238078710779"} }, "definitions": { "_1.AdvancedConfig": { @@ -530,7 +530,7 @@ def copy(self) -> "TemplateBlueprint": "variables": { "AIO_EXTENSION_SUFFIX": "[take(uniqueString(resourceId('Microsoft.Kubernetes/connectedClusters', parameters('clusterName'))), 5)]", "AIO_EXTENSION_SCOPE": {"cluster": {"releaseNamespace": "azure-iot-operations"}}, - "VERSIONS": {"iotOperations": "0.8.25"}, + "VERSIONS": {"iotOperations": "0.8.30"}, "TRAINS": {"iotOperations": "integration"}, "MQTT_SETTINGS": { "brokerListenerServiceName": "aio-broker", From c08d93f17851115875255fc96f0f078508e5663f Mon Sep 17 00:00:00 2001 From: Ryan K Date: Mon, 28 Oct 2024 12:17:11 -0700 Subject: [PATCH 20/26] fix(ci): container publish workflow updates (#427) --- .github/workflows/int_test.yml | 20 +++++++++++++++---- .../publish_test_container_image.yml | 6 ++++++ 2 files changed, 22 insertions(+), 4 deletions(-) diff --git a/.github/workflows/int_test.yml b/.github/workflows/int_test.yml index f4f9c4698..f1037d82b 100644 --- a/.github/workflows/int_test.yml +++ b/.github/workflows/int_test.yml @@ -22,6 +22,11 @@ on: type: string required: false default: '51dfe1e8-70c6-4de5-a08e-e18aff23d815' + init-continue-on-error: + description: Continue on error for init integration tests + type: boolean + required: false + default: true use-container: description: Build container image for tests type: boolean @@ -228,8 +233,12 @@ jobs: - name: "Containerized tests" if: ${{ matrix.feature == 'default' && inputs.use-container }} env: - azext_edge_rg: ${{ steps.env_out.outputs.CLUSTER_NAME }} - azext_edge_cluster: ${{ steps.env_out.outputs.RESOURCE_GROUP }} + azext_edge_skip_init: true # skip init tests in container + azext_edge_init_redeployment: false # ensure no redeployment in container + AIO_CLI_INIT_PREFLIGHT_DISABLED: ${{ steps.init.outputs.NO_PREFLIGHT }} + azext_edge_rg: ${{ steps.env_out.outputs.RESOURCE_GROUP }} + azext_edge_cluster: ${{ steps.env_out.outputs.CLUSTER_NAME }} + azext_edge_instance: ${{ steps.env_out.outputs.INSTANCE_NAME }} run: | # volume mounts azure_dir=$(realpath ~/.azure) @@ -239,9 +248,12 @@ jobs: # env vars envVars=() - envVars+=("-e" "azext_edge_cluster=$azext_edge_rg") - envVars+=("-e" "azext_edge_rg=$azext_edge_cluster") envVars+=("-e" "azext_edge_skip_init=$azext_edge_skip_init") + envVars+=("-e" "azext_edge_init_redeployment=$azext_edge_init_redeployment") + envVars+=("-e" "AIO_CLI_INIT_PREFLIGHT_DISABLED=$AIO_CLI_INIT_PREFLIGHT_DISABLED") + envVars+=("-e" "azext_edge_rg=$azext_edge_rg") + envVars+=("-e" "azext_edge_cluster=$azext_edge_cluster") + envVars+=("-e" "azext_edge_instance=$azext_edge_instance") envVars+=("-e" "KUBECONFIG=$kubeconfig_mount") # Run tests diff --git a/.github/workflows/publish_test_container_image.yml b/.github/workflows/publish_test_container_image.yml index e08ae9484..5660eefb1 100644 --- a/.github/workflows/publish_test_container_image.yml +++ b/.github/workflows/publish_test_container_image.yml @@ -6,6 +6,11 @@ on: type: boolean required: true default: false + continue-on-error: + description: Continue on error for init integration tests + type: boolean + required: false + default: true name: Publish Integration Test Container Image run-name: Publish Test 
Container${{ inputs.promote && ' [stable]' || '' }} permissions: @@ -18,6 +23,7 @@ jobs: with: resource-group: ops-cli-int-test-rg use-container: true + init-continue-on-error: ${{ inputs.continue-on-error }} secrets: AZURE_CLIENT_ID: ${{ secrets.AZURE_CLIENT_ID }} AZURE_SUBSCRIPTION_ID: ${{ secrets.AZURE_SUBSCRIPTION_ID }} From d1ed05b1cd9127ca96ba0257042df4af0b0cde53 Mon Sep 17 00:00:00 2001 From: Victoria Litvinova <73560279+vilit1@users.noreply.github.com> Date: Mon, 28 Oct 2024 15:25:52 -0700 Subject: [PATCH 21/26] fix: Ensure consistent verbiage (#426) --- azext_edge/edge/_help.py | 4 ++-- azext_edge/edge/providers/orchestration/permissions.py | 2 +- azext_edge/edge/providers/orchestration/work.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/azext_edge/edge/_help.py b/azext_edge/edge/_help.py index 2eddabcb7..e4f325bf0 100644 --- a/azext_edge/edge/_help.py +++ b/azext_edge/edge/_help.py @@ -1288,8 +1288,8 @@ def load_iotops_help(): This operation will create a schema registry with system managed identity enabled. It will then assign the system identity the built-in "Storage Blob Data Contributor" - role against the storage account scope by default. If necessary you can provide a custom - role via --custom-role-id to use instead. + role against the storage account container scope by default. If necessary you can provide a + custom role via --custom-role-id to use instead. If the indicated storage account container does not exist it will be created with default settings. diff --git a/azext_edge/edge/providers/orchestration/permissions.py b/azext_edge/edge/providers/orchestration/permissions.py index 086bbbff5..d1f976c1b 100644 --- a/azext_edge/edge/providers/orchestration/permissions.py +++ b/azext_edge/edge/providers/orchestration/permissions.py @@ -47,7 +47,7 @@ def verify_write_permission_against_rg(subscription_id: str, resource_group_name "This IoT Operations deployment config includes resource sync rules which require the logged-in principal\n" "to have permission to write role assignments (Microsoft.Authorization/roleAssignments/write) " "against the resource group.\n\n" - "Use --disable-rsync-rules to not include resource sync rules in the deployment.\n" + "Run the command with --enable-rsync False to not include resource sync rules in the deployment.\n" ) diff --git a/azext_edge/edge/providers/orchestration/work.py b/azext_edge/edge/providers/orchestration/work.py index 273e7e8ba..a6570c77b 100644 --- a/azext_edge/edge/providers/orchestration/work.py +++ b/azext_edge/edge/providers/orchestration/work.py @@ -357,7 +357,7 @@ def _do_work(self): # noqa: C901 if any(not v for v in self._extension_map.values()): raise ValidationError( "Foundational service installation not detected. " - "Instance deployment will not continue. Please run init." + "Instance deployment will not continue. Please run `az iot ops init`." 
) instance_work_name = self._work_format_str.format(op="instance") From a66bd2545c0fe1933b13244c675903240fa11dbf Mon Sep 17 00:00:00 2001 From: Paymaun Date: Wed, 30 Oct 2024 15:24:27 -0700 Subject: [PATCH 22/26] refactor: template refresh ops 0.8.32 preview (#432) --- azext_edge/edge/providers/orchestration/template.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/azext_edge/edge/providers/orchestration/template.py b/azext_edge/edge/providers/orchestration/template.py index 7cf4b8b25..ea4e71d4b 100644 --- a/azext_edge/edge/providers/orchestration/template.py +++ b/azext_edge/edge/providers/orchestration/template.py @@ -352,13 +352,13 @@ def copy(self) -> "TemplateBlueprint": ) M3_INSTANCE_TEMPLATE = TemplateBlueprint( - commit_id="373335547851df70d512b7ec81aedfba0d660ae5", + commit_id="5cd08fe900b47ea82b1fa88643e71520ebfb7b80", content={ "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", "languageVersion": "2.0", "contentVersion": "1.0.0.0", "metadata": { - "_generator": {"name": "bicep", "version": "0.30.23.60470", "templateHash": "4796673238078710779"} + "_generator": {"name": "bicep", "version": "0.30.23.60470", "templateHash": "6209031414191159933"} }, "definitions": { "_1.AdvancedConfig": { @@ -530,8 +530,8 @@ def copy(self) -> "TemplateBlueprint": "variables": { "AIO_EXTENSION_SUFFIX": "[take(uniqueString(resourceId('Microsoft.Kubernetes/connectedClusters', parameters('clusterName'))), 5)]", "AIO_EXTENSION_SCOPE": {"cluster": {"releaseNamespace": "azure-iot-operations"}}, - "VERSIONS": {"iotOperations": "0.8.30"}, - "TRAINS": {"iotOperations": "integration"}, + "VERSIONS": {"iotOperations": "0.8.32"}, + "TRAINS": {"iotOperations": "preview"}, "MQTT_SETTINGS": { "brokerListenerServiceName": "aio-broker", "brokerListenerPort": 18883, From 1f39ab78c0d8358868bdc78d25b6aaa1def45762 Mon Sep 17 00:00:00 2001 From: Ryan K Date: Wed, 30 Oct 2024 15:39:49 -0700 Subject: [PATCH 23/26] feat: enable --user-trust configuration on init and create (#431) --- azext_edge/edge/_help.py | 14 ++++++--- azext_edge/edge/commands_edge.py | 6 ++-- azext_edge/edge/params.py | 8 +++++ .../edge/providers/orchestration/targets.py | 10 +++++-- .../edge/providers/orchestration/work.py | 30 ++++++++++++------- .../edge/orchestration/test_targets_unit.py | 25 ++++++++++++++-- 6 files changed, 73 insertions(+), 20 deletions(-) diff --git a/azext_edge/edge/_help.py b/azext_edge/edge/_help.py index e4f325bf0..47afa9a34 100644 --- a/azext_edge/edge/_help.py +++ b/azext_edge/edge/_help.py @@ -482,11 +482,10 @@ def load_iotops_help(): - name: Similar to the prior example but with Arc Container Storage fault-tolerance enabled (requires at least 3 nodes). text: > az iot ops init --cluster mycluster -g myresourcegroup --enable-fault-tolerance - - name: This example highlights trust settings for a user provided cert manager config. + - name: This example highlights enabling user trust settings for a custom cert-manager config. + This will skip deployment of the system cert-manager and trust-manager. 
text: > - az iot ops init --cluster mycluster -g myresourcegroup --trust-settings - configMapName=example-bundle configMapKey=trust-bundle.pem issuerKind=ClusterIssuer - issuerName=trust-manager-selfsigned-issuer + az iot ops init --cluster mycluster -g myresourcegroup --user-trust """ @@ -522,6 +521,13 @@ def load_iotops_help(): text: > az iot ops create --cluster mycluster -g myresourcegroup --name myinstance --sr-resource-id $SCHEMA_REGISTRY_RESOURCE_ID --enable-rsync + - name: This example highlights trust settings for a user provided cert-manager config. + Note that the cluster must have been initialized with `--user-trust` and a user cert-manager deployment must be present. + text: > + az iot ops create --cluster mycluster -g myresourcegroup --name myinstance --sr-resource-id $SCHEMA_REGISTRY_RESOURCE_ID + --trust-settings configMapName=example-bundle configMapKey=trust-bundle.pem + issuerKind=ClusterIssuer issuerName=trust-manager-selfsigned-issuer + """ helps[ diff --git a/azext_edge/edge/commands_edge.py b/azext_edge/edge/commands_edge.py index 9402977f8..416cb7a5a 100644 --- a/azext_edge/edge/commands_edge.py +++ b/azext_edge/edge/commands_edge.py @@ -109,10 +109,10 @@ def init( cmd, cluster_name: str, resource_group_name: str, - trust_settings: Optional[List[str]] = None, enable_fault_tolerance: Optional[bool] = None, no_progress: Optional[bool] = None, ensure_latest: Optional[bool] = None, + user_trust: Optional[bool] = None, **kwargs, ) -> Union[Dict[str, Any], None]: from .common import INIT_NO_PREFLIGHT_ENV_KEY @@ -130,7 +130,7 @@ def init( cluster_name=cluster_name, resource_group_name=resource_group_name, enable_fault_tolerance=enable_fault_tolerance, - trust_settings=trust_settings, + user_trust=user_trust, ) @@ -167,6 +167,7 @@ def create_instance( enable_rsync_rules: Optional[bool] = None, instance_description: Optional[str] = None, dataflow_profile_instances: int = 1, + trust_settings: Optional[List[str]] = None, # Ops extension container_runtime_socket: Optional[str] = None, kubernetes_distro: str = KubernetesDistroType.k8s.value, @@ -221,6 +222,7 @@ def create_instance( instance_description=instance_description, add_insecure_listener=add_insecure_listener, dataflow_profile_instances=dataflow_profile_instances, + trust_settings=trust_settings, # Ops Extension container_runtime_socket=container_runtime_socket, kubernetes_distro=kubernetes_distro, diff --git a/azext_edge/edge/params.py b/azext_edge/edge/params.py index 8320372a7..ce0ffbabd 100644 --- a/azext_edge/edge/params.py +++ b/azext_edge/edge/params.py @@ -539,6 +539,14 @@ def load_iotops_arguments(self, _): "used, a system provided self-signed trust bundle is configured.", arg_group="Trust", ) + context.argument( + "user_trust", + options_list=["--user-trust", "--ut"], + arg_type=get_three_state_flag(), + help="Skip the deployment of the system cert-manager and trust-manager " + "in favor of a user-provided configuration.", + arg_group="Trust", + ) with self.argument_context("iot ops upgrade") as context: # Schema Registry diff --git a/azext_edge/edge/providers/orchestration/targets.py b/azext_edge/edge/providers/orchestration/targets.py index 9c4e1f505..336027381 100644 --- a/azext_edge/edge/providers/orchestration/targets.py +++ b/azext_edge/edge/providers/orchestration/targets.py @@ -63,6 +63,8 @@ def __init__( # Akri kubernetes_distro: str = KubernetesDistroType.k8s.value, container_runtime_socket: Optional[str] = None, + # User Trust Config + user_trust: Optional[bool] = None, **_, ): self.cluster_name = 
cluster_name @@ -84,6 +86,7 @@ def __init__( self.trust_settings = assemble_nargs_to_dict(trust_settings) self.trust_config = self.get_trust_settings_target_map() self.advanced_config = self.get_advanced_config_target_map() + self.user_trust = user_trust # Dataflow self.dataflow_profile_instances = self._sanitize_int(dataflow_profile_instances) @@ -150,8 +153,11 @@ def get_ops_enablement_template( }, template_blueprint=M3_ENABLEMENT_TEMPLATE, ) - - # TODO - @digimaun - expand trustSource for self managed & trustBundleSettings + if self.user_trust: + # disable cert and trust manager + parameters["trustConfig"]["value"]["source"] = "CustomerManaged" + # patch enablement template expecting full trust settings for source: CustomerManaged + template.get_type_definition("_1.CustomerManaged")["properties"]["settings"]["nullable"] = True return template.content, parameters def get_ops_instance_template( diff --git a/azext_edge/edge/providers/orchestration/work.py b/azext_edge/edge/providers/orchestration/work.py index a6570c77b..d229c4847 100644 --- a/azext_edge/edge/providers/orchestration/work.py +++ b/azext_edge/edge/providers/orchestration/work.py @@ -139,18 +139,14 @@ def _format_instance_desc(self) -> str: def _build_display(self): pre_check_cat_desc = "Pre-Flight" self._display.add_category(WorkCategoryKey.PRE_FLIGHT, pre_check_cat_desc, skipped=not self._pre_flight) - self._display.add_step( - WorkCategoryKey.PRE_FLIGHT, WorkStepKey.REG_RP, "Ensure registered resource providers" - ) + self._display.add_step(WorkCategoryKey.PRE_FLIGHT, WorkStepKey.REG_RP, "Ensure registered resource providers") self._display.add_step( WorkCategoryKey.PRE_FLIGHT, WorkStepKey.ENUMERATE_PRE_FLIGHT, "Enumerate pre-flight checks" ) if self._apply_foundation: self._display.add_category(WorkCategoryKey.ENABLE_IOT_OPS, "Enablement") - self._display.add_step( - WorkCategoryKey.ENABLE_IOT_OPS, WorkStepKey.WHAT_IF_ENABLEMENT, "What-If evaluation" - ) + self._display.add_step(WorkCategoryKey.ENABLE_IOT_OPS, WorkStepKey.WHAT_IF_ENABLEMENT, "What-If evaluation") self._display.add_step( WorkCategoryKey.ENABLE_IOT_OPS, WorkStepKey.DEPLOY_ENABLEMENT, @@ -292,9 +288,7 @@ def _do_work(self): # noqa: C901 # Enable IoT Ops workflow if self._apply_foundation: enablement_work_name = self._work_format_str.format(op="enablement") - self.render_display( - category=WorkCategoryKey.ENABLE_IOT_OPS, active_step=WorkStepKey.WHAT_IF_ENABLEMENT - ) + self.render_display(category=WorkCategoryKey.ENABLE_IOT_OPS, active_step=WorkStepKey.WHAT_IF_ENABLEMENT) enablement_content, enablement_parameters = self._targets.get_ops_enablement_template() self._deploy_template( content=enablement_content, @@ -360,6 +354,22 @@ def _do_work(self): # noqa: C901 "Instance deployment will not continue. Please run `az iot ops init`." ) + # validate trust config in platform extension matches trust settings in create + platform_extension_config = self._extension_map[IOT_OPS_PLAT_EXTENSION_TYPE]["properties"][ + "configurationSettings" + ] + is_user_trust = platform_extension_config.get("installCertManager", "").lower() != "true" + if is_user_trust and not self._targets.trust_settings: + raise ValidationError( + "Cluster was enabled with user-managed trust configuration, " + "--trust-settings arguments are required to create an instance on this cluster." + ) + elif not is_user_trust and self._targets.trust_settings: + raise ValidationError( + "Cluster was enabled with system cert-manager, " + "trust settings (--trust-settings) are not applicable to this cluster." 
+ ) + instance_work_name = self._work_format_str.format(op="instance") self.render_display(category=WorkCategoryKey.DEPLOY_IOT_OPS, active_step=WorkStepKey.WHAT_IF_INSTANCE) instance_content, instance_parameters = self._targets.get_ops_instance_template( @@ -395,7 +405,7 @@ def _do_work(self): # noqa: C901 instance_output = wait_for_terminal_state(instance_poller) # safely get nested property - keys = ['properties', 'outputs', 'aioExtension', 'value', 'identityPrincipalId'] + keys = ["properties", "outputs", "aioExtension", "value", "identityPrincipalId"] extension_principal_id = reduce(lambda val, key: val.get(key) if val else None, keys, instance_output) # TODO - @c-ryan-k consider setting role_assignment_error if extension_principal_id is None role_assignment_error = None diff --git a/azext_edge/tests/edge/orchestration/test_targets_unit.py b/azext_edge/tests/edge/orchestration/test_targets_unit.py index b6e0d2a2e..1a531698c 100644 --- a/azext_edge/tests/edge/orchestration/test_targets_unit.py +++ b/azext_edge/tests/edge/orchestration/test_targets_unit.py @@ -104,6 +104,16 @@ def get_trust_settings(): container_runtime_socket=generate_random_string(), custom_broker_config={generate_random_string(): generate_random_string()}, ), + build_target_scenario( + cluster_name=generate_random_string(), + resource_group_name=generate_random_string(), + schema_registry_resource_id=get_resource_id( + resource_path="/schemaRegistries/myregistry", + resource_group_name=generate_random_string(), + resource_provider="Microsoft.DeviceRegistry", + ), + user_trust=True, + ), ], ) def test_init_targets(target_scenario: dict): @@ -129,8 +139,10 @@ def test_init_targets(target_scenario: dict): verify_user_trust_settings(targets, target_scenario) - _, enablement_parameters = targets.get_ops_enablement_template() - # test enablement_template + enablement_template, enablement_parameters = targets.get_ops_enablement_template() + + verify_user_trust_enablement(targets, enablement_template, target_scenario) + for parameter in enablement_parameters: targets_key = parameter if parameter in ENABLEMENT_PARAM_CONVERSION_MAP: @@ -209,3 +221,12 @@ def verify_user_trust_settings(targets: InitTargets, target_scenario: dict): "configMapName": target_scenario["trust_settings"]["configMapName"], }, } + + +def verify_user_trust_enablement(targets: InitTargets, enablement_template: dict, target_scenario: dict): + if target_scenario.get("user_trust"): + assert targets.trust_config["source"] == "CustomerManaged" + # TODO @c-ryan-k - Enablement template should not require "settings" for customer managed trust config + assert enablement_template["definitions"]["_1.CustomerManaged"]["properties"]["settings"]["nullable"] + elif not target_scenario.get("trust_settings"): + assert targets.trust_config["source"] == "SelfSigned" From fca961efdc525566a46d007c8fe7ff62ccdbbcf2 Mon Sep 17 00:00:00 2001 From: Victoria Litvinova <73560279+vilit1@users.noreply.github.com> Date: Wed, 30 Oct 2024 16:50:37 -0700 Subject: [PATCH 24/26] fix: improve `az iot ops upgrade` (#429) Main additions: 1. don't allow preview 0.7.0 instances to be upgraded 2. improve messaging for instance upgrade 3. 
allow for mid 0.8.0 instance upgrades --- .../edge/providers/orchestration/upgrade.py | 103 +++++++++++------- .../edge/orchestration/test_upgrade_unit.py | 96 +++++++++++----- setup.py | 1 - 3 files changed, 136 insertions(+), 64 deletions(-) diff --git a/azext_edge/edge/providers/orchestration/upgrade.py b/azext_edge/edge/providers/orchestration/upgrade.py index 9c491e9b3..119489583 100644 --- a/azext_edge/edge/providers/orchestration/upgrade.py +++ b/azext_edge/edge/providers/orchestration/upgrade.py @@ -10,9 +10,10 @@ from azure.cli.core.azclierror import ( ArgumentUsageError, AzureResponseError, + CLIInternalError, RequiredArgumentMissingError, ) -from azure.core.exceptions import HttpResponseError, ResourceNotFoundError +from azure.core.exceptions import HttpResponseError from knack.log import get_logger from rich import print from rich.console import NewLine @@ -28,6 +29,7 @@ logger = get_logger(__name__) INSTANCE_7_API = "2024-08-15-preview" +INSTANCE_7_VERSION = "0.7.31" def upgrade_ops_resources( @@ -84,11 +86,14 @@ def __init__( self._progress_shown = False def do_work(self, confirm_yes: Optional[bool] = None): + from .template import M3_INSTANCE_TEMPLATE + self.new_aio_version = M3_INSTANCE_TEMPLATE.content["variables"]["VERSIONS"]["iotOperations"] # get the resource map from the instance (checks if update is needed for instance) self.resource_map = self._get_resource_map() # Ensure cluster exists with existing resource_map pattern. self.resource_map.connected_cluster.resource self.cluster_name = self.resource_map.connected_cluster.cluster_name + current_version = self.instance["properties"]["version"] # get the extensions to update, populate the expected patches extension_text = self._check_extensions() @@ -97,20 +102,22 @@ def do_work(self, confirm_yes: Optional[bool] = None): print("[green]Nothing to upgrade :)[/green]") return - print("Azure IoT Operations Upgrade") - print() - if self.extensions_to_update: - print(Padding("Extensions to update:", (0, 0, 0, 2))) - print(Padding(extension_text, (0, 0, 0, 4))) - - if self.require_instance_upgrade: - print(Padding( - "Old Azure IoT Operations instance version found. Will update the instance to the latest version.", - (0, 0, 0, 2) - )) - - print() - print("[yellow]Upgrading may fail and require you to delete and re-create your cluster.[/yellow]") + if self._render_progress: + print("Azure IoT Operations Upgrade") + print() + if self.extensions_to_update: + print(Padding("Extensions to update:", (0, 0, 0, 2))) + print(Padding(extension_text, (0, 0, 0, 4))) + # if the aio extension is updated, the instance will be too + if "azure-iot-operations" in extension_text: + print(Padding( + f"Azure IoT Operations instance version {current_version} found. 
Will update the instance to " + f"version {self.new_aio_version}.", + (0, 0, 0, 2) + )) + + print() + print("[yellow]Upgrading may fail and require you to delete and re-create your cluster.[/yellow]") should_bail = not should_continue_prompt(confirm_yes=confirm_yes, context="Upgrade") if should_bail: @@ -120,15 +127,12 @@ def do_work(self, confirm_yes: Optional[bool] = None): return self._process() def _check_extensions(self) -> str: - from packaging import version from .template import M3_ENABLEMENT_TEMPLATE, M3_INSTANCE_TEMPLATE version_map = M3_ENABLEMENT_TEMPLATE.content["variables"]["VERSIONS"].copy() version_map.update(M3_INSTANCE_TEMPLATE.content["variables"]["VERSIONS"].copy()) train_map = M3_ENABLEMENT_TEMPLATE.content["variables"]["TRAINS"].copy() train_map.update(M3_INSTANCE_TEMPLATE.content["variables"]["TRAINS"].copy()) - self.new_aio_version = version_map["iotOperations"] - # note that the secret store type changes but somehow it all works out :) # the order is determined by depends on in the template type_to_key_map = OrderedDict([ @@ -146,6 +150,13 @@ def _check_extensions(self) -> str: }) # make sure order is kept self.extensions_to_update = OrderedDict() + + # package import can be wack + try: + from packaging import version + except ImportError: + raise CLIInternalError("Cannot parse extension versions.") + for extension_type, extension in ordered_aio_extensions.items(): extension_key = type_to_key_map[extension_type] current_version = extension["properties"].get("version", "0") @@ -170,13 +181,16 @@ def _check_extensions(self) -> str: # should still be fine for mesh - if it is at the current version, already, it should have these props # worst case it the extra config settings do nothing - if all([ - version.parse(current_version) >= version.parse(version_map[extension_key]), - train_map[extension_key].lower() == current_train - ]): - logger.info(f"Extension {extension['name']} is already up to date.") - continue - self.extensions_to_update[extension["name"]] = extension_update + try: + if all([ + version.parse(current_version) >= version.parse(version_map[extension_key]), + train_map[extension_key].lower() == current_train + ]): + logger.info(f"Extension {extension['name']} is already up to date.") + continue + self.extensions_to_update[extension["name"]] = extension_update + except version.InvalidVersion: + raise CLIInternalError(f"Cannot parse extension versions for {extension['name']}.") # try to get the sr resource id if not present already extension_props = type_to_aio_extensions["microsoft.iotoperations"]["properties"] @@ -194,6 +208,11 @@ def _check_extensions(self) -> str: def _get_resource_map(self) -> IoTOperationsResourceMap: self.require_instance_upgrade = True + api_spec_error = "HttpResponsePayloadAPISpecValidationFailed" + error_msg = ( + f"Cannot upgrade instance {self.instance_name}, please delete your instance, including " + "dependencies, and reinstall." 
+ ) # try with 2024-08-15-preview -> it is m2 try: self.instance = self.resource_client.resources.get( @@ -204,23 +223,26 @@ def _get_resource_map(self) -> IoTOperationsResourceMap: resource_name=self.instance_name, api_version=INSTANCE_7_API ) + # don't deal with bug bash m2's - only released version + if self.instance["properties"]["version"] != INSTANCE_7_VERSION: + raise ArgumentUsageError(error_msg) return self.instances.get_resource_map(self.instance) - except HttpResponseError: - self.require_instance_upgrade = False + except HttpResponseError as e: + if api_spec_error not in e.message: + raise e + # try with 2024-09-15-preview -> it is m3 already + self.require_instance_upgrade = False try: self.instance = self.instances.show( name=self.instance_name, resource_group_name=self.resource_group_name ) return self.instances.get_resource_map(self.instance) - except ResourceNotFoundError as e: + except HttpResponseError as e: + if api_spec_error in e.message: + raise ArgumentUsageError(error_msg) raise e - except HttpResponseError: - raise ArgumentUsageError( - f"Cannot upgrade instance {self.instance_name}, please delete your instance, including " - "dependencies, and reinstall." - ) def _render_display(self, description: str): if self._render_progress: @@ -248,17 +270,18 @@ def _stop_display(self): def _process(self): if self.require_instance_upgrade: + + # prep the instance + self.instance.pop("systemData", None) + inst_props = self.instance["properties"] # m3 extensions should not have the reg id if not self.sr_resource_id: raise RequiredArgumentMissingError( "Cannot determine the schema registry id from installed extensions, please provide the schema " "registry id via `--sr-id`." ) - - # prep the instance - self.instance.pop("systemData", None) - inst_props = self.instance["properties"] inst_props["schemaRegistryRef"] = {"resourceId": self.sr_resource_id} + inst_props["version"] = self.new_aio_version inst_props.pop("schemaRegistryNamespace", None) inst_props.pop("components", None) @@ -283,7 +306,6 @@ def _process(self): raise AzureResponseError( f"Updating extension {extension} failed with the error message: {status['message']}" ) - if self.require_instance_upgrade: # update the instance + minimize the code to be taken out once this is no longer needed self._render_display("[yellow]Updating instance...") @@ -295,6 +317,11 @@ def _process(self): resource=self.instance ) ) + else: + result = self.instances.show( + resource_group_name=self.resource_group_name, + name=self.instance_name, + ) except (HttpResponseError, KeyboardInterrupt) as e: if self.require_instance_upgrade: logger.error( diff --git a/azext_edge/tests/edge/orchestration/test_upgrade_unit.py b/azext_edge/tests/edge/orchestration/test_upgrade_unit.py index 5ea33a29e..275830a20 100644 --- a/azext_edge/tests/edge/orchestration/test_upgrade_unit.py +++ b/azext_edge/tests/edge/orchestration/test_upgrade_unit.py @@ -4,7 +4,6 @@ # Licensed under the MIT License. See License file in the project root for license information. 
# ---------------------------------------------------------------------------------------------- -from packaging import version from typing import Dict, List, Optional, OrderedDict from unittest.mock import Mock import pytest @@ -119,7 +118,6 @@ def _generate_extensions(**extension_version_map) -> OrderedDict: return extensions -# TODO: if not used for m3 - simplify def _generate_instance(instance_name: str, resource_group: str, m3: bool = False): mock_instance_record = { "extendedLocation": { @@ -144,7 +142,7 @@ def _generate_instance(instance_name: str, resource_group: str, m3: bool = False "type": "microsoft.iotoperations/instances" } if m3: - mock_instance_record["properties"]["schemaRegistryRef"] = {"resource_id": generate_random_string()} + mock_instance_record["properties"]["schemaRegistryRef"] = {"resourceId": generate_random_string()} else: mock_instance_record["properties"]["schemaRegistryNamespace"] = generate_random_string() mock_instance_record["properties"]["components"] = { @@ -178,7 +176,6 @@ def _generate_trains(**trains) -> dict: @pytest.mark.parametrize("no_progress", [False, True]) -@pytest.mark.parametrize("require_instance_update", [False, True]) @pytest.mark.parametrize("current_extensions, new_versions, new_trains", [ # update none ( @@ -259,7 +256,6 @@ def test_upgrade_lifecycle( mocked_logger: Mock, mocked_rich_print: Mock, spy_upgrade_manager: Dict[str, Mock], - require_instance_update: bool, current_extensions: List[dict], new_versions: List[dict], new_trains: List[dict], @@ -276,28 +272,32 @@ def test_upgrade_lifecycle( mocked_resource_map.connected_cluster.extensions = list(current_extensions.values()) extension_update_mock = mocked_resource_map.connected_cluster.clusters.extensions.update_cluster_extension _assemble_template_mock(mocker, new_versions=new_versions, new_trains=new_trains) - m2_instance = None + instance_body = None # the get m2 instance call - if require_instance_update: - m2_instance = _generate_instance(instance_name=instance_name, resource_group=rg_name) + current_version = current_extensions["iot_operations"]["properties"]["version"] + if current_version == "0.7.31": + instance_body = _generate_instance(instance_name=instance_name, resource_group=rg_name) # note the resource client adds an extra / before instances for the parent path. 
The api doesnt care mocked_responses.add( method=responses.GET, url=f"https://management.azure.com/subscriptions/{get_zeroed_subscription()}/resourcegroups/{rg_name}" f"/providers/Microsoft.IoTOperations//instances/{instance_name}?api-version=2024-08-15-preview", - json=m2_instance, + json=instance_body, status=200, content_type="application/json", ) else: + instance_body = _generate_instance(instance_name=instance_name, resource_group=rg_name, m3=True) mocked_responses.add( method=responses.GET, url=f"https://management.azure.com/subscriptions/{get_zeroed_subscription()}/resourcegroups/{rg_name}" f"/providers/Microsoft.IoTOperations//instances/{instance_name}?api-version=2024-08-15-preview", - status=404, + json={"message": "HttpResponsePayloadAPISpecValidationFailed"}, + status=412, content_type="application/json", ) - # no need to provide valid value for instance show since it will not be used + instance_body["properties"]["version"] = current_version + mocked_instances.show.return_value = instance_body kwargs = { "cmd": mocked_cmd, @@ -316,6 +316,10 @@ def test_upgrade_lifecycle( extensions_to_update = {} extension_update_calls = extension_update_mock.call_args_list call = 0 + try: + from packaging import version + except ImportError: + pytest.fail("Import packaging failed.") for key, extension in current_extensions.items(): if any([ version.parse(extension["properties"]["version"]) < version.parse(new_versions[key]), @@ -341,17 +345,17 @@ def test_upgrade_lifecycle( assert len(extensions_to_update) == len(extension_update_calls) # overall upgrade call - assert spy_upgrade_manager["_process"].called is bool(extensions_to_update or require_instance_update) + assert spy_upgrade_manager["_process"].called is any([extensions_to_update, current_version == "0.7.31"]) - if require_instance_update: + if current_version == "0.7.31": update_args = mocked_instances.iotops_mgmt_client.instance.begin_create_or_update.call_args.kwargs update_body = update_args["resource"] # props that were kept the same for prop in ["extendedLocation", "id", "name", "location", "resourceGroup", "type"]: - assert update_body[prop] == m2_instance[prop] + assert update_body[prop] == instance_body[prop] for prop in ["description", "provisioningState"]: - assert update_body["properties"][prop] == m2_instance["properties"][prop] + assert update_body["properties"][prop] == instance_body["properties"][prop] # props that were removed assert "systemData" not in update_body @@ -361,17 +365,22 @@ def test_upgrade_lifecycle( # props that were added/changed - also ensure right sr id is used assert update_body["properties"]["version"] == new_versions["iot_operations"] aio_ext_props = current_extensions["iot_operations"]["properties"] - assert update_body["properties"]["schemaRegistryRef"]["resourceId"] == ( + expected_sr_resource_id = ( sr_resource_id or aio_ext_props["configurationSettings"]["schemaRegistry.values.resourceId"] ) + if current_version != "0.7.31": + expected_sr_resource_id = instance_body["properties"]["schemaRegistryRef"]["resourceId"] + + assert update_body["properties"]["schemaRegistryRef"]["resourceId"] == expected_sr_resource_id else: # make sure we tried to get the m3 - mocked_instances.show.assert_called() + assert mocked_instances.show.call_count == (2 if extension_update_calls else 1) mocked_instances.iotops_mgmt_client.instance.begin_create_or_update.assert_not_called() # no progress check - if kwargs["no_progress"]: + if no_progress: mocked_live_display.assert_called_once_with(None, transient=False, 
refresh_per_second=8, auto_refresh=False) + assert mocked_rich_print.called == (not spy_upgrade_manager["_process"].called or not no_progress) def test_upgrade_error( @@ -409,11 +418,13 @@ def test_upgrade_error( status=200, content_type="application/json", ) + error_msg = "instance update failed" mocked_instances.iotops_mgmt_client.instance.begin_create_or_update.side_effect = HttpResponseError( - "instance update failed" + error_msg ) with pytest.raises(HttpResponseError) as e: upgrade_ops_resources(**kwargs) + assert error_msg in e.value.message # some random extension has a hidden status error mocked_responses.add( @@ -442,11 +453,11 @@ def test_upgrade_error( status=200, content_type="application/json", ) - extension_update_mock.side_effect = HttpResponseError( - "extension update failed" - ) - with pytest.raises(HttpResponseError): + error_msg = "extension update failed" + extension_update_mock.side_effect = HttpResponseError(error_msg) + with pytest.raises(HttpResponseError) as e: upgrade_ops_resources(**kwargs) + assert error_msg in e.value.message # need to update the instance but cannot get the sr resource id mocked_responses.add( @@ -462,7 +473,20 @@ def test_upgrade_error( with pytest.raises(RequiredArgumentMissingError): upgrade_ops_resources(**kwargs) - # cannot get m2 or m3 + # instance is an unreleased bug bash version + m2_instance["properties"]["version"] = "0.7.25" + mocked_responses.add( + method=responses.GET, + url=f"https://management.azure.com/subscriptions/{get_zeroed_subscription()}/resourcegroups/{rg_name}" + f"/providers/Microsoft.IoTOperations//instances/{instance_name}?api-version=2024-08-15-preview", + json=m2_instance, + status=200, + content_type="application/json", + ) + with pytest.raises(ArgumentUsageError): + upgrade_ops_resources(**kwargs) + + # other m2 get errors raise normally mocked_responses.add( method=responses.GET, url=f"https://management.azure.com/subscriptions/{get_zeroed_subscription()}/resourcegroups/{rg_name}" @@ -470,6 +494,28 @@ def test_upgrade_error( status=404, content_type="application/json", ) - mocked_instances.show.side_effect = HttpResponseError("instance get failed") + with pytest.raises(HttpResponseError) as e: + upgrade_ops_resources(**kwargs) + assert e.value.response.status_code == 404 + + # other m3 get errors raise normally + mocked_responses.add( + method=responses.GET, + url=f"https://management.azure.com/subscriptions/{get_zeroed_subscription()}/resourcegroups/{rg_name}" + f"/providers/Microsoft.IoTOperations//instances/{instance_name}?api-version=2024-08-15-preview", + json={"message": "HttpResponsePayloadAPISpecValidationFailed"}, + status=412, + content_type="application/json", + ) + error_msg = "instance get failed" + mocked_instances.show.side_effect = HttpResponseError(error_msg) + with pytest.raises(HttpResponseError) as e: + upgrade_ops_resources(**kwargs) + assert error_msg in e.value.message + + # cannot get m2 or m3 because api spec validation + mocked_instances.show.side_effect = HttpResponseError( + "(HttpResponsePayloadAPISpecValidationFailed) instance get failed" + ) with pytest.raises(ArgumentUsageError): upgrade_ops_resources(**kwargs) diff --git a/setup.py b/setup.py index f80ee6cf0..0a756401e 100644 --- a/setup.py +++ b/setup.py @@ -28,7 +28,6 @@ DEPENDENCIES = [ - "packaging", "rich>=13.6,<14.0", "kubernetes>=27.2,<29.0", "azure-identity>=1.14.1,<1.18.0", From 44b28ed07eae208fc4db94809bd6cffddb428969 Mon Sep 17 00:00:00 2001 From: Victoria Litvinova <73560279+vilit1@users.noreply.github.com> Date: 
Thu, 31 Oct 2024 13:34:22 -0700 Subject: [PATCH 25/26] fix: change schema version error handling to look at error codes (#433) --- .../resources/schema_registries.py | 2 +- .../resources/test_schema_unit.py | 54 ++++++++++++++++++- 2 files changed, 53 insertions(+), 3 deletions(-) diff --git a/azext_edge/edge/providers/orchestration/resources/schema_registries.py b/azext_edge/edge/providers/orchestration/resources/schema_registries.py index 66460198d..ed31094c4 100644 --- a/azext_edge/edge/providers/orchestration/resources/schema_registries.py +++ b/azext_edge/edge/providers/orchestration/resources/schema_registries.py @@ -287,7 +287,7 @@ def add_version( resource=resource ) except HttpResponseError as e: - if "AuthorizationFailure" in e.message: + if e.status_code == 412: raise ForbiddenError( "Schema versions require public network access to be enabled in the associated storage account." ) diff --git a/azext_edge/tests/edge/orchestration/resources/test_schema_unit.py b/azext_edge/tests/edge/orchestration/resources/test_schema_unit.py index b4acebd58..624d2f9db 100644 --- a/azext_edge/tests/edge/orchestration/resources/test_schema_unit.py +++ b/azext_edge/tests/edge/orchestration/resources/test_schema_unit.py @@ -454,8 +454,10 @@ def test_version_add(mocked_cmd, mocked_responses: responses, description: Optio assert create_payload["properties"]["description"] == description -def test_version_add_error(mocked_cmd): - from azure.cli.core.azclierror import InvalidArgumentValueError +def test_version_add_error(mocked_cmd, mocked_responses: responses): + from azure.cli.core.azclierror import InvalidArgumentValueError, ForbiddenError + from azure.core.exceptions import HttpResponseError + # bad version with pytest.raises(InvalidArgumentValueError): add_version( cmd=mocked_cmd, @@ -465,3 +467,51 @@ def test_version_add_error(mocked_cmd): schema_version_content=generate_random_string(), resource_group_name=generate_random_string() ) + + schema_name = generate_random_string() + version_num = 1 + registry_name = generate_random_string() + resource_group_name = generate_random_string() + # error checking 412 + mocked_responses.add( + method=responses.PUT, + url=get_schema_version_endpoint( + resource_group_name=resource_group_name, + registry_name=registry_name, + schema_name=schema_name, + schema_version=version_num + ), + status=412, + content_type="application/json", + ) + with pytest.raises(ForbiddenError): + add_version( + cmd=mocked_cmd, + version_name=version_num, + resource_group_name=resource_group_name, + schema_registry_name=registry_name, + schema_name=schema_name, + schema_version_content=generate_random_string(), + ) + + # error checking other + mocked_responses.add( + method=responses.PUT, + url=get_schema_version_endpoint( + resource_group_name=resource_group_name, + registry_name=registry_name, + schema_name=schema_name, + schema_version=version_num + ), + status=404, + content_type="application/json", + ) + with pytest.raises(HttpResponseError): + add_version( + cmd=mocked_cmd, + version_name=version_num, + resource_group_name=resource_group_name, + schema_registry_name=registry_name, + schema_name=schema_name, + schema_version_content=generate_random_string(), + ) From ffd48f52f505fe00ca00fd9eeaabf3d981874cfd Mon Sep 17 00:00:00 2001 From: Elsie4ever <22055990+Elsie4ever@users.noreply.github.com> Date: Thu, 31 Oct 2024 14:24:04 -0700 Subject: [PATCH 26/26] fix: secret sync show and disable (#430) --- azext_edge/constants.py | 2 +- azext_edge/edge/_help.py | 11 ++- 
azext_edge/edge/command_map.py | 2 +- azext_edge/edge/commands_secretsync.py | 4 +- .../resources/connector/opcua/certs.py | 4 +- .../orchestration/resources/instances.py | 90 +++++++++++++++---- azext_edge/edge/util/az_client.py | 3 - 7 files changed, 84 insertions(+), 32 deletions(-) diff --git a/azext_edge/constants.py b/azext_edge/constants.py index bda3409de..437e1fb2a 100644 --- a/azext_edge/constants.py +++ b/azext_edge/constants.py @@ -7,7 +7,7 @@ import os -VERSION = "0.8.0a2" +VERSION = "0.8.0b1" EXTENSION_NAME = "azure-iot-ops" EXTENSION_ROOT = os.path.dirname(os.path.abspath(__file__)) USER_AGENT = "IotOperationsCliExtension/{}".format(VERSION) diff --git a/azext_edge/edge/_help.py b/azext_edge/edge/_help.py index 47afa9a34..cbee9a67e 100644 --- a/azext_edge/edge/_help.py +++ b/azext_edge/edge/_help.py @@ -716,15 +716,15 @@ def load_iotops_help(): """ helps[ - "iot ops secretsync show" + "iot ops secretsync list" ] = """ type: command - short-summary: Show the secret sync config associated with an instance. + short-summary: List the secret sync configs associated with an instance. examples: - - name: Show the secret sync config associated with an instance. + - name: List the secret sync configs associated with an instance. text: > - az iot ops secretsync show --name myinstance -g myresourcegroup + az iot ops secretsync list --name myinstance -g myresourcegroup """ helps[ @@ -732,6 +732,9 @@ def load_iotops_help(): ] = """ type: command short-summary: Disable secret sync for an instance. + long-summary: | + All the secret provider classes associated with the instance, and all the secret + syncs associated with the secret provider classes will be deleted. examples: - name: Disable secret sync for an instance. diff --git a/azext_edge/edge/command_map.py b/azext_edge/edge/command_map.py index 57fc14e4d..e2cd207bb 100644 --- a/azext_edge/edge/command_map.py +++ b/azext_edge/edge/command_map.py @@ -52,7 +52,7 @@ def load_iotops_commands(self, _): ) as cmd_group: cmd_group.command("enable", "secretsync_enable") cmd_group.command("disable", "secretsync_disable") - cmd_group.show_command("show", "secretsync_show") + cmd_group.show_command("list", "secretsync_list") with self.command_group( "iot ops support", diff --git a/azext_edge/edge/commands_secretsync.py b/azext_edge/edge/commands_secretsync.py index ec3572139..e5d053ec7 100644 --- a/azext_edge/edge/commands_secretsync.py +++ b/azext_edge/edge/commands_secretsync.py @@ -36,8 +36,8 @@ def secretsync_enable( ) -def secretsync_show(cmd, instance_name: str, resource_group_name: str) -> dict: - return Instances(cmd).show_secretsync( +def secretsync_list(cmd, instance_name: str, resource_group_name: str) -> dict: + return Instances(cmd).list_secretsync( name=instance_name, resource_group_name=resource_group_name, ) diff --git a/azext_edge/edge/providers/orchestration/resources/connector/opcua/certs.py b/azext_edge/edge/providers/orchestration/resources/connector/opcua/certs.py index 66695d34c..047e42951 100644 --- a/azext_edge/edge/providers/orchestration/resources/connector/opcua/certs.py +++ b/azext_edge/edge/providers/orchestration/resources/connector/opcua/certs.py @@ -15,7 +15,7 @@ import yaml from ....common import CUSTOM_LOCATIONS_API_VERSION -from ...instances import Instances +from ...instances import SECRET_SYNC_RESOURCE_TYPE, SPC_RESOURCE_TYPE, Instances from ....work import IOT_OPS_EXTENSION_TYPE from ......util.file_operations import read_file_content, validate_file_extension from ......util.queryable import Queryable @@ 
-30,8 +30,6 @@ console = Console() -SPC_RESOURCE_TYPE = "microsoft.secretsynccontroller/azurekeyvaultsecretproviderclasses" -SECRET_SYNC_RESOURCE_TYPE = "microsoft.secretsynccontroller/secretsyncs" OPCUA_SPC_NAME = "opc-ua-connector" OPCUA_TRUST_LIST_SECRET_SYNC_NAME = "aio-opc-ua-broker-trust-list" OPCUA_ISSUER_LIST_SECRET_SYNC_NAME = "aio-opc-ua-broker-issuer-list" diff --git a/azext_edge/edge/providers/orchestration/resources/instances.py b/azext_edge/edge/providers/orchestration/resources/instances.py index 3c06a0ded..88bf68c38 100644 --- a/azext_edge/edge/providers/orchestration/resources/instances.py +++ b/azext_edge/edge/providers/orchestration/resources/instances.py @@ -31,6 +31,8 @@ console = Console() +SPC_RESOURCE_TYPE = "microsoft.secretsynccontroller/azurekeyvaultsecretproviderclasses" +SECRET_SYNC_RESOURCE_TYPE = "microsoft.secretsynccontroller/secretsyncs" SERVICE_ACCOUNT_DATAFLOW = "aio-dataflow" SERVICE_ACCOUNT_SECRETSYNC = "aio-ssc-sa" KEYVAULT_ROLE_ID_SECRETS_USER = "4633458b-17de-408a-b874-0445c86b69e6" @@ -244,6 +246,7 @@ def enable_secretsync( use_self_hosted_issuer: Optional[bool] = None, **kwargs, ): + # TODO: add unit test mi_resource_id_container = parse_resource_id(mi_user_assigned) keyvault_resource_id_container = parse_resource_id(keyvault_resource_id) with console.status("Working...") as status: @@ -272,7 +275,9 @@ def enable_secretsync( oidc_issuer = self._ensure_oidc_issuer(cluster_resource, use_self_hosted_issuer) cl_resources = resource_map.connected_cluster.get_aio_resources(custom_location_id=custom_location["id"]) - secretsync_spc = self._find_existing_spc(cl_resources) + secretsync_spc = self._find_existing_resources( + cl_resources=cl_resources, resource_type=SPC_RESOURCE_TYPE + ) if secretsync_spc: status.stop() logger.warning( @@ -317,16 +322,19 @@ def enable_secretsync( logger.warning(role_assignment_error) return result_spc - def show_secretsync(self, name: str, resource_group_name: str) -> Optional[dict]: + def list_secretsync(self, name: str, resource_group_name: str) -> Optional[dict]: + # TODO: add unit test with console.status("Working..."): instance = self.show(name=name, resource_group_name=resource_group_name) resource_map = self.get_resource_map(instance) cl_resources = resource_map.connected_cluster.get_aio_resources( custom_location_id=instance["extendedLocation"]["name"] ) - secretsync_spc = self._find_existing_spc(cl_resources) - if secretsync_spc: - return secretsync_spc + secretsync_spcs = self._find_existing_resources( + cl_resources=cl_resources, resource_type=SPC_RESOURCE_TYPE + ) + if secretsync_spcs: + return secretsync_spcs logger.warning(f"No secret provider class detected.\n{get_enable_syntax(name, resource_group_name)}") def disable_secretsync( @@ -336,6 +344,7 @@ def disable_secretsync( confirm_yes: Optional[bool] = None, **kwargs, ): + # TODO: add unit test should_bail = not should_continue_prompt(confirm_yes=confirm_yes) if should_bail: return @@ -346,24 +355,69 @@ def disable_secretsync( cl_resources = resource_map.connected_cluster.get_aio_resources( custom_location_id=instance["extendedLocation"]["name"] ) - secretsync_spc = self._find_existing_spc(cl_resources) - if secretsync_spc: - spc_poller = self.ssc_mgmt_client.azure_key_vault_secret_provider_classes.begin_delete( - resource_group_name=resource_group_name, - azure_key_vault_secret_provider_class_name=secretsync_spc["name"], - ) - wait_for_terminal_state(spc_poller, **kwargs) + secretsync_spcs = self._find_existing_resources( + cl_resources=cl_resources, 
resource_type=SPC_RESOURCE_TYPE + ) + secretsyncs = self._find_existing_resources( + cl_resources=cl_resources, resource_type=SECRET_SYNC_RESOURCE_TYPE + ) + + related_secretsyncs = [] + if secretsync_spcs: + for secretsync_spc in secretsync_spcs: + spc_poller = self.ssc_mgmt_client.azure_key_vault_secret_provider_classes.begin_delete( + resource_group_name=resource_group_name, + azure_key_vault_secret_provider_class_name=secretsync_spc["name"], + ) + wait_for_terminal_state(spc_poller, **kwargs) + + # get associated secret sync names + related_secretsyncs.extend( + self._find_spc_related_secretsyncs( + spc_name=secretsync_spc["name"], + secretsync_resources=secretsyncs, + ) + ) + + # delete associated secret syncs + if related_secretsyncs: + for secretsync in related_secretsyncs: + secretsync_poller = self.ssc_mgmt_client.secret_syncs.begin_delete( + resource_group_name=resource_group_name, + secret_sync_name=secretsync, + ) + wait_for_terminal_state(secretsync_poller, **kwargs) + return logger.warning(f"No secret provider class detected.\n{get_enable_syntax(name, resource_group_name)}") - def _find_existing_spc(self, cl_resources: List[dict]) -> Optional[dict]: + def _find_existing_resources(self, cl_resources: List[dict], resource_type: str) -> Optional[List[dict]]: + resources = [] for resource in cl_resources: - if resource["type"].lower() == "microsoft.secretsynccontroller/azurekeyvaultsecretproviderclasses": + if resource["type"].lower() == resource_type: resource_id_container = parse_resource_id(resource["id"]) - return self.ssc_mgmt_client.azure_key_vault_secret_provider_classes.get( - resource_group_name=resource_id_container.resource_group_name, - azure_key_vault_secret_provider_class_name=resource_id_container.resource_name, - ) + if resource_type == SPC_RESOURCE_TYPE: + resources.append( + self.ssc_mgmt_client.azure_key_vault_secret_provider_classes.get( + resource_group_name=resource_id_container.resource_group_name, + azure_key_vault_secret_provider_class_name=resource_id_container.resource_name, + ) + ) + elif resource_type == SECRET_SYNC_RESOURCE_TYPE: + resources.append( + self.ssc_mgmt_client.secret_syncs.get( + resource_group_name=resource_id_container.resource_group_name, + secret_sync_name=resource_id_container.resource_name, + ) + ) + return resources + + def _find_spc_related_secretsyncs(self, spc_name: str, secretsync_resources: List[dict]) -> List[str]: + related_secretsyncs = [] + for secretsync in secretsync_resources: + if secretsync["properties"]["secretProviderClassName"] == spc_name: + related_secretsyncs.append(secretsync["name"]) + return related_secretsyncs def _attempt_keyvault_role_assignments( self, keyvault_resource_id_container: ResourceIdContainer, mi_user_assigned: dict diff --git a/azext_edge/edge/util/az_client.py b/azext_edge/edge/util/az_client.py index d0fc09206..488e9e2c1 100644 --- a/azext_edge/edge/util/az_client.py +++ b/azext_edge/edge/util/az_client.py @@ -196,9 +196,6 @@ def get_keyvault_client(subscription_id: str, keyvault_name: str, **kwargs) -> " **kwargs, ) - # wait to set the access token - sleep(5) - return client