From 80d7447610f94ea943c83156dfd3c29e47269109 Mon Sep 17 00:00:00 2001 From: william051200 Date: Thu, 5 Mar 2026 10:41:02 +0800 Subject: [PATCH 1/7] Generate vm host code --- .../vm/aaz/latest/vm/host/__init__.py | 2 + .../vm/aaz/latest/vm/host/_create.py | 406 +++++++++++++ .../vm/aaz/latest/vm/host/_show.py | 34 +- .../vm/aaz/latest/vm/host/_update.py | 545 ++++++++++++++++++ 4 files changed, 982 insertions(+), 5 deletions(-) create mode 100644 src/azure-cli/azure/cli/command_modules/vm/aaz/latest/vm/host/_create.py create mode 100644 src/azure-cli/azure/cli/command_modules/vm/aaz/latest/vm/host/_update.py diff --git a/src/azure-cli/azure/cli/command_modules/vm/aaz/latest/vm/host/__init__.py b/src/azure-cli/azure/cli/command_modules/vm/aaz/latest/vm/host/__init__.py index fcd450f93d3..38f4682488c 100644 --- a/src/azure-cli/azure/cli/command_modules/vm/aaz/latest/vm/host/__init__.py +++ b/src/azure-cli/azure/cli/command_modules/vm/aaz/latest/vm/host/__init__.py @@ -9,6 +9,7 @@ # flake8: noqa from .__cmd_group import * +from ._create import * from ._delete import * from ._list import * from ._list_resize_options import * @@ -16,4 +17,5 @@ from ._resize import * from ._restart import * from ._show import * +from ._update import * from ._wait import * diff --git a/src/azure-cli/azure/cli/command_modules/vm/aaz/latest/vm/host/_create.py b/src/azure-cli/azure/cli/command_modules/vm/aaz/latest/vm/host/_create.py new file mode 100644 index 00000000000..cc15cea42bc --- /dev/null +++ b/src/azure-cli/azure/cli/command_modules/vm/aaz/latest/vm/host/_create.py @@ -0,0 +1,406 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# +# Code generated by aaz-dev-tools +# -------------------------------------------------------------------------------------------- + +# pylint: skip-file +# flake8: noqa + +from azure.cli.core.aaz import * + + +class Create(AAZCommand): + """Create a dedicated host. + """ + + _aaz_info = { + "version": "2024-11-01", + "resources": [ + ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.compute/hostgroups/{}/hosts/{}", "2024-11-01"], + ] + } + + AZ_SUPPORT_NO_WAIT = True + + def _handler(self, command_args): + super()._handler(command_args) + return self.build_lro_poller(self._execute_operations, self._output) + + _args_schema = None + + @classmethod + def _build_arguments_schema(cls, *args, **kwargs): + if cls._args_schema is not None: + return cls._args_schema + cls._args_schema = super()._build_arguments_schema(*args, **kwargs) + + # define Arg Group "" + + _args_schema = cls._args_schema + _args_schema.host_group_name = AAZStrArg( + options=["--host-group", "--host-group-name"], + help="The name of the dedicated host group.", + required=True, + ) + _args_schema.host_name = AAZStrArg( + options=["-n", "--name", "--host-name"], + help="The name of the dedicated host .", + required=True, + ) + _args_schema.resource_group = AAZResourceGroupNameArg( + required=True, + ) + + # define Arg Group "Parameters" + + _args_schema = cls._args_schema + _args_schema.location = AAZResourceLocationArg( + arg_group="Parameters", + help="Resource location", + required=True, + fmt=AAZResourceLocationArgFormat( + resource_group_arg="resource_group", + ), + ) + _args_schema.sku = AAZObjectArg( + options=["--sku"], + arg_group="Parameters", + help="SKU of the dedicated host for Hardware Generation and VM family. Only name is required to be set. 
List Microsoft.Compute SKUs for a list of possible values.", + required=True, + ) + _args_schema.tags = AAZDictArg( + options=["--tags"], + arg_group="Parameters", + help="Resource tags", + ) + + sku = cls._args_schema.sku + sku.capacity = AAZIntArg( + options=["capacity"], + help="Specifies the number of virtual machines in the scale set.", + ) + sku.name = AAZStrArg( + options=["name"], + help="The sku name.", + ) + sku.tier = AAZStrArg( + options=["tier"], + help="Specifies the tier of virtual machines in a scale set.

Possible Values:

**Standard**

**Basic**", + ) + + tags = cls._args_schema.tags + tags.Element = AAZStrArg() + + # define Arg Group "Properties" + + _args_schema = cls._args_schema + _args_schema.auto_replace_on_failure = AAZBoolArg( + options=["--auto-replace-on-failure"], + arg_group="Properties", + help="Specifies whether the dedicated host should be replaced automatically in case of a failure. The value is defaulted to 'true' when not provided.", + ) + _args_schema.license_type = AAZStrArg( + options=["--license-type"], + arg_group="Properties", + help="Specifies the software license type that will be applied to the VMs deployed on the dedicated host.

Possible values are:

**None**

**Windows_Server_Hybrid**

**Windows_Server_Perpetual**

Default: **None**", + enum={"None": "None", "Windows_Server_Hybrid": "Windows_Server_Hybrid", "Windows_Server_Perpetual": "Windows_Server_Perpetual"}, + ) + _args_schema.platform_fault_domain = AAZIntArg( + options=["--platform-fault-domain"], + arg_group="Properties", + help="Fault domain of the dedicated host within a dedicated host group.", + ) + return cls._args_schema + + def _execute_operations(self): + self.pre_operations() + yield self.DedicatedHostsCreateOrUpdate(ctx=self.ctx)() + self.post_operations() + + @register_callback + def pre_operations(self): + pass + + @register_callback + def post_operations(self): + pass + + def _output(self, *args, **kwargs): + result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True) + return result + + class DedicatedHostsCreateOrUpdate(AAZHttpOperation): + CLIENT_TYPE = "MgmtClient" + + def __call__(self, *args, **kwargs): + request = self.make_request() + session = self.client.send_request(request=request, stream=False, **kwargs) + if session.http_response.status_code in [202]: + return self.client.build_lro_polling( + self.ctx.args.no_wait, + session, + self.on_200_201, + self.on_error, + lro_options={"final-state-via": "location"}, + path_format_arguments=self.url_parameters, + ) + if session.http_response.status_code in [200, 201]: + return self.client.build_lro_polling( + self.ctx.args.no_wait, + session, + self.on_200_201, + self.on_error, + lro_options={"final-state-via": "location"}, + path_format_arguments=self.url_parameters, + ) + + return self.on_error(session.http_response) + + @property + def url(self): + return self.client.format_url( + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}", + **self.url_parameters + ) + + @property + def method(self): + return "PUT" + + @property + def error_format(self): + return "ODataV4Format" + + @property + def url_parameters(self): + parameters = { + 
**self.serialize_url_param( + "hostGroupName", self.ctx.args.host_group_name, + required=True, + ), + **self.serialize_url_param( + "hostName", self.ctx.args.host_name, + required=True, + ), + **self.serialize_url_param( + "resourceGroupName", self.ctx.args.resource_group, + required=True, + ), + **self.serialize_url_param( + "subscriptionId", self.ctx.subscription_id, + required=True, + ), + } + return parameters + + @property + def query_parameters(self): + parameters = { + **self.serialize_query_param( + "api-version", "2024-11-01", + required=True, + ), + } + return parameters + + @property + def header_parameters(self): + parameters = { + **self.serialize_header_param( + "Content-Type", "application/json", + ), + **self.serialize_header_param( + "Accept", "application/json", + ), + } + return parameters + + @property + def content(self): + _content_value, _builder = self.new_content_builder( + self.ctx.args, + typ=AAZObjectType, + typ_kwargs={"flags": {"required": True, "client_flatten": True}} + ) + _builder.set_prop("location", AAZStrType, ".location", typ_kwargs={"flags": {"required": True}}) + _builder.set_prop("properties", AAZObjectType, typ_kwargs={"flags": {"client_flatten": True}}) + _builder.set_prop("sku", AAZObjectType, ".sku", typ_kwargs={"flags": {"required": True}}) + _builder.set_prop("tags", AAZDictType, ".tags") + + properties = _builder.get(".properties") + if properties is not None: + properties.set_prop("autoReplaceOnFailure", AAZBoolType, ".auto_replace_on_failure") + properties.set_prop("licenseType", AAZStrType, ".license_type") + properties.set_prop("platformFaultDomain", AAZIntType, ".platform_fault_domain") + + sku = _builder.get(".sku") + if sku is not None: + sku.set_prop("capacity", AAZIntType, ".capacity") + sku.set_prop("name", AAZStrType, ".name") + sku.set_prop("tier", AAZStrType, ".tier") + + tags = _builder.get(".tags") + if tags is not None: + tags.set_elements(AAZStrType, ".") + + return 
self.serialize_content(_content_value) + + def on_200_201(self, session): + data = self.deserialize_http_content(session) + self.ctx.set_var( + "instance", + data, + schema_builder=self._build_schema_on_200_201 + ) + + _schema_on_200_201 = None + + @classmethod + def _build_schema_on_200_201(cls): + if cls._schema_on_200_201 is not None: + return cls._schema_on_200_201 + + cls._schema_on_200_201 = AAZObjectType() + + _schema_on_200_201 = cls._schema_on_200_201 + _schema_on_200_201.id = AAZStrType( + flags={"read_only": True}, + ) + _schema_on_200_201.location = AAZStrType( + flags={"required": True}, + ) + _schema_on_200_201.name = AAZStrType( + flags={"read_only": True}, + ) + _schema_on_200_201.properties = AAZObjectType( + flags={"client_flatten": True}, + ) + _schema_on_200_201.sku = AAZObjectType( + flags={"required": True}, + ) + _schema_on_200_201.system_data = AAZObjectType( + serialized_name="systemData", + flags={"read_only": True}, + ) + _schema_on_200_201.tags = AAZDictType() + _schema_on_200_201.type = AAZStrType( + flags={"read_only": True}, + ) + + properties = cls._schema_on_200_201.properties + properties.auto_replace_on_failure = AAZBoolType( + serialized_name="autoReplaceOnFailure", + ) + properties.host_id = AAZStrType( + serialized_name="hostId", + flags={"read_only": True}, + ) + properties.instance_view = AAZObjectType( + serialized_name="instanceView", + flags={"read_only": True}, + ) + properties.license_type = AAZStrType( + serialized_name="licenseType", + ) + properties.platform_fault_domain = AAZIntType( + serialized_name="platformFaultDomain", + ) + properties.provisioning_state = AAZStrType( + serialized_name="provisioningState", + flags={"read_only": True}, + ) + properties.provisioning_time = AAZStrType( + serialized_name="provisioningTime", + flags={"read_only": True}, + ) + properties.time_created = AAZStrType( + serialized_name="timeCreated", + flags={"read_only": True}, + ) + properties.virtual_machines = AAZListType( + 
serialized_name="virtualMachines", + flags={"read_only": True}, + ) + + instance_view = cls._schema_on_200_201.properties.instance_view + instance_view.asset_id = AAZStrType( + serialized_name="assetId", + flags={"read_only": True}, + ) + instance_view.available_capacity = AAZObjectType( + serialized_name="availableCapacity", + ) + instance_view.statuses = AAZListType() + + available_capacity = cls._schema_on_200_201.properties.instance_view.available_capacity + available_capacity.allocatable_v_ms = AAZListType( + serialized_name="allocatableVMs", + ) + + allocatable_v_ms = cls._schema_on_200_201.properties.instance_view.available_capacity.allocatable_v_ms + allocatable_v_ms.Element = AAZObjectType() + + _element = cls._schema_on_200_201.properties.instance_view.available_capacity.allocatable_v_ms.Element + _element.count = AAZFloatType() + _element.vm_size = AAZStrType( + serialized_name="vmSize", + ) + + statuses = cls._schema_on_200_201.properties.instance_view.statuses + statuses.Element = AAZObjectType() + + _element = cls._schema_on_200_201.properties.instance_view.statuses.Element + _element.code = AAZStrType() + _element.display_status = AAZStrType( + serialized_name="displayStatus", + ) + _element.level = AAZStrType() + _element.message = AAZStrType() + _element.time = AAZStrType() + + virtual_machines = cls._schema_on_200_201.properties.virtual_machines + virtual_machines.Element = AAZObjectType() + + _element = cls._schema_on_200_201.properties.virtual_machines.Element + _element.id = AAZStrType( + flags={"read_only": True}, + ) + + sku = cls._schema_on_200_201.sku + sku.capacity = AAZIntType() + sku.name = AAZStrType() + sku.tier = AAZStrType() + + system_data = cls._schema_on_200_201.system_data + system_data.created_at = AAZStrType( + serialized_name="createdAt", + ) + system_data.created_by = AAZStrType( + serialized_name="createdBy", + ) + system_data.created_by_type = AAZStrType( + serialized_name="createdByType", + ) + system_data.last_modified_at 
= AAZStrType( + serialized_name="lastModifiedAt", + ) + system_data.last_modified_by = AAZStrType( + serialized_name="lastModifiedBy", + ) + system_data.last_modified_by_type = AAZStrType( + serialized_name="lastModifiedByType", + ) + + tags = cls._schema_on_200_201.tags + tags.Element = AAZStrType() + + return cls._schema_on_200_201 + + +class _CreateHelper: + """Helper class for Create""" + + +__all__ = ["Create"] diff --git a/src/azure-cli/azure/cli/command_modules/vm/aaz/latest/vm/host/_show.py b/src/azure-cli/azure/cli/command_modules/vm/aaz/latest/vm/host/_show.py index d44e51edcf4..565d2d5eaa7 100644 --- a/src/azure-cli/azure/cli/command_modules/vm/aaz/latest/vm/host/_show.py +++ b/src/azure-cli/azure/cli/command_modules/vm/aaz/latest/vm/host/_show.py @@ -22,9 +22,9 @@ class Show(AAZCommand): """ _aaz_info = { - "version": "2022-11-01", + "version": "2024-11-01", "resources": [ - ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.compute/hostgroups/{}/hosts/{}", "2022-11-01"], + ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.compute/hostgroups/{}/hosts/{}", "2024-11-01"], ] } @@ -57,13 +57,12 @@ def _build_arguments_schema(cls, *args, **kwargs): id_part="child_name_1", ) _args_schema.resource_group = AAZResourceGroupNameArg( - help="Name of resource group. You can configure the default group using `az configure --defaults group=`.", required=True, ) _args_schema.expand = AAZStrArg( options=["--expand"], help="The expand expression to apply on the operation. 'InstanceView' will retrieve the list of instance views of the dedicated host. 
'UserData' is not supported for dedicated host.", - enum={"instanceView": "instanceView", "userData": "userData"}, + enum={"instanceView": "instanceView", "resiliencyView": "resiliencyView", "userData": "userData"}, ) return cls._args_schema @@ -139,7 +138,7 @@ def query_parameters(self): "$expand", self.ctx.args.expand, ), **self.serialize_query_param( - "api-version", "2022-11-01", + "api-version", "2024-11-01", required=True, ), } @@ -187,6 +186,10 @@ def _build_schema_on_200(cls): _schema_on_200.sku = AAZObjectType( flags={"required": True}, ) + _schema_on_200.system_data = AAZObjectType( + serialized_name="systemData", + flags={"read_only": True}, + ) _schema_on_200.tags = AAZDictType() _schema_on_200.type = AAZStrType( flags={"read_only": True}, @@ -202,6 +205,7 @@ def _build_schema_on_200(cls): ) properties.instance_view = AAZObjectType( serialized_name="instanceView", + flags={"read_only": True}, ) properties.license_type = AAZStrType( serialized_name="licenseType", @@ -275,6 +279,26 @@ def _build_schema_on_200(cls): sku.name = AAZStrType() sku.tier = AAZStrType() + system_data = cls._schema_on_200.system_data + system_data.created_at = AAZStrType( + serialized_name="createdAt", + ) + system_data.created_by = AAZStrType( + serialized_name="createdBy", + ) + system_data.created_by_type = AAZStrType( + serialized_name="createdByType", + ) + system_data.last_modified_at = AAZStrType( + serialized_name="lastModifiedAt", + ) + system_data.last_modified_by = AAZStrType( + serialized_name="lastModifiedBy", + ) + system_data.last_modified_by_type = AAZStrType( + serialized_name="lastModifiedByType", + ) + tags = cls._schema_on_200.tags tags.Element = AAZStrType() diff --git a/src/azure-cli/azure/cli/command_modules/vm/aaz/latest/vm/host/_update.py b/src/azure-cli/azure/cli/command_modules/vm/aaz/latest/vm/host/_update.py new file mode 100644 index 00000000000..e186354302d --- /dev/null +++ b/src/azure-cli/azure/cli/command_modules/vm/aaz/latest/vm/host/_update.py 
@@ -0,0 +1,545 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# +# Code generated by aaz-dev-tools +# -------------------------------------------------------------------------------------------- + +# pylint: skip-file +# flake8: noqa + +from azure.cli.core.aaz import * + + +@register_command( + "vm host update", +) +class Update(AAZCommand): + """Update a dedicated host. + """ + + _aaz_info = { + "version": "2022-11-01", + "resources": [ + ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.compute/hostgroups/{}/hosts/{}", "2022-11-01"], + ] + } + + AZ_SUPPORT_NO_WAIT = True + + AZ_SUPPORT_GENERIC_UPDATE = True + + def _handler(self, command_args): + super()._handler(command_args) + return self.build_lro_poller(self._execute_operations, self._output) + + _args_schema = None + + @classmethod + def _build_arguments_schema(cls, *args, **kwargs): + if cls._args_schema is not None: + return cls._args_schema + cls._args_schema = super()._build_arguments_schema(*args, **kwargs) + + # define Arg Group "" + + _args_schema = cls._args_schema + _args_schema.host_group_name = AAZStrArg( + options=["--host-group", "--host-group-name"], + help="The name of the dedicated host group.", + required=True, + id_part="name", + ) + _args_schema.host_name = AAZStrArg( + options=["-n", "--name", "--host-name"], + help="The name of the dedicated host.", + required=True, + id_part="child_name_1", + ) + _args_schema.resource_group = AAZResourceGroupNameArg( + help="Name of resource group. 
You can configure the default group using `az configure --defaults group=`.", + required=True, + ) + + # define Arg Group "Parameters" + + _args_schema = cls._args_schema + _args_schema.location = AAZResourceLocationArg( + arg_group="Parameters", + help="Resource location", + fmt=AAZResourceLocationArgFormat( + resource_group_arg="resource_group", + ), + ) + _args_schema.sku = AAZObjectArg( + options=["--sku"], + arg_group="Parameters", + help="SKU of the dedicated host for Hardware Generation and VM family. Only name is required to be set. List Microsoft.Compute SKUs for a list of possible values.", + ) + _args_schema.tags = AAZDictArg( + options=["--tags"], + arg_group="Parameters", + help="Resource tags", + nullable=True, + ) + + sku = cls._args_schema.sku + sku.capacity = AAZIntArg( + options=["capacity"], + help="Specifies the number of virtual machines in the scale set.", + nullable=True, + ) + sku.name = AAZStrArg( + options=["name"], + help="The sku name.", + nullable=True, + ) + sku.tier = AAZStrArg( + options=["tier"], + help="Specifies the tier of virtual machines in a scale set.

Possible Values:

**Standard**

**Basic**", + nullable=True, + ) + + tags = cls._args_schema.tags + tags.Element = AAZStrArg( + nullable=True, + ) + + # define Arg Group "Properties" + + _args_schema = cls._args_schema + _args_schema.auto_replace_on_failure = AAZBoolArg( + options=["--auto-replace-on-failure"], + arg_group="Properties", + help="Specifies whether the dedicated host should be replaced automatically in case of a failure. The value is defaulted to 'true' when not provided.", + nullable=True, + ) + _args_schema.license_type = AAZStrArg( + options=["--license-type"], + arg_group="Properties", + help="Specifies the software license type that will be applied to the VMs deployed on the dedicated host.

Possible values are:

**None**

**Windows_Server_Hybrid**

**Windows_Server_Perpetual**

Default: **None**", + nullable=True, + enum={"None": "None", "Windows_Server_Hybrid": "Windows_Server_Hybrid", "Windows_Server_Perpetual": "Windows_Server_Perpetual"}, + ) + _args_schema.platform_fault_domain = AAZIntArg( + options=["--platform-fault-domain"], + arg_group="Properties", + help="Fault domain of the dedicated host within a dedicated host group.", + nullable=True, + fmt=AAZIntArgFormat( + minimum=0, + ), + ) + return cls._args_schema + + def _execute_operations(self): + self.pre_operations() + self.DedicatedHostsGet(ctx=self.ctx)() + self.pre_instance_update(self.ctx.vars.instance) + self.InstanceUpdateByJson(ctx=self.ctx)() + self.InstanceUpdateByGeneric(ctx=self.ctx)() + self.post_instance_update(self.ctx.vars.instance) + yield self.DedicatedHostsCreateOrUpdate(ctx=self.ctx)() + self.post_operations() + + @register_callback + def pre_operations(self): + pass + + @register_callback + def post_operations(self): + pass + + @register_callback + def pre_instance_update(self, instance): + pass + + @register_callback + def post_instance_update(self, instance): + pass + + def _output(self, *args, **kwargs): + result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True) + return result + + class DedicatedHostsGet(AAZHttpOperation): + CLIENT_TYPE = "MgmtClient" + + def __call__(self, *args, **kwargs): + request = self.make_request() + session = self.client.send_request(request=request, stream=False, **kwargs) + if session.http_response.status_code in [200]: + return self.on_200(session) + + return self.on_error(session.http_response) + + @property + def url(self): + return self.client.format_url( + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}", + **self.url_parameters + ) + + @property + def method(self): + return "GET" + + @property + def error_format(self): + return "ODataV4Format" + + @property + def url_parameters(self): + parameters = { + 
**self.serialize_url_param( + "hostGroupName", self.ctx.args.host_group_name, + required=True, + ), + **self.serialize_url_param( + "hostName", self.ctx.args.host_name, + required=True, + ), + **self.serialize_url_param( + "resourceGroupName", self.ctx.args.resource_group, + required=True, + ), + **self.serialize_url_param( + "subscriptionId", self.ctx.subscription_id, + required=True, + ), + } + return parameters + + @property + def query_parameters(self): + parameters = { + **self.serialize_query_param( + "api-version", "2022-11-01", + required=True, + ), + } + return parameters + + @property + def header_parameters(self): + parameters = { + **self.serialize_header_param( + "Accept", "application/json", + ), + } + return parameters + + def on_200(self, session): + data = self.deserialize_http_content(session) + self.ctx.set_var( + "instance", + data, + schema_builder=self._build_schema_on_200 + ) + + _schema_on_200 = None + + @classmethod + def _build_schema_on_200(cls): + if cls._schema_on_200 is not None: + return cls._schema_on_200 + + cls._schema_on_200 = AAZObjectType() + _UpdateHelper._build_schema_dedicated_host_read(cls._schema_on_200) + + return cls._schema_on_200 + + class DedicatedHostsCreateOrUpdate(AAZHttpOperation): + CLIENT_TYPE = "MgmtClient" + + def __call__(self, *args, **kwargs): + request = self.make_request() + session = self.client.send_request(request=request, stream=False, **kwargs) + if session.http_response.status_code in [202]: + return self.client.build_lro_polling( + self.ctx.args.no_wait, + session, + self.on_200_201, + self.on_error, + lro_options={"final-state-via": "azure-async-operation"}, + path_format_arguments=self.url_parameters, + ) + if session.http_response.status_code in [200, 201]: + return self.client.build_lro_polling( + self.ctx.args.no_wait, + session, + self.on_200_201, + self.on_error, + lro_options={"final-state-via": "azure-async-operation"}, + path_format_arguments=self.url_parameters, + ) + + return 
self.on_error(session.http_response) + + @property + def url(self): + return self.client.format_url( + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}", + **self.url_parameters + ) + + @property + def method(self): + return "PUT" + + @property + def error_format(self): + return "ODataV4Format" + + @property + def url_parameters(self): + parameters = { + **self.serialize_url_param( + "hostGroupName", self.ctx.args.host_group_name, + required=True, + ), + **self.serialize_url_param( + "hostName", self.ctx.args.host_name, + required=True, + ), + **self.serialize_url_param( + "resourceGroupName", self.ctx.args.resource_group, + required=True, + ), + **self.serialize_url_param( + "subscriptionId", self.ctx.subscription_id, + required=True, + ), + } + return parameters + + @property + def query_parameters(self): + parameters = { + **self.serialize_query_param( + "api-version", "2022-11-01", + required=True, + ), + } + return parameters + + @property + def header_parameters(self): + parameters = { + **self.serialize_header_param( + "Content-Type", "application/json", + ), + **self.serialize_header_param( + "Accept", "application/json", + ), + } + return parameters + + @property + def content(self): + _content_value, _builder = self.new_content_builder( + self.ctx.args, + value=self.ctx.vars.instance, + ) + + return self.serialize_content(_content_value) + + def on_200_201(self, session): + data = self.deserialize_http_content(session) + self.ctx.set_var( + "instance", + data, + schema_builder=self._build_schema_on_200_201 + ) + + _schema_on_200_201 = None + + @classmethod + def _build_schema_on_200_201(cls): + if cls._schema_on_200_201 is not None: + return cls._schema_on_200_201 + + cls._schema_on_200_201 = AAZObjectType() + _UpdateHelper._build_schema_dedicated_host_read(cls._schema_on_200_201) + + return cls._schema_on_200_201 + + class 
InstanceUpdateByJson(AAZJsonInstanceUpdateOperation): + + def __call__(self, *args, **kwargs): + self._update_instance(self.ctx.vars.instance) + + def _update_instance(self, instance): + _instance_value, _builder = self.new_content_builder( + self.ctx.args, + value=instance, + typ=AAZObjectType + ) + _builder.set_prop("location", AAZStrType, ".location", typ_kwargs={"flags": {"required": True}}) + _builder.set_prop("properties", AAZObjectType, typ_kwargs={"flags": {"client_flatten": True}}) + _builder.set_prop("sku", AAZObjectType, ".sku", typ_kwargs={"flags": {"required": True}}) + _builder.set_prop("tags", AAZDictType, ".tags") + + properties = _builder.get(".properties") + if properties is not None: + properties.set_prop("autoReplaceOnFailure", AAZBoolType, ".auto_replace_on_failure") + properties.set_prop("licenseType", AAZStrType, ".license_type") + properties.set_prop("platformFaultDomain", AAZIntType, ".platform_fault_domain") + + sku = _builder.get(".sku") + if sku is not None: + sku.set_prop("capacity", AAZIntType, ".capacity") + sku.set_prop("name", AAZStrType, ".name") + sku.set_prop("tier", AAZStrType, ".tier") + + tags = _builder.get(".tags") + if tags is not None: + tags.set_elements(AAZStrType, ".") + + return _instance_value + + class InstanceUpdateByGeneric(AAZGenericInstanceUpdateOperation): + + def __call__(self, *args, **kwargs): + self._update_instance_by_generic( + self.ctx.vars.instance, + self.ctx.generic_update_args + ) + + +class _UpdateHelper: + """Helper class for Update""" + + _schema_dedicated_host_read = None + + @classmethod + def _build_schema_dedicated_host_read(cls, _schema): + if cls._schema_dedicated_host_read is not None: + _schema.id = cls._schema_dedicated_host_read.id + _schema.location = cls._schema_dedicated_host_read.location + _schema.name = cls._schema_dedicated_host_read.name + _schema.properties = cls._schema_dedicated_host_read.properties + _schema.sku = cls._schema_dedicated_host_read.sku + _schema.tags = 
cls._schema_dedicated_host_read.tags + _schema.type = cls._schema_dedicated_host_read.type + return + + cls._schema_dedicated_host_read = _schema_dedicated_host_read = AAZObjectType() + + dedicated_host_read = _schema_dedicated_host_read + dedicated_host_read.id = AAZStrType( + flags={"read_only": True}, + ) + dedicated_host_read.location = AAZStrType( + flags={"required": True}, + ) + dedicated_host_read.name = AAZStrType( + flags={"read_only": True}, + ) + dedicated_host_read.properties = AAZObjectType( + flags={"client_flatten": True}, + ) + dedicated_host_read.sku = AAZObjectType( + flags={"required": True}, + ) + dedicated_host_read.tags = AAZDictType() + dedicated_host_read.type = AAZStrType( + flags={"read_only": True}, + ) + + properties = _schema_dedicated_host_read.properties + properties.auto_replace_on_failure = AAZBoolType( + serialized_name="autoReplaceOnFailure", + ) + properties.host_id = AAZStrType( + serialized_name="hostId", + flags={"read_only": True}, + ) + properties.instance_view = AAZObjectType( + serialized_name="instanceView", + ) + properties.license_type = AAZStrType( + serialized_name="licenseType", + ) + properties.platform_fault_domain = AAZIntType( + serialized_name="platformFaultDomain", + ) + properties.provisioning_state = AAZStrType( + serialized_name="provisioningState", + flags={"read_only": True}, + ) + properties.provisioning_time = AAZStrType( + serialized_name="provisioningTime", + flags={"read_only": True}, + ) + properties.time_created = AAZStrType( + serialized_name="timeCreated", + flags={"read_only": True}, + ) + properties.virtual_machines = AAZListType( + serialized_name="virtualMachines", + flags={"read_only": True}, + ) + + instance_view = _schema_dedicated_host_read.properties.instance_view + instance_view.asset_id = AAZStrType( + serialized_name="assetId", + flags={"read_only": True}, + ) + instance_view.available_capacity = AAZObjectType( + serialized_name="availableCapacity", + ) + instance_view.statuses = 
AAZListType() + + available_capacity = _schema_dedicated_host_read.properties.instance_view.available_capacity + available_capacity.allocatable_v_ms = AAZListType( + serialized_name="allocatableVMs", + ) + + allocatable_v_ms = _schema_dedicated_host_read.properties.instance_view.available_capacity.allocatable_v_ms + allocatable_v_ms.Element = AAZObjectType() + + _element = _schema_dedicated_host_read.properties.instance_view.available_capacity.allocatable_v_ms.Element + _element.count = AAZFloatType() + _element.vm_size = AAZStrType( + serialized_name="vmSize", + ) + + statuses = _schema_dedicated_host_read.properties.instance_view.statuses + statuses.Element = AAZObjectType() + + _element = _schema_dedicated_host_read.properties.instance_view.statuses.Element + _element.code = AAZStrType() + _element.display_status = AAZStrType( + serialized_name="displayStatus", + ) + _element.level = AAZStrType() + _element.message = AAZStrType() + _element.time = AAZStrType() + + virtual_machines = _schema_dedicated_host_read.properties.virtual_machines + virtual_machines.Element = AAZObjectType() + + _element = _schema_dedicated_host_read.properties.virtual_machines.Element + _element.id = AAZStrType( + flags={"read_only": True}, + ) + + sku = _schema_dedicated_host_read.sku + sku.capacity = AAZIntType() + sku.name = AAZStrType() + sku.tier = AAZStrType() + + tags = _schema_dedicated_host_read.tags + tags.Element = AAZStrType() + + _schema.id = cls._schema_dedicated_host_read.id + _schema.location = cls._schema_dedicated_host_read.location + _schema.name = cls._schema_dedicated_host_read.name + _schema.properties = cls._schema_dedicated_host_read.properties + _schema.sku = cls._schema_dedicated_host_read.sku + _schema.tags = cls._schema_dedicated_host_read.tags + _schema.type = cls._schema_dedicated_host_read.type + + +__all__ = ["Update"] From a710f9b3234183e744d760e5cf9a70d8da28c594 Mon Sep 17 00:00:00 2001 From: william051200 Date: Thu, 5 Mar 2026 11:13:49 +0800 Subject: 
[PATCH 2/7] Added example --- .../cli/command_modules/vm/aaz/latest/vm/host/_update.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/azure-cli/azure/cli/command_modules/vm/aaz/latest/vm/host/_update.py b/src/azure-cli/azure/cli/command_modules/vm/aaz/latest/vm/host/_update.py index e186354302d..579a5393f7c 100644 --- a/src/azure-cli/azure/cli/command_modules/vm/aaz/latest/vm/host/_update.py +++ b/src/azure-cli/azure/cli/command_modules/vm/aaz/latest/vm/host/_update.py @@ -16,6 +16,10 @@ ) class Update(AAZCommand): """Update a dedicated host. + + :example: Update the 'autoReplaceOnFailure' field of a dedicated host. + az vm host update --host-group my-host-group --name my-host \\ + --resource-group my-resource-group --set autoReplaceOnFailure=True """ _aaz_info = { From 420a4d91fb524babbe41e3298ae609ceea6265c2 Mon Sep 17 00:00:00 2001 From: william051200 Date: Thu, 5 Mar 2026 11:14:01 +0800 Subject: [PATCH 3/7] Migrate commands --- .../cli/command_modules/vm/_client_factory.py | 4 -- .../azure/cli/command_modules/vm/commands.py | 14 +++--- .../azure/cli/command_modules/vm/custom.py | 43 ++++++++++++++----- .../command_modules/vm/operations/vm_host.py | 28 ++++++++++++ 4 files changed, 65 insertions(+), 24 deletions(-) create mode 100644 src/azure-cli/azure/cli/command_modules/vm/operations/vm_host.py diff --git a/src/azure-cli/azure/cli/command_modules/vm/_client_factory.py b/src/azure-cli/azure/cli/command_modules/vm/_client_factory.py index ee6329e9129..4a8691bcf7c 100644 --- a/src/azure-cli/azure/cli/command_modules/vm/_client_factory.py +++ b/src/azure-cli/azure/cli/command_modules/vm/_client_factory.py @@ -87,10 +87,6 @@ def cf_proximity_placement_groups(cli_ctx, _): return _compute_client_factory(cli_ctx).proximity_placement_groups -def cf_dedicated_hosts(cli_ctx, _): - return _compute_client_factory(cli_ctx).dedicated_hosts - - def cf_dedicated_host_groups(cli_ctx, _): return _compute_client_factory(cli_ctx).dedicated_host_groups diff --git 
a/src/azure-cli/azure/cli/command_modules/vm/commands.py b/src/azure-cli/azure/cli/command_modules/vm/commands.py index 51c50586daf..7f8aad28e7d 100644 --- a/src/azure-cli/azure/cli/command_modules/vm/commands.py +++ b/src/azure-cli/azure/cli/command_modules/vm/commands.py @@ -9,7 +9,7 @@ cf_vmss, cf_images, cf_galleries, cf_gallery_images, cf_gallery_image_versions, cf_proximity_placement_groups, - cf_dedicated_hosts, cf_dedicated_host_groups, + cf_dedicated_host_groups, cf_log_analytics_data_plane, cf_capacity_reservation_groups, cf_capacity_reservations, cf_community_gallery) @@ -138,11 +138,6 @@ def load_command_table(self, _): operations_tmpl='azure.mgmt.compute.operations#ProximityPlacementGroupsOperations.{}', ) - compute_dedicated_host_sdk = CliCommandType( - operations_tmpl="azure.mgmt.compute.operations#DedicatedHostsOperations.{}", - client_factory=cf_dedicated_hosts, - ) - compute_dedicated_host_groups_sdk = CliCommandType( operations_tmpl="azure.mgmt.compute.operations#DedicatedHostGroupsOperations.{}", client_factory=cf_dedicated_host_groups, @@ -386,11 +381,12 @@ def load_command_table(self, _): g.custom_command('delete', 'delete_user') g.custom_command('reset-ssh', 'reset_linux_ssh') - with self.command_group('vm host', compute_dedicated_host_sdk, client_factory=cf_dedicated_hosts, - min_api='2019-03-01') as g: + with self.command_group('vm host') as g: g.custom_command('get-instance-view', 'get_dedicated_host_instance_view') g.custom_command('create', 'create_dedicated_host') - g.generic_update_command('update', setter_name='begin_create_or_update') + + from .operations.vm_host import VMHostUpdate + self.command_table['vm host update'] = VMHostUpdate(loader=self) with self.command_group('vm host group', compute_dedicated_host_groups_sdk, client_factory=cf_dedicated_host_groups, min_api='2019-03-01') as g: diff --git a/src/azure-cli/azure/cli/command_modules/vm/custom.py b/src/azure-cli/azure/cli/command_modules/vm/custom.py index 
def create_dedicated_host(cmd, host_group_name, host_name, resource_group_name, sku, platform_fault_domain=None,
                          auto_replace_on_failure=None, license_type=None, location=None, tags=None):
    """Create a dedicated host via the AAZ-generated Create command.

    :param cmd: CLI command context (supplies cli_ctx for the AAZ command and
        the resource-group location lookup).
    :param host_group_name: Name of the dedicated host group.
    :param host_name: Name of the dedicated host.
    :param resource_group_name: Resource group containing the host group.
    :param sku: SKU name of the dedicated host (e.g. 'DSv3-Type1').
    :param platform_fault_domain: Fault domain of the host within the group;
        0 is a valid value.
    :param auto_replace_on_failure: Whether the host is replaced automatically
        on failure.
    :param license_type: On-premises license to apply to VMs on the host.
    :param location: Resource location; defaults to the resource group's location.
    :param tags: Resource tags dict.
    :return: The long-running-operation poller / created host from the AAZ command.
    """
    from .aaz.latest.vm.host import Create as VmHostCreate
    location = location or _get_resource_group_location(cmd.cli_ctx, resource_group_name)
    command_args = {
        'host_group_name': host_group_name,
        'host_name': host_name,
        'resource_group': resource_group_name,
        'location': location,
        'sku': {
            'name': sku
        }
    }

    if tags:
        command_args['tags'] = tags

    if auto_replace_on_failure is not None:
        command_args['auto_replace_on_failure'] = auto_replace_on_failure

    if license_type:
        command_args['license_type'] = license_type

    # BUGFIX: 0 is a valid platform fault domain. A plain truthiness test
    # (`if platform_fault_domain:`) would silently drop `--platform-fault-domain 0`,
    # a regression against the pre-migration code which always forwarded the value.
    if platform_fault_domain is not None:
        command_args['platform_fault_domain'] = platform_fault_domain

    return VmHostCreate(cli_ctx=cmd.cli_ctx)(command_args=command_args)
def get_dedicated_host_instance_view(cmd, host_group_name, host_name, resource_group_name):
    """Get a dedicated host with its instance view expanded.

    Migrated from the SDK client call to the AAZ-generated Show command.
    """
    from .aaz.latest.vm.host import Show as VmHostShow
    show_args = {
        'host_group_name': host_group_name,
        'host_name': host_name,
        'resource_group': resource_group_name,
        # 'instanceView' adds runtime status and available capacity to the response
        'expand': 'instanceView'
    }
    return VmHostShow(cli_ctx=cmd.cli_ctx)(command_args=show_args)
# endregion


# --- operations/vm_host.py (new module introduced by this patch) --------------

class VMHostUpdate(_VMHostUpdate):
    """Customized `az vm host update`.

    Mirrors the previous generic_update_command registration: only the
    identifying arguments plus the generic --set/--add/--remove arguments
    are exposed to the user.
    """

    @classmethod
    def _build_arguments_schema(cls, *args, **kwargs):
        args_schema = super()._build_arguments_schema(*args, **kwargs)

        # Keep the historical option spellings for the identifying arguments.
        args_schema.host_group_name._options = ['--host-group']
        args_schema.host_name._options = ['-n', '--name']

        # Hide the typed property arguments; property changes are expected to
        # flow through the generic update arguments instead.
        for arg_name in ('location', 'sku', 'tags', 'auto_replace_on_failure',
                         'license_type', 'platform_fault_domain'):
            getattr(args_schema, arg_name)._registered = False

        return args_schema
Update code --- .../azure/cli/command_modules/vm/custom.py~ | 6685 +++++++++++++++++ 1 file changed, 6685 insertions(+) create mode 100644 src/azure-cli/azure/cli/command_modules/vm/custom.py~ diff --git a/src/azure-cli/azure/cli/command_modules/vm/custom.py~ b/src/azure-cli/azure/cli/command_modules/vm/custom.py~ new file mode 100644 index 00000000000..fa95315628a --- /dev/null +++ b/src/azure-cli/azure/cli/command_modules/vm/custom.py~ @@ -0,0 +1,6685 @@ +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# +# Generation mode: Incremental +# -------------------------------------------------------------------------- + +# pylint: disable=no-self-use, too-many-lines, no-else-return +# pylint: disable=protected-access +import json +import os + +import requests + +# the urlopen is imported for automation purpose +from urllib.request import urlopen # noqa, pylint: disable=import-error,unused-import,ungrouped-imports + +from knack.log import get_logger +from knack.util import CLIError +from azure.cli.core.azclierror import ( + ResourceNotFoundError, + ValidationError, + RequiredArgumentMissingError, + ArgumentUsageError +) + +from azure.cli.command_modules.vm._validators import _get_resource_group_from_vault_name +from azure.cli.core.commands.validators import validate_file_or_dict + +from azure.cli.core.commands import LongRunningOperation, DeploymentOutputLongRunningOperation +from azure.cli.core.commands.client_factory import get_mgmt_service_client +from azure.cli.core.profiles import ResourceType +from azure.cli.core.util import sdk_no_wait + +from ._vm_utils import read_content_if_is_file, import_aaz_by_profile, IdentityType +from 
._vm_diagnostics_templates import get_default_diag_config + +from ._actions import (load_images_from_aliases_doc, load_extension_images_thru_services, + load_images_thru_services, _get_latest_image_version, _get_latest_image_version_by_aaz) +from ._client_factory import (_compute_client_factory, cf_vm_image_term) + +from .aaz.latest.vm.disk import AttachDetachDataDisk +from .aaz.latest.vm import Update as UpdateVM + +from .generated.custom import * # noqa: F403, pylint: disable=unused-wildcard-import,wildcard-import + +try: + from .manual.custom import * # noqa: F403, pylint: disable=unused-wildcard-import,wildcard-import +except ImportError: + pass + +logger = get_logger(__name__) + + +# Use the same name by portal, so people can update from both cli and portal +# (VM doesn't allow multiple handlers for the same extension) +_ACCESS_EXT_HANDLER_NAME = 'enablevmaccess' + +_LINUX_ACCESS_EXT = 'VMAccessForLinux' +_WINDOWS_ACCESS_EXT = 'VMAccessAgent' +_LINUX_DIAG_EXT = 'LinuxDiagnostic' +_WINDOWS_DIAG_EXT = 'IaaSDiagnostics' +_LINUX_OMS_AGENT_EXT = 'OmsAgentForLinux' +_WINDOWS_OMS_AGENT_EXT = 'MicrosoftMonitoringAgent' +extension_mappings = { + _LINUX_ACCESS_EXT: { + 'version': '1.5', + 'publisher': 'Microsoft.OSTCExtensions' + }, + _WINDOWS_ACCESS_EXT: { + 'version': '2.4', + 'publisher': 'Microsoft.Compute' + }, + _LINUX_DIAG_EXT: { + 'version': '3.0', + 'publisher': 'Microsoft.Azure.Diagnostics' + }, + _WINDOWS_DIAG_EXT: { + 'version': '1.5', + 'publisher': 'Microsoft.Azure.Diagnostics' + }, + _LINUX_OMS_AGENT_EXT: { + 'version': '1.0', + 'publisher': 'Microsoft.EnterpriseCloud.Monitoring' + }, + _WINDOWS_OMS_AGENT_EXT: { + 'version': '1.0', + 'publisher': 'Microsoft.EnterpriseCloud.Monitoring' + } +} + +remove_basic_option_msg = "It's recommended to create with `%s`. " \ + "Please be aware that Basic option will be removed in the future." 
+ + +def _construct_identity_info(identity_scope, identity_role, implicit_identity, external_identities): + info = {} + if identity_scope: + info['scope'] = identity_scope + info['role'] = str(identity_role) # could be DefaultStr, so convert to string + info['userAssignedIdentities'] = external_identities or {} + info['systemAssignedIdentity'] = implicit_identity or '' + return info + + +# for injecting test seams to produce predicatable role assignment id for playback +def _gen_guid(): + import uuid + return uuid.uuid4() + + +def _get_access_extension_upgrade_info(extensions, name): + version = extension_mappings[name]['version'] + publisher = extension_mappings[name]['publisher'] + + auto_upgrade = None + + if extensions: + extension = next((e for e in extensions if e.name == name), None) + from packaging.version import parse # pylint: disable=no-name-in-module,import-error + if extension and parse(extension.type_handler_version) < parse(version): + auto_upgrade = True + elif extension and parse(extension.type_handler_version) > parse(version): + version = extension.type_handler_version + + return publisher, version, auto_upgrade + + +# separated for aaz based implementation +def _get_access_extension_upgrade_info_aaz(extensions, name): + version = extension_mappings[name]['version'] + publisher = extension_mappings[name]['publisher'] + + auto_upgrade = None + + if extensions: + extension = next((e for e in extensions if e.get('name', '') == name), None) + from packaging.version import parse # pylint: disable=no-name-in-module,import-error + if extension and parse(extension['typeHandlerVersion']) < parse(version): + auto_upgrade = True + elif extension and parse(extension['typeHandlerVersion']) > parse(version): + version = extension['typeHandlerVersion'] + + return publisher, version, auto_upgrade + + +def _get_extension_instance_name(instance_view, publisher, extension_type_name, + suggested_name=None): + extension_instance_name = suggested_name or 
extension_type_name + full_type_name = '.'.join([publisher, extension_type_name]) + if instance_view.extensions: + ext = next((x for x in instance_view.extensions + if x.type and (x.type.lower() == full_type_name.lower())), None) + if ext: + extension_instance_name = ext.name + return extension_instance_name + + +# separated for aaz based implementation +def _get_extension_instance_name_aaz(instance_view, publisher, extension_type_name, + suggested_name=None): + extension_instance_name = suggested_name or extension_type_name + full_type_name = '.'.join([publisher, extension_type_name]) + if extensions := instance_view.get('extensions', []): + ext = next((x for x in extensions if x.get('type', '').lower() == full_type_name.lower()), None) + if ext: + extension_instance_name = ext['name'] + return extension_instance_name + + +def _get_storage_management_client(cli_ctx): + return get_mgmt_service_client(cli_ctx, ResourceType.MGMT_STORAGE) + + +def _get_disk_lun(data_disks): + # start from 0, search for unused int for lun + if not data_disks: + return 0 + + existing_luns = sorted([d.lun for d in data_disks]) + for i, current in enumerate(existing_luns): + if current != i: + return i + return len(existing_luns) + + +def _get_disk_lun_by_aaz(data_disks): + # start from 0, search for unused int for lun + if not data_disks: + return 0 + + existing_luns = sorted([d['lun'] for d in data_disks]) + for i, current in enumerate(existing_luns): + if current != i: + return i + return len(existing_luns) + + +def _get_private_config(cli_ctx, resource_group_name, storage_account): + storage_mgmt_client = _get_storage_management_client(cli_ctx) + # pylint: disable=no-member + keys = storage_mgmt_client.storage_accounts.list_keys(resource_group_name, storage_account).keys + + private_config = { + 'storageAccountName': storage_account, + 'storageAccountKey': keys[0].value + } + return private_config + + +def _get_resource_group_location(cli_ctx, resource_group_name): + client = 
get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES) + # pylint: disable=no-member + return client.resource_groups.get(resource_group_name).location + + +def _get_sku_object(cmd, sku): + if cmd.supported_api_version(min_api='2017-03-30'): + DiskSku = cmd.get_models('DiskSku') + return DiskSku(name=sku) + return sku + + +def get_hyper_v_generation_from_vmss(cli_ctx, image_ref, location): # pylint: disable=too-many-return-statements + from ._vm_utils import (is_valid_image_version_id, parse_gallery_image_id, is_valid_vm_image_id, parse_vm_image_id, + parse_shared_gallery_image_id, parse_community_gallery_image_id) + if image_ref is None: + return None + if image_ref.id: + from ._client_factory import _compute_client_factory + if is_valid_image_version_id(image_ref.id): + image_info = parse_gallery_image_id(image_ref.id) + client = _compute_client_factory(cli_ctx, subscription_id=image_info[0]).gallery_images + gallery_image_info = client.get( + resource_group_name=image_info[1], gallery_name=image_info[2], gallery_image_name=image_info[3]) + return gallery_image_info.hyper_v_generation if hasattr(gallery_image_info, 'hyper_v_generation') else None + if is_valid_vm_image_id(image_ref.id): + sub, rg, image_name = parse_vm_image_id(image_ref.id) + client = _compute_client_factory(cli_ctx, subscription_id=sub).images + image_info = client.get(rg, image_name) + return image_info.hyper_v_generation if hasattr(image_info, 'hyper_v_generation') else None + + if image_ref.shared_gallery_image_id is not None: + from ._client_factory import cf_shared_gallery_image + image_info = parse_shared_gallery_image_id(image_ref.shared_gallery_image_id) + gallery_image_info = cf_shared_gallery_image(cli_ctx).get( + location=location, gallery_unique_name=image_info[0], gallery_image_name=image_info[1]) + return gallery_image_info.hyper_v_generation if hasattr(gallery_image_info, 'hyper_v_generation') else None + + if image_ref.community_gallery_image_id is not None: + 
from ._client_factory import cf_community_gallery_image + image_info = parse_community_gallery_image_id(image_ref.community_gallery_image_id) + gallery_image_info = cf_community_gallery_image(cli_ctx).get( + location=location, public_gallery_name=image_info[0], gallery_image_name=image_info[1]) + return gallery_image_info.hyper_v_generation if hasattr(gallery_image_info, 'hyper_v_generation') else None + + if image_ref.offer and image_ref.publisher and image_ref.sku and image_ref.version: + from ._client_factory import cf_vm_image + version = image_ref.version + if version.lower() == 'latest': + from ._actions import _get_latest_image_version + version = _get_latest_image_version(cli_ctx, location, image_ref.publisher, image_ref.offer, + image_ref.sku) + vm_image_info = cf_vm_image(cli_ctx, '').get( + location, image_ref.publisher, image_ref.offer, image_ref.sku, version) + return vm_image_info.hyper_v_generation if hasattr(vm_image_info, 'hyper_v_generation') else None + + return None + + +def get_hyper_v_generation_from_vmss_by_aaz(cli_ctx, image_ref, location): # pylint: disable=too-many-return-statements + from ._vm_utils import (is_valid_image_version_id, parse_gallery_image_id, is_valid_vm_image_id, parse_vm_image_id, + parse_shared_gallery_image_id, parse_community_gallery_image_id) + if image_ref is None: + return None + if image_ref.get("id", None) is not None: + from ._client_factory import _compute_client_factory + if is_valid_image_version_id(image_ref["id"]): + image_info = parse_gallery_image_id(image_ref["id"]) + client = _compute_client_factory(cli_ctx, subscription_id=image_info[0]).gallery_images + gallery_image_info = client.get( + resource_group_name=image_info[1], gallery_name=image_info[2], gallery_image_name=image_info[3]) + return gallery_image_info.hyper_v_generation if hasattr(gallery_image_info, 'hyper_v_generation') else None + if is_valid_vm_image_id(image_ref["id"]): + sub, rg, image_name = parse_vm_image_id(image_ref["id"]) + client = 
_compute_client_factory(cli_ctx, subscription_id=sub).images + image_info = client.get(rg, image_name) + return image_info.hyper_v_generation if hasattr(image_info, 'hyper_v_generation') else None + + if image_ref.get("sharedGalleryImageId", None) is not None: + from ._client_factory import cf_shared_gallery_image + image_info = parse_shared_gallery_image_id(image_ref["sharedGalleryImageId"]) + gallery_image_info = cf_shared_gallery_image(cli_ctx).get( + location=location, gallery_unique_name=image_info[0], gallery_image_name=image_info[1]) + return gallery_image_info.hyper_v_generation if hasattr(gallery_image_info, 'hyper_v_generation') else None + + if image_ref.get("communityGalleryImageId", None) is not None: + from ._client_factory import cf_community_gallery_image + image_info = parse_community_gallery_image_id(image_ref["communityGalleryImageId"]) + gallery_image_info = cf_community_gallery_image(cli_ctx).get( + location=location, public_gallery_name=image_info[0], gallery_image_name=image_info[1]) + return gallery_image_info.hyper_v_generation if hasattr(gallery_image_info, 'hyper_v_generation') else None + + if image_ref.get("offer", None) is not None and image_ref.get("publisher", None) is not None \ + and image_ref.get("sku", None) is not None and image_ref.get("version", None) is not None: + from ._client_factory import cf_vm_image + version = image_ref["version"] + if version.lower() == 'latest': + from ._actions import _get_latest_image_version + version = _get_latest_image_version(cli_ctx, location, image_ref["publisher"], image_ref["offer"], + image_ref["sku"]) + vm_image_info = cf_vm_image(cli_ctx, '').get( + location, image_ref["publisher"], image_ref["offer"], image_ref["sku"], version) + return vm_image_info.hyper_v_generation if hasattr(vm_image_info, 'hyper_v_generation') else None + + return None + + +def _is_linux_os(vm): + os_type = None + if vm and vm.storage_profile and vm.storage_profile.os_disk and vm.storage_profile.os_disk.os_type: + 
os_type = vm.storage_profile.os_disk.os_type + if os_type: + return os_type.lower() == 'linux' + # the os_type could be None for VM scaleset, let us check out os configurations + if vm.os_profile.linux_configuration: + return bool(vm.os_profile.linux_configuration) + return False + + +def _is_linux_os_by_aaz(vm): + os_type = None + if vm.get("storage_profile", {}).get("os_disk", {}).get("os_type", None) is not None: + os_type = vm["storage_profile"]["os_disk"]["os_type"] + if os_type: + return os_type.lower() == 'linux' + # the os_type could be None for VM scaleset, let us check out os configurations + if vm.get("os_profile", {}).get("linux_configuration", None) is not None: + return bool(vm["os_profile"]["linux_configuration"]) + return False + + +# separated for aaz implementation +def _is_linux_os_aaz(vm): + if os_type := vm.get('storageProfile', {}).get('osDisk', {}).get('osType', None): + return os_type.lower() == 'linux' + # the os_type could be None for VM scaleset, let us check out os configurations + if linux_config := vm.get('osProfile', {}).get('linuxConfiguration', ''): + return bool(linux_config) + return False + + +def _merge_secrets(secrets): + """ + Merge a list of secrets. Each secret should be a dict fitting the following JSON structure: + [{ "sourceVault": { "id": "value" }, + "vaultCertificates": [{ "certificateUrl": "value", + "certificateStore": "cert store name (only on windows)"}] }] + The array of secrets is merged on sourceVault.id. 
+ :param secrets: + :return: + """ + merged = {} + vc_name = 'vaultCertificates' + for outer in secrets: + for secret in outer: + if secret['sourceVault']['id'] not in merged: + merged[secret['sourceVault']['id']] = [] + merged[secret['sourceVault']['id']] = \ + secret[vc_name] + merged[secret['sourceVault']['id']] + + # transform the reduced map to vm format + formatted = [{'sourceVault': {'id': source_id}, + 'vaultCertificates': value} + for source_id, value in list(merged.items())] + return formatted + + +def _normalize_extension_version(cli_ctx, publisher, vm_extension_name, version, location): + + def _trim_away_build_number(version): + # workaround a known issue: the version must only contain "major.minor", even though + # "extension image list" gives more detail + return '.'.join(version.split('.')[0:2]) + + if not version: + result = load_extension_images_thru_services(cli_ctx, publisher, vm_extension_name, None, location, + show_latest=True, partial_match=False) + if not result: + raise CLIError('Failed to find the latest version for the extension "{}"'.format(vm_extension_name)) + # with 'show_latest' enabled, we will only get one result. + version = result[0]['version'] + + version = _trim_away_build_number(version) + return version + + +def _parse_rg_name(strid): + '''From an ID, extract the contained (resource group, name) tuple.''' + from azure.mgmt.core.tools import parse_resource_id + parts = parse_resource_id(strid) + return (parts['resource_group'], parts['name']) + + +def _set_sku(cmd, instance, sku): + if cmd.supported_api_version(min_api='2017-03-30'): + instance.sku = cmd.get_models('DiskSku')(name=sku) + else: + instance.account_type = sku + + +def _show_missing_access_warning(resource_group, name, command): + warn = ("No access was given yet to the '{1}', because '--scope' was not provided. " + "You should setup by creating a role assignment, e.g. 
" + "'az role assignment create --assignee --role contributor -g {0}' " + "would let it access the current resource group. To get the pricipal id, run " + "'az {2} show -g {0} -n {1} --query \"identity.principalId\" -otsv'".format(resource_group, name, command)) + logger.warning(warn) + + +def _parse_aux_subscriptions(resource_id): + from azure.mgmt.core.tools import is_valid_resource_id, parse_resource_id + if is_valid_resource_id(resource_id): + res = parse_resource_id(resource_id) + return [res['subscription']] + return None + + +# Hide extension information from output as the info is not correct and unhelpful; also +# commands using it mean to hide the extension concept from users. +class ExtensionUpdateLongRunningOperation(LongRunningOperation): # pylint: disable=too-few-public-methods + pass + + +# region Disks (Managed) +def create_managed_disk(cmd, resource_group_name, disk_name, location=None, # pylint: disable=too-many-locals, too-many-branches, too-many-statements, line-too-long + size_gb=None, sku='Premium_LRS', os_type=None, + source=None, for_upload=None, upload_size_bytes=None, # pylint: disable=unused-argument + # below are generated internally from 'source' + source_blob_uri=None, source_disk=None, source_snapshot=None, source_restore_point=None, + source_storage_account_id=None, no_wait=False, tags=None, zone=None, + disk_iops_read_write=None, disk_mbps_read_write=None, hyper_v_generation=None, + encryption_type=None, disk_encryption_set=None, max_shares=None, + disk_iops_read_only=None, disk_mbps_read_only=None, + image_reference=None, image_reference_lun=None, + gallery_image_reference=None, gallery_image_reference_lun=None, + network_access_policy=None, disk_access=None, logical_sector_size=None, + tier=None, enable_bursting=None, edge_zone=None, security_type=None, support_hibernation=None, + public_network_access=None, accelerated_network=None, architecture=None, + data_access_auth_mode=None, gallery_image_reference_type=None, 
security_data_uri=None, + upload_type=None, secure_vm_disk_encryption_set=None, performance_plus=None, + optimized_for_frequent_attach=None, security_metadata_uri=None, action_on_disk_delay=None, + supported_security_option=None): + + from azure.mgmt.core.tools import resource_id, is_valid_resource_id + from azure.cli.core.commands.client_factory import get_subscription_id + + location = location or _get_resource_group_location(cmd.cli_ctx, resource_group_name) + if security_data_uri: + option = 'ImportSecure' + elif source_blob_uri: + option = 'Import' + elif source_disk or source_snapshot: + option = 'Copy' + elif source_restore_point: + option = 'Restore' + elif upload_type == 'Upload': + option = 'Upload' + elif upload_type == 'UploadWithSecurityData': + option = 'UploadPreparedSecure' + elif image_reference or gallery_image_reference: + option = 'FromImage' + else: + option = 'Empty' + + if source_storage_account_id is None and source_blob_uri is not None: + subscription_id = get_subscription_id(cmd.cli_ctx) + storage_account_name = source_blob_uri.split('.')[0].split('/')[-1] + source_storage_account_id = resource_id( + subscription=subscription_id, resource_group=resource_group_name, + namespace='Microsoft.Storage', type='storageAccounts', name=storage_account_name) + + if upload_size_bytes is not None and not upload_type: + raise RequiredArgumentMissingError( + 'usage error: --upload-size-bytes should be used together with --upload-type') + + from ._constants import COMPATIBLE_SECURITY_TYPE_VALUE, UPGRADE_SECURITY_HINT + if image_reference is not None: + if not is_valid_resource_id(image_reference): + # URN or name + terms = image_reference.split(':') + if len(terms) == 4: # URN + disk_publisher, disk_offer, disk_sku, disk_version = terms[0], terms[1], terms[2], terms[3] + if disk_version.lower() == 'latest': + disk_version = _get_latest_image_version_by_aaz(cmd.cli_ctx, location, disk_publisher, disk_offer, + disk_sku) + else: # error + raise 
# NOTE(review): reconstructed from a whitespace-mangled patch hunk. This span is the
# tail of create_managed_disk (its signature and the derivation of `option`,
# `image_reference`, `upload_type` etc. are above this chunk) plus the head of
# create_image. Code tokens are unchanged; only formatting and comments were restored.
CLIError('usage error: --image-reference should be ID or URN (publisher:offer:sku:version).')
        else:
            from azure.mgmt.core.tools import parse_resource_id
            terms = parse_resource_id(image_reference)
            # Pull publisher/offer/sku/version out of the image resource ID segments.
            disk_publisher, disk_offer, disk_sku, disk_version = \
                terms['child_name_1'], terms['child_name_3'], terms['child_name_4'], terms['child_name_5']

            from .aaz.latest.vm.image import Show as VmImageShow
            command_args = {
                'location': location,
                'offer': disk_offer,
                'publisher': disk_publisher,
                'sku': disk_sku,
                'version': disk_version,
            }
            response = VmImageShow(cli_ctx=cmd.cli_ctx)(command_args=command_args)

            # Gen2 platform images default to TrustedLaunch; Gen1 only gets an upgrade hint.
            if response.get('hyper_v_generation'):
                if response.get('hyper_v_generation') == 'V1':
                    logger.warning(UPGRADE_SECURITY_HINT)
                elif response.get('hyper_v_generation') == 'V2':
                    # set default value of hyper_v_generation
                    if hyper_v_generation == 'V1':
                        hyper_v_generation = 'V2'
                    # set default value of security_type
                    if not security_type:
                        security_type = 'TrustedLaunch'
                    if security_type != 'TrustedLaunch':
                        logger.warning(UPGRADE_SECURITY_HINT)

            # image_reference is an ID now
            image_reference = {'id': response.get('id')}
        if image_reference_lun is not None:
            image_reference['lun'] = image_reference_lun

    if gallery_image_reference is not None:
        if not security_type:
            security_type = 'Standard'
        if security_type != 'TrustedLaunch':
            logger.warning(UPGRADE_SECURITY_HINT)

        # Gallery reference key defaults to 'id' unless a specific reference type was given.
        key = gallery_image_reference_type if gallery_image_reference_type else 'id'
        gallery_image_reference = {key: gallery_image_reference}
        if gallery_image_reference_lun is not None:
            gallery_image_reference['lun'] = gallery_image_reference_lun

    creation_data = {
        "create_option": option,
        "source_uri": source_blob_uri,
        "image_reference": image_reference,
        "gallery_image_reference": gallery_image_reference,
        "source_resource_id": source_disk or source_snapshot or source_restore_point,
        "storage_account_id": source_storage_account_id,
        "upload_size_bytes": upload_size_bytes,
        "logical_sector_size": logical_sector_size,
        "security_data_uri": security_data_uri,
        "performance_plus": performance_plus,
        "security_metadata_uri": security_metadata_uri,
    }

    if size_gb is None and option == "Empty":
        raise RequiredArgumentMissingError(
            'usage error: --size-gb is required to create an empty disk')
    if upload_size_bytes is None and upload_type:
        raise RequiredArgumentMissingError(
            'usage error: --upload-size-bytes is required to create a disk for upload')

    # Expand bare names into full ARM resource IDs within the current subscription/RG.
    if disk_encryption_set is not None and not is_valid_resource_id(disk_encryption_set):
        disk_encryption_set = resource_id(
            subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
            namespace='Microsoft.Compute', type='diskEncryptionSets', name=disk_encryption_set)

    if disk_access is not None and not is_valid_resource_id(disk_access):
        disk_access = resource_id(
            subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
            namespace='Microsoft.Compute', type='diskAccesses', name=disk_access)

    if secure_vm_disk_encryption_set is not None and not is_valid_resource_id(secure_vm_disk_encryption_set):
        secure_vm_disk_encryption_set = resource_id(
            subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
            namespace='Microsoft.Compute', type='diskEncryptionSets', name=secure_vm_disk_encryption_set)

    encryption = None
    if disk_encryption_set or encryption_type:
        encryption = {
            "type": encryption_type,
            "disk_encryption_set_id": disk_encryption_set
        }

    sku = {"name": sku}

    args = {
        "location": location,
        "creation_data": creation_data,
        "tags": tags or {},
        "sku": sku,
        "disk_size_gb": size_gb,
        "os_type": os_type,
        "encryption": encryption
    }

    if hyper_v_generation:
        args["hyper_v_generation"] = hyper_v_generation

    if zone:
        args["zones"] = zone
    if disk_iops_read_write is not None:
        args["disk_iops_read_write"] = disk_iops_read_write
    if disk_mbps_read_write is not None:
        args["disk_m_bps_read_write"] = disk_mbps_read_write
    if max_shares is not None:
        args["max_shares"] = max_shares
    if disk_iops_read_only is not None:
        args["disk_iops_read_only"] = disk_iops_read_only
    if disk_mbps_read_only is not None:
        args["disk_m_bps_read_only"] = disk_mbps_read_only
    if network_access_policy is not None:
        args["network_access_policy"] = network_access_policy
    if disk_access is not None:
        args["disk_access_id"] = disk_access
    if tier is not None:
        args["tier"] = tier
    if enable_bursting is not None:
        args["bursting_enabled"] = enable_bursting
    if edge_zone is not None:
        args["extended_location"] = edge_zone
    # The `Standard` is used for backward compatibility to allow customers to keep their current behavior
    # after changing the default values to Trusted Launch VMs in the future.
    if security_type and security_type != COMPATIBLE_SECURITY_TYPE_VALUE:
        args["security_profile"] = {'securityType': security_type}
    # NOTE(review): if secure_vm_disk_encryption_set is set while security_type is
    # empty/COMPATIBLE, args["security_profile"] was never created and the next line
    # would KeyError — presumably argument validation upstream prevents that; verify.
    if secure_vm_disk_encryption_set:
        args["security_profile"]["secure_vm_disk_encryption_set_id"] = secure_vm_disk_encryption_set
    if support_hibernation is not None:
        args["supports_hibernation"] = support_hibernation
    if public_network_access is not None:
        args["public_network_access"] = public_network_access
    if accelerated_network is not None or architecture is not None or supported_security_option is not None:
        if args.get("supported_capabilities", None) is None:
            supported_capabilities = {
                "accelerated_network": accelerated_network,
                "architecture": architecture,
                "supported_security_option": supported_security_option
            }
            args["supported_capabilities"] = supported_capabilities
        else:
            args["supported_capabilities"]["accelerated_network"] = accelerated_network
            args["supported_capabilities"]["architecture"] = architecture
            args["supported_capabilities"]["supported_security_option"] = supported_security_option
    if data_access_auth_mode is not None:
        args["data_access_auth_mode"] = data_access_auth_mode
    if optimized_for_frequent_attach is not None:
        args["optimized_for_frequent_attach"] = optimized_for_frequent_attach
    if action_on_disk_delay is not None:
        args["availability_policy"] = {'action_on_disk_delay': action_on_disk_delay}

    args["no_wait"] = no_wait
    args["disk_name"] = disk_name
    args["resource_group"] = resource_group_name

    from .aaz.latest.disk import Create
    return Create(cli_ctx=cmd.cli_ctx)(command_args=args)


# region Images (Managed)
def create_image(cmd, resource_group_name, name, source, os_type=None, data_disk_sources=None, location=None, # pylint: disable=too-many-locals,unused-argument
                 # below are generated internally from 'source' and 'data_disk_sources'
                 source_virtual_machine=None, storage_sku=None, hyper_v_generation=None,
                 os_blob_uri=None, data_blob_uris=None,
                 os_snapshot=None, data_snapshots=None,
                 os_disk=None, os_disk_caching=None, data_disks=None, data_disk_caching=None,
                 tags=None, zone_resilient=None, edge_zone=None):
    """Create a managed VM image, either from a generalized source VM or from
    OS/data disk sources (blobs, snapshots, or managed disks)."""
    if source_virtual_machine:
        location = location or _get_resource_group_location(cmd.cli_ctx, resource_group_name)
        image_storage_profile = None if zone_resilient is None else {"zone_resilient": zone_resilient}
        args = {
            "location": location,
            "source_virtual_machine": {"id": source_virtual_machine},
            "storage_profile": image_storage_profile,
            "tags": tags or {}
        }
    else:
        os_disk = {
            "os_type": os_type,
            "os_state": "Generalized",
            "caching": os_disk_caching,
            "snapshot": {"id": os_snapshot} if os_snapshot else None,
            "managed_disk": {"id": os_disk} if os_disk else None,
            "blob_uri": os_blob_uri,
            "storage_account_type": storage_sku
        }
        # LUNs are assigned sequentially across blob, snapshot, and disk sources.
        all_data_disks = []
        lun = 0
        if data_blob_uris:
            for d in data_blob_uris:
                all_data_disks.append({
                    "lun": lun,
                    "blob_uri": d,
                    "caching": data_disk_caching
                })
                lun += 1
        if data_snapshots:
            for d in data_snapshots:
# NOTE(review): reconstructed from a whitespace-mangled patch hunk. Begins inside the
# data-snapshot loop of create_image and ends inside assign_vm_identity's setter
# (cut mid-`elif`). Code tokens are unchanged; only formatting and comments restored.
all_data_disks.append({
                    "lun": lun,
                    "snapshot": {"id": d},
                    "caching": data_disk_caching
                })
                lun += 1
        if data_disks:
            for d in data_disks:
                all_data_disks.append({
                    "lun": lun,
                    "managed_disk": {"id": d},
                    "caching": data_disk_caching
                })
                lun += 1

        image_storage_profile = {
            "os_disk": os_disk,
            "data_disks": all_data_disks
        }
        if zone_resilient is not None:
            image_storage_profile["zone_resilient"] = zone_resilient
        location = location or _get_resource_group_location(cmd.cli_ctx, resource_group_name)
        # pylint disable=no-member
        args = {
            "location": location,
            "storage_profile": image_storage_profile,
            "tags": tags or {}
        }

    if hyper_v_generation:
        args["hyper_v_generation"] = hyper_v_generation

    if edge_zone:
        args["extended_location"] = edge_zone

    args["image_name"] = name
    args["resource_group"] = resource_group_name

    from .aaz.latest.image import Create
    return Create(cli_ctx=cmd.cli_ctx)(command_args=args)


# region Snapshots
# pylint: disable=unused-argument,too-many-locals
def create_snapshot(cmd, resource_group_name, snapshot_name, location=None, size_gb=None, sku='Standard_LRS',
                    source=None, for_upload=None, copy_start=None, incremental=None,
                    # below are generated internally from 'source'
                    source_blob_uri=None, source_disk=None, source_snapshot=None, source_storage_account_id=None,
                    hyper_v_generation=None, tags=None, no_wait=False, disk_encryption_set=None,
                    encryption_type=None, network_access_policy=None, disk_access=None, edge_zone=None,
                    public_network_access=None, accelerated_network=None, architecture=None,
                    elastic_san_resource_id=None, bandwidth_copy_speed=None, instant_access_duration_minutes=None):
    """Create a snapshot of a disk, blob, snapshot, or Elastic SAN snapshot, or an
    empty snapshot of a given size. Raises CLIError on inconsistent arguments."""
    from azure.mgmt.core.tools import resource_id, is_valid_resource_id
    from azure.cli.core.commands.client_factory import get_subscription_id

    location = location or _get_resource_group_location(cmd.cli_ctx, resource_group_name)
    # Determine the create option from whichever source argument was supplied.
    if source_blob_uri:
        option = 'Import'
    elif source_disk or source_snapshot:
        option = 'CopyStart' if copy_start else 'Copy'
    elif for_upload:
        option = 'Upload'
    elif elastic_san_resource_id:
        option = 'CopyFromSanSnapshot'
    else:
        option = 'Empty'

    creation_data = {
        'create_option': option,
        'source_uri': source_blob_uri,
        'image_reference': None,
        'source_resource_id': source_disk or source_snapshot,
        'storage_account_id': source_storage_account_id,
        'elastic_san_resource_id': elastic_san_resource_id,
        'provisioned_bandwidth_copy_speed': bandwidth_copy_speed,
        'instant_access_duration_minutes': instant_access_duration_minutes
    }

    if size_gb is None and option == 'Empty':
        raise CLIError('Please supply size for the snapshots')

    # Expand bare names into full ARM resource IDs within the current subscription/RG.
    if disk_encryption_set is not None and not is_valid_resource_id(disk_encryption_set):
        disk_encryption_set = resource_id(
            subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
            namespace='Microsoft.Compute', type='diskEncryptionSets', name=disk_encryption_set)

    if disk_access is not None and not is_valid_resource_id(disk_access):
        disk_access = resource_id(
            subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
            namespace='Microsoft.Compute', type='diskAccesses', name=disk_access)

    if disk_encryption_set is not None and encryption_type is None:
        raise CLIError('usage error: Please specify --encryption-type.')
    if encryption_type is not None:
        encryption = {
            'type': encryption_type,
            'disk_encryption_set_id': disk_encryption_set
        }
    else:
        encryption = None

    args = {
        'location': location,
        'creation_data': creation_data,
        'tags': tags or {},
        'sku': {'name': sku},
        'disk_size_gb': size_gb,
        'incremental': incremental,
        'encryption': encryption,
    }

    if hyper_v_generation:
        args['hyper_v_generation'] = hyper_v_generation
    if network_access_policy is not None:
        args['network_access_policy'] = network_access_policy
    if disk_access is not None:
        args['disk_access_id'] = disk_access
    if edge_zone:
        args['extended_location'] = edge_zone
    if public_network_access is not None:
        args['public_network_access'] = public_network_access
    if accelerated_network is not None or architecture is not None:
        if args.get('supported_capabilities', None) is None:
            supported_capabilities = {
                'accelerated_network': accelerated_network,
                'architecture': architecture
            }
            args['supported_capabilities'] = supported_capabilities
        else:
            args['supported_capabilities']['accelerated_network'] = accelerated_network
            args['supported_capabilities']['architecture'] = architecture

    args['snapshot_name'] = snapshot_name
    args['resource_group'] = resource_group_name
    args['no_wait'] = no_wait

    from .aaz.latest.snapshot import Create
    return Create(cli_ctx=cmd.cli_ctx)(command_args=args)


# region VirtualMachines Identity
def show_vm_identity(cmd, resource_group_name, vm_name):
    """Return the VM's identity object, normalizing an empty
    userAssignedIdentities map to None; returns None when absent."""
    vm = get_vm_by_aaz(cmd, resource_group_name, vm_name)

    identity = vm.get("identity", {}) if vm else None

    if identity and not identity.get('userAssignedIdentities'):
        identity['userAssignedIdentities'] = None

    return identity or None


def show_vmss_identity(cmd, resource_group_name, vm_name):
    """Return the VMSS's identity object (empty dict if missing), or None when
    the scale set itself is not found."""
    vm = get_vmss_by_aaz(cmd, resource_group_name, vm_name)
    return vm.get("identity", {}) if vm else None


def assign_vm_identity(cmd, resource_group_name, vm_name, assign_identity=None, identity_role=None,
                       identity_role_id=None, identity_scope=None):
    """Assign system- and/or user-assigned managed identities to a VM and,
    when a scope is given, create the matching role assignment."""
    identity, _, external_identities, enable_local_identity = _build_identities_info(assign_identity)

    command_args = {'resource_group': resource_group_name, 'vm_name': vm_name}

    def getter():
        return get_vm_by_aaz(cmd, resource_group_name, vm_name)

    def setter(vm, external_identities=external_identities):
        # Decide the resulting identity type from the VM's current identity and
        # the identities being added.
        if vm.get('identity', {}).get('type', None) == IdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED.value:
            identity_types = IdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED.value
        elif
vm.get('identity', {}).get('type', None) == IdentityType.SYSTEM_ASSIGNED.value and external_identities: + identity_types = IdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED.value + elif vm.get('identity', {}).get('type', None) == IdentityType.USER_ASSIGNED.value and enable_local_identity: + identity_types = IdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED.value + elif external_identities and enable_local_identity: + identity_types = IdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED.value + elif external_identities: + identity_types = IdentityType.USER_ASSIGNED.value + else: + identity_types = IdentityType.SYSTEM_ASSIGNED.value + + if identity_types == IdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED.value: + command_args['mi_system_assigned'] = "True" + command_args['mi_user_assigned'] = [] + elif identity_types == IdentityType.USER_ASSIGNED.value: + command_args['mi_user_assigned'] = [] + else: + command_args['mi_system_assigned'] = "True" + command_args['mi_user_assigned'] = [] + + if vm.get('identity', {}).get('userAssignedIdentities', None): + for key in vm.get('identity').get('userAssignedIdentities').keys(): + command_args['mi_user_assigned'].append(key) + + if identity.get('userAssignedIdentities'): + for key in identity.get('userAssignedIdentities', {}).keys(): + if key not in command_args['mi_user_assigned']: + command_args['mi_user_assigned'].append(key) + + from .operations.vm import VMPatch + update_vm_identity = VMPatch(cli_ctx=cmd.cli_ctx)(command_args=command_args) + LongRunningOperation(cmd.cli_ctx)(update_vm_identity) + result = update_vm_identity.result() + return result + + from ._vm_utils import assign_identity as assign_identity_helper + assign_identity_helper(cmd.cli_ctx, getter, setter, identity_role=identity_role_id, identity_scope=identity_scope) + + vm = getter() + return _construct_identity_info( + identity_scope, + identity_role, + vm.get('identity').get('principalId') if vm.get('identity') else None, + vm.get('identity').get('userAssignedIdentities') if 
# NOTE(review): reconstructed from a whitespace-mangled patch hunk. Begins with the
# trailing expression of assign_vm_identity, then capture_vm and the full create_vm,
# and ends inside auto_shutdown_vm's `if off:` branch. Tokens unchanged; only
# formatting and comments restored.
vm.get('identity') else None)
# endregion


# region VirtualMachines
def capture_vm(cmd, resource_group_name, vm_name, vhd_name_prefix,
               storage_container='vhds', overwrite=True):
    """Capture a VM's VHDs into a storage container and print the resulting
    template/resource output as JSON."""
    VirtualMachineCaptureParameters = cmd.get_models('VirtualMachineCaptureParameters')
    client = _compute_client_factory(cmd.cli_ctx)
    parameter = VirtualMachineCaptureParameters(vhd_prefix=vhd_name_prefix,
                                                destination_container_name=storage_container,
                                                overwrite_vhds=overwrite)
    poller = client.virtual_machines.begin_capture(resource_group_name, vm_name, parameter)
    result = LongRunningOperation(cmd.cli_ctx)(poller)
    output = getattr(result, 'output', None) or result.resources[0]
    print(json.dumps(output, indent=2)) # pylint: disable=no-member


# pylint: disable=too-many-locals, unused-argument, too-many-statements, too-many-branches, broad-except
def create_vm(cmd, vm_name, resource_group_name, image=None, size='Standard_DS1_v2', location=None, tags=None,
              no_wait=False, authentication_type=None, admin_password=None, computer_name=None,
              admin_username=None, ssh_dest_key_path=None, ssh_key_value=None, generate_ssh_keys=False,
              availability_set=None, nics=None, nsg=None, nsg_rule=None, accelerated_networking=None,
              private_ip_address=None, public_ip_address=None, public_ip_address_allocation='dynamic',
              public_ip_address_dns_name=None, public_ip_sku=None, os_disk_name=None, os_type=None,
              storage_account=None, os_caching=None, data_caching=None, storage_container_name=None, storage_sku=None,
              use_unmanaged_disk=False, attach_os_disk=None, os_disk_size_gb=None, attach_data_disks=None,
              data_disk_sizes_gb=None, disk_info=None,
              vnet_name=None, vnet_address_prefix='10.0.0.0/16', subnet=None, subnet_address_prefix='10.0.0.0/24',
              storage_profile=None, os_publisher=None, os_offer=None, os_sku=None, os_version=None,
              storage_account_type=None, vnet_type=None, nsg_type=None, public_ip_address_type=None, nic_type=None,
              validate=False, custom_data=None, secrets=None, plan_name=None, plan_product=None, plan_publisher=None,
              plan_promotion_code=None, license_type=None, assign_identity=None, identity_scope=None,
              identity_role=None, identity_role_id=None, encryption_identity=None,
              application_security_groups=None, zone=None, boot_diagnostics_storage=None, ultra_ssd_enabled=None,
              ephemeral_os_disk=None, ephemeral_os_disk_placement=None,
              proximity_placement_group=None, dedicated_host=None, dedicated_host_group=None, aux_subscriptions=None,
              priority=None, max_price=None, eviction_policy=None, enable_agent=None, workspace=None, vmss=None,
              os_disk_encryption_set=None, data_disk_encryption_sets=None, specialized=None,
              encryption_at_host=None, enable_auto_update=None, patch_mode=None, ssh_key_name=None,
              enable_hotpatching=None, platform_fault_domain=None, security_type=None, enable_secure_boot=None,
              enable_vtpm=None, count=None, edge_zone=None, nic_delete_option=None, os_disk_delete_option=None,
              data_disk_delete_option=None, user_data=None, capacity_reservation_group=None, enable_hibernation=None,
              v_cpus_available=None, v_cpus_per_core=None, accept_term=None,
              disable_integrity_monitoring=None,  # Unused
              enable_integrity_monitoring=False,
              os_disk_security_encryption_type=None, os_disk_secure_vm_disk_encryption_set=None,
              disk_controller_type=None, disable_integrity_monitoring_autoupgrade=False, enable_proxy_agent=None,
              proxy_agent_mode=None, source_snapshots_or_disks=None, source_snapshots_or_disks_size_gb=None,
              source_disk_restore_point=None, source_disk_restore_point_size_gb=None, ssh_key_type=None,
              additional_scheduled_events=None, enable_user_reboot_scheduled_events=None,
              enable_user_redeploy_scheduled_events=None, zone_placement_policy=None, include_zones=None,
              exclude_zones=None, align_regional_disks_to_vm_zone=None, wire_server_mode=None, imds_mode=None,
              wire_server_access_control_profile_reference_id=None, imds_access_control_profile_reference_id=None,
              key_incarnation_id=None, add_proxy_agent_extension=None, disk_iops_read_write=None,
              disk_mbps_read_write=None):
    """Create one or more VMs (plus any new NIC/VNet/NSG/public-IP/storage
    resources) by assembling and deploying a single ARM template."""
    from azure.cli.core.commands.client_factory import get_subscription_id
    from azure.cli.core.util import random_string, hash_string
    from azure.cli.core.commands.arm import ArmTemplateBuilder
    from azure.cli.command_modules.vm._template_builder import (build_vm_resource,
                                                                build_storage_account_resource, build_nic_resource,
                                                                build_vnet_resource, build_nsg_resource,
                                                                build_public_ip_resource, StorageProfile,
                                                                build_msi_role_assignment,
                                                                build_vm_linux_log_analytics_workspace_agent,
                                                                build_vm_windows_log_analytics_workspace_agent)
    from azure.cli.command_modules.vm._vm_utils import ArmTemplateBuilder20190401
    from azure.mgmt.core.tools import resource_id, is_valid_resource_id, parse_resource_id

    # In the latest profile, the default public IP will be expected to be changed from Basic to Standard,
    # and Basic option will be removed.
    # In order to avoid breaking change which has a big impact to users,
    # we use the hint to guide users to use Standard public IP to create VM in the first stage.
    if cmd.cli_ctx.cloud.profile == 'latest':
        if public_ip_sku == "Basic":
            logger.warning(remove_basic_option_msg, "--public-ip-sku Standard")

    subscription_id = get_subscription_id(cmd.cli_ctx)
    # Expand bare disk-encryption-set names into full ARM resource IDs.
    if os_disk_encryption_set is not None and not is_valid_resource_id(os_disk_encryption_set):
        os_disk_encryption_set = resource_id(
            subscription=subscription_id, resource_group=resource_group_name,
            namespace='Microsoft.Compute', type='diskEncryptionSets', name=os_disk_encryption_set)
    if os_disk_secure_vm_disk_encryption_set is not None and\
            not is_valid_resource_id(os_disk_secure_vm_disk_encryption_set):
        os_disk_secure_vm_disk_encryption_set = resource_id(
            subscription=subscription_id, resource_group=resource_group_name,
            namespace='Microsoft.Compute', type='diskEncryptionSets', name=os_disk_secure_vm_disk_encryption_set)

    if data_disk_encryption_sets is None:
        data_disk_encryption_sets = []
    for i, des in enumerate(data_disk_encryption_sets):
        if des is not None and not is_valid_resource_id(des):
            data_disk_encryption_sets[i] = resource_id(
                subscription=subscription_id, resource_group=resource_group_name,
                namespace='Microsoft.Compute', type='diskEncryptionSets', name=des)

    storage_sku = disk_info['os'].get('storageAccountType')

    network_id_template = resource_id(
        subscription=subscription_id, resource_group=resource_group_name,
        namespace='Microsoft.Network')

    vm_id = resource_id(
        subscription=subscription_id, resource_group=resource_group_name,
        namespace='Microsoft.Compute', type='virtualMachines', name=vm_name)

    # determine final defaults and calculated values
    tags = tags or {}
    os_disk_name = os_disk_name or ('osdisk_{}'.format(hash_string(vm_id, length=10)) if use_unmanaged_disk else None)
    storage_container_name = storage_container_name or 'vhds'

    # Build up the ARM template
    if count is None:
        master_template = ArmTemplateBuilder()
    else:
        master_template = ArmTemplateBuilder20190401()

    vm_dependencies = []
    if storage_account_type == 'new':
        storage_account = storage_account or 'vhdstorage{}'.format(
            hash_string(vm_id, length=14, force_lower=True))
        vm_dependencies.append('Microsoft.Storage/storageAccounts/{}'.format(storage_account))
        master_template.add_resource(build_storage_account_resource(cmd, storage_account, location,
                                                                    tags, storage_sku, edge_zone))

    nic_name = None
    if nic_type == 'new':
        nic_name = '{}VMNic'.format(vm_name)
        nic_full_name = 'Microsoft.Network/networkInterfaces/{}'.format(nic_name)
        if count:
            vm_dependencies.extend([nic_full_name + str(i) for i in range(count)])
        else:
            vm_dependencies.append(nic_full_name)

        nic_dependencies = []
        if vnet_type == 'new':
            subnet = subnet or '{}Subnet'.format(vm_name)
            vnet_exists = False
            if vnet_name:
                from azure.cli.command_modules.vm._vm_utils import check_existence
                vnet_exists = \
                    check_existence(cmd.cli_ctx, vnet_name, resource_group_name, 'Microsoft.Network', 'virtualNetworks')
                if vnet_exists:
                    SubnetCreate = import_aaz_by_profile(cmd.cli_ctx.cloud.profile, "network.vnet.subnet").Create
                    try:
                        poller = SubnetCreate(cli_ctx=cmd.cli_ctx)(command_args={
                            'name': subnet,
                            'vnet_name': vnet_name,
                            'resource_group': resource_group_name,
                            'address_prefixes': [subnet_address_prefix],
                            'address_prefix': subnet_address_prefix
                        })
                        LongRunningOperation(cmd.cli_ctx)(poller)
                    except Exception:
                        raise CLIError('Subnet({}) does not exist, but failed to create a new subnet with address '
                                       'prefix {}. It may be caused by name or address prefix conflict. Please specify '
                                       'an appropriate subnet name with --subnet or a valid address prefix value with '
                                       '--subnet-address-prefix.'.format(subnet, subnet_address_prefix))
            if not vnet_exists:
                vnet_name = vnet_name or '{}VNET'.format(vm_name)
                nic_dependencies.append('Microsoft.Network/virtualNetworks/{}'.format(vnet_name))
                master_template.add_resource(build_vnet_resource(cmd, vnet_name, location, tags, vnet_address_prefix,
                                                                 subnet, subnet_address_prefix, edge_zone=edge_zone))

        if nsg_type == 'new':
            if nsg_rule is None:
                nsg_rule = 'RDP' if os_type.lower() == 'windows' else 'SSH'
            nsg = nsg or '{}NSG'.format(vm_name)
            nic_dependencies.append('Microsoft.Network/networkSecurityGroups/{}'.format(nsg))
            master_template.add_resource(build_nsg_resource(cmd, nsg, location, tags, nsg_rule))

        if public_ip_address_type == 'new':
            public_ip_address = public_ip_address or '{}PublicIP'.format(vm_name)
            public_ip_address_full_name = 'Microsoft.Network/publicIpAddresses/{}'.format(public_ip_address)
            if count:
                nic_dependencies.extend([public_ip_address_full_name + str(i) for i in range(count)])
            else:
                nic_dependencies.append(public_ip_address_full_name)
            master_template.add_resource(build_public_ip_resource(cmd, public_ip_address, location, tags,
                                                                  public_ip_address_allocation,
                                                                  public_ip_address_dns_name,
                                                                  public_ip_sku, zone, count, edge_zone))

        subnet_id = subnet if is_valid_resource_id(subnet) else \
            '{}/virtualNetworks/{}/subnets/{}'.format(network_id_template, vnet_name, subnet)

        nsg_id = None
        if nsg:
            nsg_id = nsg if is_valid_resource_id(nsg) else \
                '{}/networkSecurityGroups/{}'.format(network_id_template, nsg)

        public_ip_address_id = None
        if public_ip_address:
            public_ip_address_id = public_ip_address if is_valid_resource_id(public_ip_address) \
                else '{}/publicIPAddresses/{}'.format(network_id_template, public_ip_address)

        nics_id = '{}/networkInterfaces/{}'.format(network_id_template, nic_name)

        # With --count, the NIC id gets a copyIndex() suffix per VM instance.
        if count:
            nics = [
                {
                    'id': "[concat('{}', copyIndex())]".format(nics_id),
                    'properties': {
                        'deleteOption': nic_delete_option
                    }
                }
            ]
        else:
            nics = [
                {
                    'id': nics_id,
                    'properties': {
                        'deleteOption': nic_delete_option
                    }
                }
            ]

        nic_resource = build_nic_resource(
            cmd, nic_name, location, tags, vm_name, subnet_id, private_ip_address, nsg_id,
            public_ip_address_id, application_security_groups, accelerated_networking=accelerated_networking,
            count=count, edge_zone=edge_zone)
        nic_resource['dependsOn'] = nic_dependencies
        master_template.add_resource(nic_resource)
    else:
        # Using an existing NIC
        invalid_parameters = [nsg, public_ip_address, subnet, vnet_name, application_security_groups]
        if any(invalid_parameters):
            raise CLIError('When specifying an existing NIC, do not specify NSG, '
                           'public IP, ASGs, VNet or subnet.')
        if accelerated_networking is not None:
            logger.warning('When specifying an existing NIC, do not specify accelerated networking. '
                           'Ignore --accelerated-networking now. '
                           'This will trigger an error instead of a warning in future releases.')

    os_vhd_uri = None
    if storage_profile in [StorageProfile.SACustomImage, StorageProfile.SAPirImage]:
        storage_account_name = storage_account.rsplit('/', 1)
        storage_account_name = storage_account_name[1] if \
            len(storage_account_name) > 1 else storage_account_name[0]
        os_vhd_uri = 'https://{}.blob.{}/{}/{}.vhd'.format(
            storage_account_name, cmd.cli_ctx.cloud.suffixes.storage_endpoint, storage_container_name, os_disk_name)
    elif storage_profile == StorageProfile.SASpecializedOSDisk:
        os_vhd_uri = attach_os_disk
        os_disk_name = attach_os_disk.rsplit('/', 1)[1][:-4]

    if custom_data:
        custom_data = read_content_if_is_file(custom_data)

    if user_data:
        user_data = read_content_if_is_file(user_data)

    if secrets:
        secrets = _merge_secrets([validate_file_or_dict(secret) for secret in secrets])

    vm_resource = build_vm_resource(
        name=vm_name, location=location, tags=tags, size=size, storage_profile=storage_profile, nics=nics,
        admin_username=admin_username, availability_set_id=availability_set, admin_password=admin_password,
        ssh_key_values=ssh_key_value, ssh_key_path=ssh_dest_key_path, image_reference=image,
        os_disk_name=os_disk_name, custom_image_os_type=os_type, authentication_type=authentication_type,
        os_publisher=os_publisher, os_offer=os_offer, os_sku=os_sku, os_version=os_version, os_vhd_uri=os_vhd_uri,
        attach_os_disk=attach_os_disk, os_disk_size_gb=os_disk_size_gb, custom_data=custom_data, secrets=secrets,
        license_type=license_type, zone=zone, disk_info=disk_info,
        boot_diagnostics_storage_uri=boot_diagnostics_storage, ultra_ssd_enabled=ultra_ssd_enabled,
        proximity_placement_group=proximity_placement_group, computer_name=computer_name,
        dedicated_host=dedicated_host, priority=priority, max_price=max_price, eviction_policy=eviction_policy,
        enable_agent=enable_agent, vmss=vmss, os_disk_encryption_set=os_disk_encryption_set,
        data_disk_encryption_sets=data_disk_encryption_sets, specialized=specialized,
        encryption_at_host=encryption_at_host, dedicated_host_group=dedicated_host_group,
        enable_auto_update=enable_auto_update, patch_mode=patch_mode, enable_hotpatching=enable_hotpatching,
        platform_fault_domain=platform_fault_domain, security_type=security_type, enable_secure_boot=enable_secure_boot,
        enable_vtpm=enable_vtpm, count=count, edge_zone=edge_zone, os_disk_delete_option=os_disk_delete_option,
        user_data=user_data, capacity_reservation_group=capacity_reservation_group,
        enable_hibernation=enable_hibernation, v_cpus_available=v_cpus_available, v_cpus_per_core=v_cpus_per_core,
        os_disk_security_encryption_type=os_disk_security_encryption_type,
        os_disk_secure_vm_disk_encryption_set=os_disk_secure_vm_disk_encryption_set,
        disk_controller_type=disk_controller_type, enable_proxy_agent=enable_proxy_agent,
        proxy_agent_mode=proxy_agent_mode, additional_scheduled_events=additional_scheduled_events,
        enable_user_reboot_scheduled_events=enable_user_reboot_scheduled_events,
        enable_user_redeploy_scheduled_events=enable_user_redeploy_scheduled_events,
        zone_placement_policy=zone_placement_policy, include_zones=include_zones, exclude_zones=exclude_zones,
        align_regional_disks_to_vm_zone=align_regional_disks_to_vm_zone, wire_server_mode=wire_server_mode,
        imds_mode=imds_mode,
        wire_server_access_control_profile_reference_id=wire_server_access_control_profile_reference_id,
        imds_access_control_profile_reference_id=imds_access_control_profile_reference_id,
        key_incarnation_id=key_incarnation_id, add_proxy_agent_extension=add_proxy_agent_extension,
        disk_iops_read_write=disk_iops_read_write, disk_mbps_read_write=disk_mbps_read_write)

    vm_resource['dependsOn'] = vm_dependencies

    if plan_name:
        vm_resource['plan'] = {
            'name': plan_name,
            'publisher': plan_publisher,
            'product': plan_product,
            'promotionCode': plan_promotion_code
        }

    enable_local_identity = None
    if assign_identity is not None:
        vm_resource['identity'], _, _, enable_local_identity = _build_identities_info(assign_identity)
        if identity_scope:
            role_assignment_guid = str(_gen_guid())
            master_template.add_resource(build_msi_role_assignment(vm_name, vm_id, identity_role_id,
                                                                   role_assignment_guid, identity_scope))

    if encryption_identity:
        # The encryption identity must be one of the user-assigned identities
        # attached to the VM (case-insensitive ID comparison).
        if 'identity' in vm_resource and 'userAssignedIdentities' in vm_resource['identity'] \
                and encryption_identity.lower() in \
                (k.lower() for k in vm_resource['identity']['userAssignedIdentities'].keys()):
            if 'securityProfile' not in vm_resource['properties']:
                vm_resource['properties']['securityProfile'] = {}
            if 'encryptionIdentity' not in vm_resource['properties']['securityProfile']:
                vm_resource['properties']['securityProfile']['encryptionIdentity'] = {}

            vm_securityProfile_EncryptionIdentity = vm_resource['properties']['securityProfile']['encryptionIdentity']

            if 'userAssignedIdentityResourceId' not in vm_securityProfile_EncryptionIdentity or \
                    vm_securityProfile_EncryptionIdentity['userAssignedIdentityResourceId'] != encryption_identity:
                vm_resource['properties']['securityProfile']['encryptionIdentity']['userAssignedIdentityResourceId'] \
                    = encryption_identity
        else:
            raise CLIError("Encryption Identity should be an ARM Resource ID of one of the "
                           "user assigned identities associated to the resource")

    if workspace is not None:
        workspace_id = _prepare_workspace(cmd, resource_group_name, workspace)
        master_template.add_secure_parameter('workspaceId', workspace_id)
        if os_type.lower() == 'linux':
            vm_mmaExtension_resource = build_vm_linux_log_analytics_workspace_agent(cmd, vm_name, location)
            master_template.add_resource(vm_mmaExtension_resource)
        elif os_type.lower() == 'windows':
            vm_mmaExtension_resource = build_vm_windows_log_analytics_workspace_agent(cmd, vm_name, location)
            master_template.add_resource(vm_mmaExtension_resource)
        else:
            logger.warning("Unsupported OS type. Skip the connection step for log analytics workspace.")

    master_template.add_resource(vm_resource)

    if admin_password:
        master_template.add_secure_parameter('adminPassword', admin_password)

    template = master_template.build()
    parameters = master_template.build_parameters()

    # deploy ARM template
    deployment_name = 'vm_deploy_' + random_string(32)
    client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES,
                                     aux_subscriptions=aux_subscriptions).deployments
    DeploymentProperties = cmd.get_models('DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
    properties = DeploymentProperties(template=template, parameters=parameters, mode='incremental')
    Deployment = cmd.get_models('Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
    deployment = Deployment(properties=properties)

    if validate:
        from azure.cli.command_modules.vm._vm_utils import log_pprint_template
        log_pprint_template(template)
        log_pprint_template(parameters)

        if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
            validation_poller = client.begin_validate(resource_group_name, deployment_name, deployment)
            return LongRunningOperation(cmd.cli_ctx)(validation_poller)

        return client.validate(resource_group_name, deployment_name, deployment)

    # creates the VM deployment
    if no_wait:
        return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, deployment_name, deployment)
    LongRunningOperation(cmd.cli_ctx)(client.begin_create_or_update(resource_group_name, deployment_name, deployment))

    # Guest Attestation Extension and enable System Assigned MSI by default
    is_trusted_launch = security_type and security_type.lower() == 'trustedlaunch' and\
        enable_vtpm and enable_secure_boot
    is_confidential_vm = security_type and security_type.lower() == 'confidentialvm'
    if (is_trusted_launch or is_confidential_vm) and enable_integrity_monitoring:
        vm = get_vm_by_aaz(cmd, resource_group_name, vm_name, 'instanceView')

        publisher = ''
        if vm.get('storageProfile', {}).get('osDisk', {}).get('osType', '') == 'Linux':
            publisher = 'Microsoft.Azure.Security.LinuxAttestation'
        elif vm.get('storageProfile', {}).get('osDisk', {}).get('osType', '') == 'Windows':
            publisher = 'Microsoft.Azure.Security.WindowsAttestation'

        version = _normalize_extension_version(cmd.cli_ctx, publisher, 'GuestAttestation', None, vm['location'])

        vm_extension_args = {
            'resource_group': resource_group_name,
            'vm_extension_name': 'GuestAttestation',
            'vm_name': vm_name,
            'location': vm['location'],
            'auto_upgrade_minor_version': True,
            'enable_automatic_upgrade': not disable_integrity_monitoring_autoupgrade,
            'protected_settings': None,
            'publisher': publisher,
            'settings': None,
            'type': 'GuestAttestation',
            'type_handler_version': version
        }

        # Best-effort install: failure is logged, not raised, so VM creation still succeeds.
        try:
            from .operations.vm_extension import VMExtensionCreate
            create_vm_extension = VMExtensionCreate(cli_ctx=cmd.cli_ctx)(command_args=vm_extension_args)
            LongRunningOperation(cmd.cli_ctx)(create_vm_extension)
            logger.info('Guest Attestation Extension has been successfully installed by default '
                        'when Trusted Launch configuration is met')
        except Exception as e:
            error_type = "Trusted Launch" if is_trusted_launch else "Confidential VM"
            logger.error('Failed to install Guest Attestation Extension for %s. %s', error_type, e)
    if count:
        vm_names = [vm_name + str(i) for i in range(count)]
    else:
        vm_names = [vm_name]
    vms = []
    # Use vm_name2 to avoid R1704: Redefining argument with the local name 'vm_name' (redefined-argument-from-local)
    for vm_name2 in vm_names:
        vm = get_vm_details(cmd, resource_group_name, vm_name2)
        if assign_identity is not None:
            if enable_local_identity and not identity_scope:
                _show_missing_access_warning(resource_group_name, vm_name2, 'vm')
            vm['identity'] = _construct_identity_info(identity_scope, identity_role,
                                                      vm.get('identity', {}).get('principalId', None),
                                                      vm.get('identity', {}).get('userAssignedIdentities', None))
        vms.append(vm)

    if workspace is not None:
        workspace_name = parse_resource_id(workspace_id)['name']
        _set_data_source_for_workspace(cmd, os_type, resource_group_name, workspace_name)

    if len(vms) == 1:
        return vms[0]
    return vms


def auto_shutdown_vm(cmd, resource_group_name, vm_name, off=None, email=None, webhook=None, time=None,
                     location=None):
    """Create, update, or (with --off) delete the DevTest Labs global schedule
    that auto-shuts-down the given VM."""
    from ..lab.aaz.latest.lab.global_schedule import Delete as DeleteSchedule, Create as CreateSchedule
    from azure.mgmt.core.tools import resource_id
    from azure.cli.core.commands.client_factory import get_subscription_id
    subscription_id = get_subscription_id(cmd.cli_ctx)
    name = 'shutdown-computevm-' + vm_name
    vm_id = resource_id(subscription=subscription_id, resource_group=resource_group_name,
                        namespace='Microsoft.Compute', type='virtualMachines', name=vm_name)

    schedule = {
        'name': name,
        'resource_group': resource_group_name
    }
    if off:
        if email is not None or webhook is not None or time is not None:
            # I don't want to disrupt users. So I warn instead of raising an error.
+ logger.warning('If --off, other parameters will be ignored.') + return DeleteSchedule(cli_ctx=cmd.cli_ctx)(command_args=schedule) + + if time is None: + raise CLIError('usage error: --time is a required parameter') + daily_recurrence = {'time': time} + notification_settings = None + if email or webhook: + notification_settings = { + 'timeInMinutes': 30, + 'status': 'Enabled' + } + if email: + notification_settings['emailRecipient'] = email + if webhook: + notification_settings['webhookUrl'] = webhook + + schedule.update({ + 'status': 'Enabled', + 'target_resource_id': vm_id, + 'daily_recurrence': daily_recurrence, + 'notification_settings': notification_settings, + 'time_zone_id': 'UTC', + 'task_type': 'ComputeVmShutdownTask', + 'location': location + }) + return CreateSchedule(cli_ctx=cmd.cli_ctx)(command_args=schedule) + + +def get_instance_view(cmd, resource_group_name, vm_name, include_user_data=False): + from .operations.vm import VMShow + expand = 'instanceView' + if include_user_data: + expand = expand + ',userData' + + result = VMShow(cli_ctx=cmd.cli_ctx)(command_args={ + "resource_group": resource_group_name, + "vm_name": vm_name, + "expand": expand, + }) + return result + + +def get_vm_by_aaz(cmd, resource_group_name, vm_name, expand=None): + from .operations.vm import VMShow + command_args = { + 'resource_group': resource_group_name, + 'vm_name': vm_name, + } + + if expand: + command_args['expand'] = expand + + return VMShow(cli_ctx=cmd.cli_ctx)(command_args=command_args) + + +def get_vm(cmd, resource_group_name, vm_name, expand=None): + client = _compute_client_factory(cmd.cli_ctx) + return client.virtual_machines.get(resource_group_name, vm_name, expand=expand) + + +def get_vm_to_update(cmd, resource_group_name, vm_name): + client = _compute_client_factory(cmd.cli_ctx) + vm = client.virtual_machines.get(resource_group_name, vm_name) + # To avoid unnecessary permission check of image + vm.storage_profile.image_reference = None + return vm + + +def 
get_vm_to_update_by_aaz(cmd, resource_group_name, vm_name): + from .operations.vm import VMShow + + vm = VMShow(cli_ctx=cmd.cli_ctx)(command_args={ + "resource_group": resource_group_name, + "vm_name": vm_name + }) + + # To avoid unnecessary permission check of image + storage_profile = vm.get("storageProfile", {}) + storage_profile["imageReference"] = None + + return vm + + +def get_vm_details(cmd, resource_group_name, vm_name, include_user_data=False): + from azure.mgmt.core.tools import parse_resource_id + + NicShow = import_aaz_by_profile(cmd.cli_ctx.cloud.profile, "network.nic").Show + PublicIPShow = import_aaz_by_profile(cmd.cli_ctx.cloud.profile, "network.public_ip").Show + + result = get_instance_view(cmd, resource_group_name, vm_name, include_user_data) + public_ips = [] + fqdns = [] + private_ips = [] + mac_addresses = [] + # pylint: disable=line-too-long,no-member + for nic_ref in result.get('networkProfile', {}).get('networkInterfaces', []): + nic_parts = parse_resource_id(nic_ref['id']) + nic = NicShow(cli_ctx=cmd.cli_ctx)(command_args={ + "name": nic_parts['name'], + 'resource_group': nic_parts['resource_group'] + }) + if 'macAddress' in nic: + mac_addresses.append(nic['macAddress']) + for ip_configuration in nic['ipConfigurations']: + if 'privateIPAddress' in ip_configuration: + private_ips.append(ip_configuration['privateIPAddress']) + if 'publicIPAddress' in ip_configuration: + res = parse_resource_id(ip_configuration['publicIPAddress']['id']) + public_ip_info = PublicIPShow(cli_ctx=cmd.cli_ctx)(command_args={ + 'name': res['name'], + 'resource_group': res['resource_group'] + }) + if 'ipAddress' in public_ip_info: + public_ips.append(public_ip_info['ipAddress']) + if 'dnsSettings' in public_ip_info: + fqdns.append(public_ip_info['dnsSettings']['fqdn']) + + result['powerState'] = ','.join([s['displayStatus'] for s in result.get('instanceView', {}).get('statuses', []) + if s['code'].startswith('PowerState/')]) + result['publicIps'] = 
','.join(public_ips) + result['fqdns'] = ','.join(fqdns) + result['privateIps'] = ','.join(private_ips) + result['macAddresses'] = ','.join(mac_addresses) + + del result['instanceView'] # we don't need other instanceView info as people won't care + return result + + +def list_skus(cmd, location=None, size=None, zone=None, show_all=None, resource_type=None): + from ._vm_utils import list_sku_info, is_sku_available + result = list_sku_info(cmd.cli_ctx, location) + # pylint: disable=too-many-nested-blocks + if not show_all: + available_skus = [] + for sku_info in result: + if is_sku_available(sku_info, zone): + available_skus.append(sku_info) + result = available_skus + if resource_type: + result = [x for x in result if x['resourceType'].lower() == resource_type.lower()] + if size: + result = [x for x in result if x['resourceType'] == 'virtualMachines' and size.lower() in x['name'].lower()] + if zone: + result = [x for x in result if x['locationInfo'] and x['locationInfo'][0]['zones']] + return result + + +# pylint: disable=redefined-builtin +def list_vm(cmd, resource_group_name=None, show_details=False, vmss=None): + from azure.mgmt.core.tools import resource_id, is_valid_resource_id, parse_resource_id + from azure.cli.core.commands.client_factory import get_subscription_id + from .aaz.latest.vm import List as VMList + if vmss is not None: + if is_valid_resource_id(vmss): + filter = "'virtualMachineScaleSet/id' eq '{}'".format(vmss) + if resource_group_name is None: + resource_group_name = parse_resource_id(vmss)['resource_group'] + else: + if resource_group_name is None: + raise RequiredArgumentMissingError( + 'usage error: please specify the --resource-group when listing VM instances with VMSS name') + vmss_id = resource_id(subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name, + namespace='Microsoft.Compute', type='virtualMachineScaleSets', name=vmss) + filter = "'virtualMachineScaleSet/id' eq '{}'".format(vmss_id) + + vm_list = 
VMList(cli_ctx=cmd.cli_ctx)(command_args={ + 'resource_group': resource_group_name, + "filter": filter + }) + else: + from .aaz.latest.vm import ListAll as VMListAll + vm_list = VMList(cli_ctx=cmd.cli_ctx)(command_args={ + 'resource_group': resource_group_name + }) if resource_group_name else VMListAll(cli_ctx=cmd.cli_ctx)(command_args={}) + + if show_details: + return [get_vm_details(cmd, _parse_rg_name(v['id'])[0], v['name']) for v in vm_list] + + return list(vm_list) + + +def list_vm_ip_addresses(cmd, resource_group_name=None, vm_name=None): + # We start by getting NICs as they are the smack in the middle of all data that we + # want to collect for a VM (as long as we don't need any info on the VM than what + # is available in the Id, we don't need to make any calls to the compute RP) + # + # Since there is no guarantee that a NIC is in the same resource group as a given + # Virtual Machine, we can't constrain the lookup to only a single group... + NicList = import_aaz_by_profile(cmd.cli_ctx.cloud.profile, "network.nic").List + PublicIPList = import_aaz_by_profile(cmd.cli_ctx.cloud.profile, "network.public_ip").List + + nics = NicList(cli_ctx=cmd.cli_ctx)(command_args={}) + public_ip_addresses = PublicIPList(cli_ctx=cmd.cli_ctx)(command_args={}) + + ip_address_lookup = {pip['id']: pip for pip in list(public_ip_addresses)} + + result = [] + for nic in [n for n in list(nics) if 'virtualMachine' in n and n['virtualMachine']]: + nic_resource_group, nic_vm_name = _parse_rg_name(nic['virtualMachine']['id']) + + # If provided, make sure that resource group name and vm name match the NIC we are + # looking at before adding it to the result... 
+ same_resource_group_name = (resource_group_name is None or + resource_group_name.lower() == nic_resource_group.lower()) + same_vm_name = (vm_name is None or + vm_name.lower() == nic_vm_name.lower()) + if same_resource_group_name and same_vm_name: + network_info = { + 'privateIpAddresses': [], + 'publicIpAddresses': [] + } + for ip_configuration in nic['ipConfigurations']: + network_info['privateIpAddresses'].append(ip_configuration['privateIPAddress']) + if 'publicIPAddress' in ip_configuration and ip_configuration['publicIPAddress'] and \ + ip_configuration['publicIPAddress']['id'] in ip_address_lookup: + public_ip_address = ip_address_lookup[ip_configuration['publicIPAddress']['id']] + + public_ip_addr_info = { + 'id': public_ip_address['id'], + 'name': public_ip_address['name'], + 'ipAddress': public_ip_address.get('ipAddress', None), + 'ipAllocationMethod': public_ip_address.get('publicIPAllocationMethod', None) + } + + try: + public_ip_addr_info['zone'] = public_ip_address['zones'][0] \ + if 'zones' in public_ip_address else None + except (KeyError, IndexError, TypeError): + pass + + network_info['publicIpAddresses'].append(public_ip_addr_info) + + result.append({ + 'virtualMachine': { + 'resourceGroup': nic_resource_group, + 'name': nic_vm_name, + 'network': network_info + } + }) + + return result + + +def open_vm_port(cmd, resource_group_name, vm_name, port, priority=900, network_security_group_name=None, + apply_to_subnet=False): + from azure.mgmt.core.tools import parse_resource_id + _nic = import_aaz_by_profile(cmd.cli_ctx.cloud.profile, "network.nic") + NicShow, NicUpdate = _nic.Show, _nic.Update + _subnet = import_aaz_by_profile(cmd.cli_ctx.cloud.profile, "network.vnet.subnet") + SubnetShow, SubnetUpdate = _subnet.Show, _subnet.Update + _nsg = import_aaz_by_profile(cmd.cli_ctx.cloud.profile, "network.nsg") + NSGShow, NSGCreate = _nsg.Show, _nsg.Create + NSGRuleCreate = import_aaz_by_profile(cmd.cli_ctx.cloud.profile, "network.nsg.rule").Create + + vm 
= get_vm_by_aaz(cmd, resource_group_name, vm_name) + location = vm.get('location', '') + if not vm.get('networkProfile'): + raise CLIError("Network profile not found for VM '{}'".format(vm_name)) + + nic_ids = vm.get('networkProfile', {}).get('networkInterfaces', []) + if len(nic_ids) > 1: + raise CLIError('Multiple NICs is not supported for this command. Create rules on the NSG ' + 'directly.') + if not nic_ids: + raise CLIError("No NIC associated with VM '{}'".format(vm_name)) + + # get existing NSG or create a new one + created_nsg = False + nic = NicShow(cli_ctx=cmd.cli_ctx)(command_args={ + 'name': os.path.split(nic_ids[0].get('id'))[1], + 'resource_group': resource_group_name + }) + if not apply_to_subnet: + nsg = nic['networkSecurityGroup'] + else: + subnet_id = parse_resource_id(nic['ipConfigurations'][0]['subnet']['id']) + subnet = SubnetShow(cli_ctx=cmd.cli_ctx)(command_args={ + 'name': subnet_id['child_name_1'], + 'vnet_name': subnet_id['name'], + 'resource_group': resource_group_name + }) + nsg = subnet['networkSecurityGroup'] if 'networkSecurityGroup' in subnet else None + + if not nsg: + nsg = LongRunningOperation(cmd.cli_ctx, 'Creating network security group')( + NSGCreate(cli_ctx=cmd.cli_ctx)(command_args={ + 'name': network_security_group_name, + 'resource_group': resource_group_name, + 'location': location + })) + created_nsg = True + + # update the NSG with the new rule to allow inbound traffic + + rule_name = 'open-port-all' if port == '*' else 'open-port-{}'.format(port.replace(',', '_')) + + # use portranges if multiple ports are entered + if "," not in port: + port_arg = { + 'destination_port_range': port + } + else: + port_arg = { + 'destination_port_ranges': port.split(',') + } + + nsg_name = nsg['name'] if 'name' in nsg else os.path.split(nsg['id'])[1] + LongRunningOperation(cmd.cli_ctx, 'Adding security rule')( + NSGRuleCreate(cli_ctx=cmd.cli_ctx)(command_args={ + 'name': rule_name, + 'nsg_name': nsg_name, + 'resource_group': 
resource_group_name, + 'protocol': '*', + 'access': 'allow', + 'direction': 'inbound', + 'source_port_range': '*', + **port_arg, + 'priority': priority, + 'source_address_prefix': '*', + 'destination_address_prefix': '*' + }) + ) + + # update the NIC or subnet if a new NSG was created + if created_nsg and not apply_to_subnet: + nic['networkSecurityGroup'] = nsg + LongRunningOperation(cmd.cli_ctx, 'Updating NIC')( + NicUpdate(cli_ctx=cmd.cli_ctx)(command_args={ + 'name': nic['name'], + 'resource_group': resource_group_name, + 'security_rules': nic + })) + elif created_nsg and apply_to_subnet: + subnet['networkSecurityGroup'] = nsg + LongRunningOperation(cmd.cli_ctx, 'Updating subnet')( + SubnetUpdate(cli_ctx=cmd.cli_ctx)(command_args={ + 'name': subnet_id['child_name_1'], + 'resource_group': resource_group_name, + 'vnet_name': subnet_id['name'], + 'subnet': subnet + }) + ) + + return NSGShow(cli_ctx=cmd.cli_ctx)(command_args={ + 'name': nsg_name, + 'resource_group': resource_group_name + }) + + +def resize_vm(cmd, resource_group_name, vm_name, size, no_wait=False): + from .operations.vm import VMCreate, convert_show_result_to_snake_case as to_snake_case + + vm = to_snake_case(get_vm_to_update_by_aaz(cmd, resource_group_name, vm_name) or {}) or {} + current_size = (vm.get("hardware_profile") or {}).get("vm_size") + if current_size == size: + logger.warning("VM is already %s", size) + return None + + vm.pop("resources", None) + + if vm.get("hardware_profile") is None: + vm["hardware_profile"] = {} + vm["hardware_profile"]["vm_size"] = size + + vm["resource_group"] = resource_group_name + vm["vm_name"] = vm_name + vm["no_wait"] = no_wait + + return VMCreate(cli_ctx=cmd.cli_ctx)(command_args=vm) + + +def restart_vm(cmd, resource_group_name, vm_name, no_wait=False, force=False): + from .aaz.latest.vm import Redeploy as _VMRedeploy, Restart as _VMRestart + + command_args = { + "resource_group": resource_group_name, + "vm_name": vm_name, + "no_wait": no_wait, + } + + if 
def set_vm(cmd, instance, lro_operation=None, no_wait=False):
    """PUT a modified SDK VM model back; returns the LRO result (or custom lro_operation's)."""
    instance.resources = None  # Issue: https://github.com/Azure/autorest/issues/934
    compute_client = _compute_client_factory(cmd.cli_ctx)
    rg_name, name = _parse_rg_name(instance.id)
    poller = sdk_no_wait(no_wait, compute_client.virtual_machines.begin_create_or_update,
                         resource_group_name=rg_name,
                         vm_name=name,
                         parameters=instance)
    if lro_operation:
        return lro_operation(poller)

    return LongRunningOperation(cmd.cli_ctx)(poller)


# Notes: vm format is in snake_case
def set_vm_by_aaz(cmd, vm, no_wait=False):
    """PUT a full snake_case VM dict back through the AAZ Create command."""
    from .aaz.latest.vm import Create as _VMCreate

    rg_name, name = _parse_rg_name(vm["id"])
    vm["resource_group"] = rg_name
    vm["vm_name"] = name
    vm["no_wait"] = no_wait

    class SetVM(_VMCreate):
        def _output(self, *args, **kwargs):
            from azure.cli.core.aaz import AAZUndefined, has_value

            # Resolve flatten conflict
            # When the type field conflicts, the type in inner layer is ignored and the outer layer is applied
            if has_value(self.ctx.vars.instance.resources):
                for resource in self.ctx.vars.instance.resources:
                    if has_value(resource.type):
                        resource.type = AAZUndefined

            result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)
            for secret in result.get('osProfile', {}).get('secrets', []) or []:
                for cert in secret.get('vaultCertificates', []):
                    if not cert.get('certificateStore'):
                        cert['certificateStore'] = None
            return result

    return LongRunningOperation(cmd.cli_ctx)(
        SetVM(cli_ctx=cmd.cli_ctx)(command_args=vm))


def patch_vm(cmd, resource_group_name, vm_name, vm):
    """PATCH a VM through the SDK and block until the operation completes."""
    compute_client = _compute_client_factory(cmd.cli_ctx)
    poller = compute_client.virtual_machines.begin_update(resource_group_name, vm_name, vm)
    return LongRunningOperation(cmd.cli_ctx)(poller)


def patch_disk_encryption_set(cmd, resource_group_name, disk_encryption_set_name, disk_encryption_set_update):
    """PATCH a disk encryption set through the SDK and block until completion."""
    compute_client = _compute_client_factory(cmd.cli_ctx)
    poller = compute_client.disk_encryption_sets.begin_update(resource_group_name, disk_encryption_set_name,
                                                              disk_encryption_set_update)
    return LongRunningOperation(cmd.cli_ctx)(poller)


def show_vm(cmd, resource_group_name, vm_name, show_details=False, include_user_data=False):
    """Show a VM; --show-details aggregates network/power-state info via get_vm_details."""
    if show_details:
        return get_vm_details(cmd, resource_group_name, vm_name, include_user_data)

    expand = "userData" if include_user_data else None
    return get_vm_by_aaz(cmd, resource_group_name, vm_name, expand)


def update_vm(cmd, resource_group_name, vm_name, os_disk=None, disk_caching=None,
              write_accelerator=None, license_type=None, no_wait=False, ultra_ssd_enabled=None,
              priority=None, max_price=None, proximity_placement_group=None, workspace=None, enable_secure_boot=None,
              enable_vtpm=None, user_data=None, capacity_reservation_group=None,
              dedicated_host=None, dedicated_host_group=None, size=None, ephemeral_os_disk_placement=None,
              enable_hibernation=None, v_cpus_available=None, v_cpus_per_core=None, disk_controller_type=None,
              security_type=None, enable_proxy_agent=None, proxy_agent_mode=None, additional_scheduled_events=None,
              enable_user_reboot_scheduled_events=None, enable_user_redeploy_scheduled_events=None,
              align_regional_disks_to_vm_zone=None, wire_server_mode=None, imds_mode=None,
              add_proxy_agent_extension=None,
              wire_server_access_control_profile_reference_id=None, imds_access_control_profile_reference_id=None,
              key_incarnation_id=None, **kwargs):
    from azure.mgmt.core.tools import parse_resource_id, resource_id, is_valid_resource_id
    from ._vm_utils import update_write_accelerator_settings, update_disk_caching_by_aaz
    from .operations.vm import convert_show_result_to_snake_case as vm_convert_show_result_to_snake_case
    vm = kwargs['parameters']
kwargs['parameters'] + vm = vm_convert_show_result_to_snake_case(vm) + + if wire_server_access_control_profile_reference_id is not None or \ + imds_access_control_profile_reference_id is not None: + from .aaz.latest.vm import Patch as VMPatchUpdate + + class VMUpdateReferenceId(VMPatchUpdate): + def _output(self, *args, **kwargs): + result = self.deserialize_output(self.ctx.vars.instance, client_flatten=False) + return result + + security_profile = {'proxy_agent_settings': {}} + if wire_server_access_control_profile_reference_id: + security_profile['proxy_agent_settings']['wire_server'] = { + 'in_vm_access_control_profile_reference_id': wire_server_access_control_profile_reference_id} + if imds_access_control_profile_reference_id: + security_profile['proxy_agent_settings']['imds'] = { + 'in_vm_access_control_profile_reference_id': imds_access_control_profile_reference_id} + + LongRunningOperation(cmd.cli_ctx)(VMUpdateReferenceId(cli_ctx=cmd.cli_ctx)(command_args={ + 'vm_name': vm_name, + 'resource_group': resource_group_name, + 'security_profile': security_profile + })) + vm = get_vm_to_update_by_aaz(cmd, resource_group_name, vm_name) + vm = vm_convert_show_result_to_snake_case(vm) + + if add_proxy_agent_extension is not None: + if vm.get("security_profile", None) is None: + vm["security_profile"] = {} + if vm["security_profile"].get("proxy_agent_settings", None) is None: + vm["security_profile"]["proxy_agent_settings"] = {} + + vm["security_profile"]["proxy_agent_settings"]["add_proxy_agent_extension"] = add_proxy_agent_extension + + disk_name = None + if os_disk is not None: + if is_valid_resource_id(os_disk): + disk_id = os_disk + os_disk_id_parsed = parse_resource_id(os_disk) + disk_name = os_disk_id_parsed['name'] + else: + vm_id_parsed = parse_resource_id(vm["id"]) + disk_id = resource_id(subscription=vm_id_parsed['subscription'], + resource_group=vm_id_parsed['resource_group'], + namespace='Microsoft.Compute', type='disks', name=os_disk) + disk_name = 
os_disk + + if vm.get("storage_profile", None) is None: + vm["storage_profile"] = {} + if vm["storage_profile"].get("os_disk", None) is None: + vm["storage_profile"]["os_disk"] = {} + if vm["storage_profile"]["os_disk"].get("managed_disk", None) is None: + vm["storage_profile"]["os_disk"]["managed_disk"] = {} + + vm["storage_profile"]["os_disk"]["managed_disk"]["id"] = disk_id + vm["storage_profile"]["os_disk"]["name"] = disk_name + + if align_regional_disks_to_vm_zone is not None: + if vm.get("storage_profile", None) is None: + vm["storage_profile"] = {} + vm["storage_profile"]["align_regional_disks_to_vm_zone"] = align_regional_disks_to_vm_zone + + from ._constants import COMPATIBLE_SECURITY_TYPE_VALUE + if security_type == "TrustedLaunch": + from azure.cli.core.azclierror import InvalidArgumentValueError + if vm.get("security_profile", {}).get("security_type", None) == "ConfidentialVM": + raise InvalidArgumentValueError("{} is already configured with ConfidentialVM. Security Configuration " + "cannot be updated from ConfidentialVM to TrustedLaunch.".format(vm["name"])) # pylint: disable=line-too-long + + if disk_name is None and vm.get("storage_profile", {}).get("os_disk", {}).get("managed_disk", None) is not None: + os_disk_id_parsed = parse_resource_id(vm["storage_profile"]["os_disk"]["managed_disk"]["id"]) + disk_name = os_disk_id_parsed["name"] + + if disk_name is not None: + # Set --enable-secure-boot True and --enable-vtpm True if not specified by end user. 
+ enable_secure_boot = enable_secure_boot if enable_secure_boot is not None else True + enable_vtpm = enable_vtpm if enable_vtpm is not None else True + + if vm.get("security_profile", None) is None: + vm["security_profile"] = {} + vm["security_profile"]["security_type"] = security_type + + elif security_type == COMPATIBLE_SECURITY_TYPE_VALUE: + if vm.get("security_profile", None) is None: + vm["security_profile"] = {} + vm["security_profile"]["security_type"] = security_type + vm["security_profile"]["uefi_settings"] = None + + if write_accelerator is not None: + if vm.get("storage_profile", None) is None: + vm["storage_profile"] = {} + update_write_accelerator_settings(vm["storage_profile"], write_accelerator) + + if disk_caching is not None: + if vm.get("storage_profile", None) is None: + vm["storage_profile"] = {} + update_disk_caching_by_aaz(vm["storage_profile"], disk_caching) + + if license_type is not None: + vm["license_type"] = license_type + + if user_data is not None: + from azure.cli.core.util import b64encode + vm["user_data"] = b64encode(user_data) + + if capacity_reservation_group is not None: + if capacity_reservation_group == 'None': + capacity_reservation_group = None + + sub_resource = {"id": capacity_reservation_group} + capacity_reservation = {"capacity_reservation_group": sub_resource} + vm["capacity_reservation"] = capacity_reservation + + if dedicated_host is not None: + if vm.get("host", None) is None: + vm["host"] = {"id": dedicated_host} + else: + vm["host"]["id"] = dedicated_host + if vm.get("host_group", None) is not None: + vm["host_group"] = None + + if dedicated_host_group is not None: + if vm.get("host_group", None) is None: + vm["host_group"] = {"id": dedicated_host_group} + else: + vm["host_group"]["id"] = dedicated_host_group + if vm.get("host", None) is not None: + vm["host"] = None + + if ultra_ssd_enabled is not None: + if vm.get("additional_capabilities", None) is None: + vm["additional_capabilities"] = {"ultra_ssd_enabled": 
ultra_ssd_enabled} + else: + vm["additional_capabilities"]["ultra_ssd_enabled"] = ultra_ssd_enabled + + if enable_hibernation is not None: + if vm.get("additional_capabilities", None) is None: + vm["additional_capabilities"] = {"hibernation_enabled": enable_hibernation} + else: + vm["additional_capabilities"]["hibernation_enabled"] = enable_hibernation + + if priority is not None: + vm["priority"] = priority + + if max_price is not None: + if vm.get("billing_profile", None) is None: + vm["billing_profile"] = {"max_price": max_price} + else: + vm["billing_profile"]["max_price"] = max_price + + if proximity_placement_group is not None: + vm["proximity_placement_group"] = {"id": proximity_placement_group} + + if security_type != COMPATIBLE_SECURITY_TYPE_VALUE and (enable_secure_boot is not None or enable_vtpm is not None): + if vm.get("security_profile", None) is None: + vm["security_profile"] = {} + + vm["security_profile"]["uefi_settings"] = {"secure_boot_enabled": enable_secure_boot, + "v_tpm_enabled": enable_vtpm} + + proxy_agent_parameters = [enable_proxy_agent, wire_server_mode, imds_mode, key_incarnation_id] + if any(parameter is not None for parameter in proxy_agent_parameters): + wire_server = {} + imds = {} + if vm.get("security_profile", None) is None: + vm["security_profile"] = {} + vm["security_profile"]["proxy_agent_settings"] = {"wire_server": wire_server, "imds": imds} + elif vm["security_profile"].get("proxy_agent_settings", None) is None: + vm["security_profile"]["proxy_agent_settings"] = {"wire_server": wire_server, "imds": imds} + else: + if vm["security_profile"]["proxy_agent_settings"].get("wire_server", None) is None: + vm["security_profile"]["proxy_agent_settings"]["wire_server"] = wire_server + if vm["security_profile"]["proxy_agent_settings"].get("imds", None) is None: + vm["security_profile"]["proxy_agent_settings"]["imds"] = imds + + if enable_proxy_agent is not None: + vm["security_profile"]["proxy_agent_settings"]["enabled"] = 
enable_proxy_agent + if key_incarnation_id is not None: + vm["security_profile"]["proxy_agent_settings"]["key_incarnation_id"] = key_incarnation_id + if wire_server_mode is not None: + vm["security_profile"]["proxy_agent_settings"]["wire_server"]["mode"] = wire_server_mode + if imds_mode is not None: + vm["security_profile"]["proxy_agent_settings"]["imds"]["mode"] = imds_mode + + if workspace is not None: + workspace_id = _prepare_workspace(cmd, resource_group_name, workspace) + workspace_name = parse_resource_id(workspace_id)['name'] + _set_log_analytics_workspace_extension(cmd=cmd, + resource_group_name=resource_group_name, + vm=vm, + vm_name=vm_name, + workspace_name=workspace_name) + os_type = vm["storage_profile"]["os_disk"]["os_type"] \ + if vm.get("storage_profile", {}).get("os_disk", {}).get("os_type", None) is not None else None + _set_data_source_for_workspace(cmd, os_type, resource_group_name, workspace_name) + + if size is not None: + if vm.get("hardware_profile", {}).get("vm_size", None) == size: + logger.warning("VM size is already %s", size) + else: + if vm.get("hardware_profile", None) is None: + vm["hardware_profile"] = {} + vm["hardware_profile"]["vm_size"] = size + + if v_cpus_available is not None: + if vm.get("hardware_profile", None) is None: + vm["hardware_profile"] = {} + if vm["hardware_profile"].get("vm_size_properties", None) is None: + vm["hardware_profile"]["vm_size_properties"] = {} + vm["hardware_profile"]["vm_size_properties"]["v_cp_us_available"] = v_cpus_available + + if v_cpus_per_core is not None: + if vm.get("hardware_profile", None) is None: + vm["hardware_profile"] = {} + if vm["hardware_profile"].get("vm_size_properties", None) is None: + vm["hardware_profile"]["vm_size_properties"] = {} + vm["hardware_profile"]["vm_size_properties"]["v_cp_us_per_core"] = v_cpus_per_core + + if ephemeral_os_disk_placement is not None: + if vm.get("storage_profile", {}).get("os_disk", {}).get("diff_disk_settings", None) is not None: + 
vm["storage_profile"]["os_disk"]["diff_disk_settings"]["placement"] = ephemeral_os_disk_placement + else: + raise ValidationError("Please update the argument '--ephemeral-os-disk-placement' when " + "creating VM with the option '--ephemeral-os-disk true'") + + if disk_controller_type is not None: + if vm.get("storage_profile", None) is None: + vm["storage_profile"] = {} + vm["storage_profile"]["disk_controller_type"] = disk_controller_type + + if additional_scheduled_events is not None or \ + enable_user_reboot_scheduled_events is not None or enable_user_redeploy_scheduled_events is not None: + if vm.get("scheduled_events_policy", None) is None: + vm["scheduled_events_policy"] = { + "scheduled_events_additional_publishing_targets": { + "event_grid_and_resource_graph": { + "enable": additional_scheduled_events if additional_scheduled_events is not None else False + }, + }, + "user_initiated_reboot": { + "automatically_approve": + enable_user_reboot_scheduled_events if enable_user_reboot_scheduled_events is not None else False # pylint: disable=line-too-long + }, + "user_initiated_redeploy": { + "automatically_approve": + enable_user_redeploy_scheduled_events if enable_user_redeploy_scheduled_events is not None else False # pylint: disable=line-too-long + } + } + else: + if additional_scheduled_events is not None: + vm["scheduled_events_policy"]["scheduled_events_additional_publishing_targets"] = { + "event_grid_and_resource_graph": { + "enable": additional_scheduled_events + } + } + if enable_user_redeploy_scheduled_events is not None: + vm["scheduled_events_policy"]["user_initiated_redeploy"] = { + "automatically_approve": enable_user_redeploy_scheduled_events + } + if enable_user_reboot_scheduled_events is not None: + vm["scheduled_events_policy"]["user_initiated_reboot"] = { + "automatically_approve": enable_user_reboot_scheduled_events + } + if wire_server_access_control_profile_reference_id is not None or \ + imds_access_control_profile_reference_id is not None 
or \ + add_proxy_agent_extension is not None: + kwargs['parameters'] = vm + + vm["resource_group"] = resource_group_name + vm["vm_name"] = vm_name + vm["no_wait"] = no_wait + + from .operations.vm import VMCreate + return VMCreate(cli_ctx=cmd.cli_ctx)(command_args=vm) +# endregion + + +# region VirtualMachines AvailabilitySets +def create_av_set(cmd, availability_set_name, resource_group_name, platform_fault_domain_count=2, + platform_update_domain_count=None, location=None, proximity_placement_group=None, unmanaged=False, + no_wait=False, tags=None, validate=False, additional_scheduled_events=None, + enable_user_reboot_scheduled_events=None, enable_user_redeploy_scheduled_events=None): + from azure.cli.core.util import random_string + from azure.cli.core.commands.arm import ArmTemplateBuilder + from azure.cli.command_modules.vm._template_builder import build_av_set_resource + + tags = tags or {} + + # Build up the ARM template + master_template = ArmTemplateBuilder() + + av_set_resource = build_av_set_resource(cmd, availability_set_name, location, tags, + platform_update_domain_count, + platform_fault_domain_count, unmanaged, + proximity_placement_group=proximity_placement_group, + additional_scheduled_events=additional_scheduled_events, + enable_user_reboot_scheduled_events=enable_user_reboot_scheduled_events, + enable_user_redeploy_scheduled_events=enable_user_redeploy_scheduled_events) + master_template.add_resource(av_set_resource) + + template = master_template.build() + + # deploy ARM template + deployment_name = 'av_set_deploy_' + random_string(32) + client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES).deployments + DeploymentProperties = cmd.get_models('DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES) + properties = DeploymentProperties(template=template, parameters={}, mode='incremental') + Deployment = cmd.get_models('Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES) + deployment = 
Deployment(properties=properties) + + if validate: + if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES): + validation_poller = client.begin_validate(resource_group_name, deployment_name, deployment) + return LongRunningOperation(cmd.cli_ctx)(validation_poller) + + return client.validate(resource_group_name, deployment_name, deployment) + + if no_wait: + return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, deployment_name, deployment) + LongRunningOperation(cmd.cli_ctx)(sdk_no_wait(no_wait, client.begin_create_or_update, + resource_group_name, deployment_name, deployment)) + + from .aaz.latest.vm.availability_set import Show as _Show + return _Show(cli_ctx=cmd.cli_ctx)(command_args={'resource_group': resource_group_name, + 'availability_set_name': availability_set_name}) + + +# endregion + + +# region VirtualMachines BootDiagnostics +class DisableBootDiagnostics(UpdateVM): + def pre_instance_update(self, instance): + from azure.cli.core.aaz import has_value + diag_profile = False if not has_value(instance.properties.diagnostics_profile) else ( + instance.properties.diagnostics_profile) + if not (diag_profile and has_value(diag_profile.boot_diagnostics) and + diag_profile.boot_diagnostics.enabled.to_serialized_data()): + return + boot_diag = {'enabled': False, 'storage_uri': None} + instance.properties.diagnostics_profile = {'boot_diagnostics': boot_diag} + + +def disable_boot_diagnostics(cmd, resource_group_name, vm_name): + ExtensionUpdateLongRunningOperation(cmd.cli_ctx, 'enabling boot diagnostics', 'done')( + DisableBootDiagnostics(cli_ctx=cmd.cli_ctx)(command_args={ + 'resource_group': resource_group_name, + 'vm_name': vm_name + }) + ) + + +class EnableBootDiagnostics(UpdateVM): + @classmethod + def _build_arguments_schema(cls, *args, **kwargs): + from azure.cli.core.aaz import AAZStrArg + args_schema = super()._build_arguments_schema(*args, **kwargs) + args_schema.storage = 
AAZStrArg( + options=["--storage"], + help="Storage account" + ) + return args_schema + + def pre_instance_update(self, instance): + from azure.cli.core.aaz import has_value + from azure.cli.command_modules.vm._vm_utils import get_storage_blob_uri + args = self.ctx.args + storage_uri = None + if has_value(args.storage): + storage_uri = get_storage_blob_uri(self.cli_ctx, args.storage.to_serialized_data()) + boot_diag = {'enabled': True, 'storage_uri': storage_uri} + instance.properties.diagnostics_profile = {'boot_diagnostics': boot_diag} + + +def enable_boot_diagnostics(cmd, resource_group_name, vm_name, storage=None): + ExtensionUpdateLongRunningOperation(cmd.cli_ctx, 'enabling boot diagnostics', 'done')( + EnableBootDiagnostics(cli_ctx=cmd.cli_ctx)(command_args={ + 'resource_group': resource_group_name, + 'vm_name': vm_name, + 'storage': storage + }) + ) + + +class BootLogStreamWriter: # pylint: disable=too-few-public-methods + + def __init__(self, out): + self.out = out + + def write(self, str_or_bytes): + content = str_or_bytes + if isinstance(str_or_bytes, bytes): + try: + content = str_or_bytes.decode('utf8') + except UnicodeDecodeError: + logger.warning("A few characters have been ignored because they were not valid unicode.") + content = str_or_bytes.decode('ascii', 'ignore') + try: + self.out.write(content) + except UnicodeEncodeError: + # e.g. 'charmap' codec can't encode characters in position 258829-258830: character maps to + import unicodedata + ascii_content = unicodedata.normalize('NFKD', content).encode('ascii', 'ignore') + self.out.write(ascii_content.decode()) + logger.warning("A few unicode characters have been ignored because the shell is not able to display. 
" + "To see the full log, use a shell with unicode capacity") + + +def get_boot_log(cmd, resource_group_name, vm_name): + import re + import sys + from azure.cli.core.profiles import get_sdk + from azure.core.exceptions import HttpResponseError + from .aaz.latest.vm.boot_diagnostics import GetBootLogUris as VmGetBootLogUris + BlobClient = get_sdk(cmd.cli_ctx, ResourceType.DATA_STORAGE_BLOB, '_blob_client#BlobClient') + + virtual_machine = get_instance_view(cmd, resource_group_name, vm_name) + + blob_uri = None + if virtual_machine.get('instanceView', {}).get('bootDiagnostics'): + blob_uri = virtual_machine['instanceView']['bootDiagnostics'].get('serialConsoleLogBlobUri') + + # Managed storage + if blob_uri is None: + try: + command_args = { + 'resource_group': resource_group_name, + 'name': vm_name + } + boot_diagnostics_data = VmGetBootLogUris(cli_ctx=cmd.cli_ctx)(command_args=command_args) + blob_uri = boot_diagnostics_data.get('serialConsoleLogBlobUri') + except HttpResponseError: + pass + if blob_uri is None: + raise CLIError('Please enable boot diagnostics.') + return requests.get(blob_uri).content + + # Find storage account for diagnostics + storage_mgmt_client = _get_storage_management_client(cmd.cli_ctx) + if not blob_uri: + raise CLIError('No console log available') + try: + storage_accounts = storage_mgmt_client.storage_accounts.list() + matching_storage_account = (a for a in list(storage_accounts) + if a.primary_endpoints.blob and blob_uri.startswith(a.primary_endpoints.blob)) + storage_account = next(matching_storage_account) + except StopIteration: + raise CLIError('Failed to find storage account for console log file') + + regex = r'/subscriptions/[^/]+/resourceGroups/(?P[^/]+)/.+' + match = re.search(regex, storage_account.id, re.I) + rg = match.group('rg') + # Get account key + keys = storage_mgmt_client.storage_accounts.list_keys(rg, storage_account.name) + + blob_client = BlobClient.from_blob_url(blob_url=blob_uri, credential=keys.keys[0].value) + 
+ # our streamwriter not seekable, so no parallel. + downloader = blob_client.download_blob(max_concurrency=1) + downloader.readinto(BootLogStreamWriter(sys.stdout)) +# endregion + + +# region VirtualMachines Diagnostics +def set_diagnostics_extension(cmd, resource_group_name, vm_name, settings, protected_settings=None, version=None, + no_auto_upgrade=False): + from .aaz.latest.vm.extension import Delete as VmExtensionDelete + vm = get_instance_view(cmd, resource_group_name, vm_name) + is_linux_os = _is_linux_os_aaz(vm) + vm_extension_name = _LINUX_DIAG_EXT if is_linux_os else _WINDOWS_DIAG_EXT + if is_linux_os: # check incompatible version + exts = vm.get('instanceView', {}).get('extensions', []) + major_ver = extension_mappings[_LINUX_DIAG_EXT]['version'].split('.', maxsplit=1)[0] + if next((e for e in exts if e.get('name') == vm_extension_name and + not e.get('typeHandlerVersion', '').startswith(major_ver + '.')), None): + logger.warning('There is an incompatible version of diagnostics extension installed. ' + 'We will update it with a new version') + poller = VmExtensionDelete(cli_ctx=cmd.cli_ctx)(command_args={ + 'resource_group': resource_group_name, + 'vm_extension_name': vm_extension_name, + 'vm_name': vm_name + }) + LongRunningOperation(cmd.cli_ctx)(poller) + + return set_extension(cmd, resource_group_name, vm_name, vm_extension_name, + extension_mappings[vm_extension_name]['publisher'], + version or extension_mappings[vm_extension_name]['version'], + settings, protected_settings, no_auto_upgrade) + + +def show_default_diagnostics_configuration(is_windows_os=False): + public_settings = get_default_diag_config(is_windows_os) + # pylint: disable=line-too-long + protected_settings_info = json.dumps({ + 'storageAccountName': "__STORAGE_ACCOUNT_NAME__", + # LAD and WAD are not consistent on sas token format. 
Call it out here + "storageAccountSasToken": "__SAS_TOKEN_{}__".format("WITH_LEADING_QUESTION_MARK" if is_windows_os else "WITHOUT_LEADING_QUESTION_MARK") + }, indent=2) + logger.warning('Protected settings with storage account info is required to work with the default configurations, e.g. \n%s', protected_settings_info) + return public_settings +# endregion + + +# region VirtualMachines Disks (Managed) +def attach_managed_data_disk(cmd, resource_group_name, vm_name, disk=None, ids=None, disks=None, new=False, sku=None, + size_gb=None, lun=None, caching=None, enable_write_accelerator=False, disk_ids=None, + source_snapshots_or_disks=None, source_disk_restore_point=None, + new_names_of_source_snapshots_or_disks=None, new_names_of_source_disk_restore_point=None): + # attach multiple managed disks using disk attach API + vm = get_vm_to_update_by_aaz(cmd, resource_group_name, vm_name) + + if not new and not sku and not size_gb and disk_ids is not None: + if lun: + disk_lun = lun + else: + disk_lun = _get_disk_lun_by_aaz(vm.get("storageProfile", {}).get("dataDisks", [])) + + data_disks = [] + for disk_item in disk_ids: + disk = { + 'diskId': disk_item, + 'caching': caching, + 'lun': disk_lun, + 'writeAcceleratorEnabled': enable_write_accelerator + } + data_disks.append(disk) + disk_lun += 1 + result = AttachDetachDataDisk(cli_ctx=cmd.cli_ctx)(command_args={ + 'vm_name': vm_name, + 'resource_group': resource_group_name, + 'data_disks_to_attach': data_disks + }) + return result + else: + # attach multiple managed disks using vm PUT API + from azure.mgmt.core.tools import parse_resource_id + from .operations.vm import convert_show_result_to_snake_case + + if size_gb is None: + default_size_gb = 1023 + + if disk_ids is not None: + disks = disk_ids + + for disk_item in disks: + if lun: + disk_lun = lun + else: + disk_lun = _get_disk_lun_by_aaz(vm.get("storageProfile", {}).get("dataDisks", [])) + + if new: + data_disk = { + 'lun': disk_lun, + 'createOption': 'Empty', + 
'name': parse_resource_id(disk_item)['name'], + 'diskSizeGB': size_gb if size_gb else default_size_gb, + 'caching': caching, + 'managedDisk': { + 'storageAccountType': sku + } + } + else: + data_disk = { + 'lun': disk_lun, + 'createOption': 'Attach', + 'managedDisk': { + 'id': disk_item, + 'storageAccountType': sku + }, + 'caching': caching + } + + if enable_write_accelerator: + data_disk["writeAcceleratorEnabled"] = enable_write_accelerator + + if "storageProfile" not in vm: + vm["storageProfile"] = {} + if "dataDisks" not in vm["storageProfile"]: + vm["storageProfile"]["dataDisks"] = [] + vm["storageProfile"]["dataDisks"].append(data_disk) + disk_lun = _get_disk_lun_by_aaz(vm.get("storageProfile", {}).get("dataDisks", [])) + if source_snapshots_or_disks is not None: + if new_names_of_source_snapshots_or_disks is None: + new_names_of_source_snapshots_or_disks = [None] * len(source_snapshots_or_disks) + for disk_id, disk_name in zip(source_snapshots_or_disks, new_names_of_source_snapshots_or_disks): + disk = { + 'name': disk_name, + 'createOption': 'Copy', + 'caching': caching, + 'lun': disk_lun, + 'writeAcceleratorEnabled': enable_write_accelerator, + "sourceResource": { + "id": disk_id + } + } + if size_gb is not None: + disk.update({ + 'diskSizeGB': size_gb + }) + if sku is not None: + disk.update({ + "managedDisk": { + "storageAccountType": sku + } + }) + disk_lun += 1 + if "storageProfile" not in vm: + vm["storageProfile"] = {} + if "dataDisks" not in vm["storageProfile"]: + vm["storageProfile"]["dataDisks"] = [] + vm["storageProfile"]["dataDisks"].append(disk) + if source_disk_restore_point is not None: + if new_names_of_source_disk_restore_point is None: + new_names_of_source_disk_restore_point = [None] * len(source_disk_restore_point) + for disk_id, disk_name in zip(source_disk_restore_point, new_names_of_source_disk_restore_point): + disk = { + 'name': disk_name, + 'createOption': 'Restore', + 'caching': caching, + 'lun': disk_lun, + 
'writeAcceleratorEnabled': enable_write_accelerator, + "sourceResource": { + "id": disk_id + } + } + if size_gb is not None: + disk.update({ + 'diskSizeGB': size_gb + }) + if sku is not None: + disk.update({ + "managedDisk": { + "storageAccountType": sku + } + }) + disk_lun += 1 + if "storageProfile" not in vm: + vm["storageProfile"] = {} + if "dataDisks" not in vm["storageProfile"]: + vm["storageProfile"]["dataDisks"] = [] + vm["storageProfile"]["dataDisks"].append(disk) + + vm = convert_show_result_to_snake_case(vm) + set_vm_by_aaz(cmd, vm) + + +def detach_unmanaged_data_disk(cmd, resource_group_name, vm_name, disk_name): + from .operations.vm import convert_show_result_to_snake_case + # here we handle unmanaged disk + vm = get_vm_to_update_by_aaz(cmd, resource_group_name, vm_name) + vm = convert_show_result_to_snake_case(vm) + leftovers = [d for d in vm.get('storage_profile', {}).get('data_disks', []) if + d.get('name', '').lower() != disk_name.lower()] + if len(vm.get('storage_profile', {}).get('data_disks', [])) == len(leftovers): + raise CLIError("No disk with the name '{}' was found".format(disk_name)) + + vm['storage_profile']['data_disks'] = leftovers + + set_vm_by_aaz(cmd, vm) +# endregion + + +def detach_managed_data_disk(cmd, resource_group_name, vm_name, disk_name=None, force_detach=None, disk_ids=None): + from .operations.vm import convert_show_result_to_snake_case + + if disk_ids is not None: + data_disks = [] + for disk_item in disk_ids: + disk = {'diskId': disk_item, 'detachOption': 'ForceDetach' if force_detach else None} + data_disks.append(disk) + result = AttachDetachDataDisk(cli_ctx=cmd.cli_ctx)(command_args={ + 'vm_name': vm_name, + 'resource_group': resource_group_name, + 'data_disks_to_detach': data_disks + }) + return result + else: + # here we handle managed disk + vm = get_vm_to_update_by_aaz(cmd, resource_group_name, vm_name) + if not force_detach: + # pylint: disable=no-member + leftovers = [d for d in vm.get("storageProfile", 
{}).get("dataDisks", []) + if d["name"].lower() != disk_name.lower()] + if len(vm.get("storageProfile", {}).get("dataDisks", [])) == len(leftovers): + raise ResourceNotFoundError("No disk with the name '{}' was found".format(disk_name)) + else: + leftovers = vm.get("storageProfile", {}).get("dataDisks", []) + is_contains = False + for d in leftovers: + if d["name"].lower() == disk_name.lower(): + d["toBeDetached"] = True + d["detachOption"] = "ForceDetach" + is_contains = True + break + if not is_contains: + raise ResourceNotFoundError("No disk with the name '{}' was found".format(disk_name)) + if "storageProfile" not in vm: + vm["storageProfile"] = {} + vm["storageProfile"]["dataDisks"] = leftovers + vm = convert_show_result_to_snake_case(vm) + set_vm_by_aaz(cmd, vm) +# endregion + + +# region VirtualMachines Extensions +def list_extensions(cmd, resource_group_name, vm_name): + from .operations.vm_extension import VMExtensionList + return VMExtensionList(cli_ctx=cmd.cli_ctx)(command_args={ + 'vm_name': vm_name, + 'resource_group': resource_group_name, + })['value'] + + +def show_extensions(cmd, resource_group_name, vm_name, vm_extension_name, instance_view=False, expand=None): + from .operations.vm_extension import VMExtensionShow + if instance_view: + expand = 'instanceView' + + return VMExtensionShow(cli_ctx=cmd.cli_ctx)(command_args={ + 'vm_extension_name': vm_extension_name, + 'resource_group': resource_group_name, + 'vm_name': vm_name, + 'expand': expand + }) + + +def set_extension(cmd, resource_group_name, vm_name, vm_extension_name, publisher, version=None, settings=None, + protected_settings=None, no_auto_upgrade=False, force_update=False, no_wait=False, + extension_instance_name=None, enable_auto_upgrade=None): + from .operations.vm import VMShow as _VMShow + vm = _VMShow(cli_ctx=cmd.cli_ctx)(command_args={ + 'vm_name': vm_name, + 'resource_group': resource_group_name, + 'expand': 'instanceView' + }) + + if not extension_instance_name: + 
extension_instance_name = vm_extension_name + + instance_name = _get_extension_instance_name_aaz(vm['instanceView'], publisher, vm_extension_name, + suggested_name=extension_instance_name) + if instance_name != extension_instance_name: + msg = "A %s extension with name %s already exists. Updating it with your settings..." + logger.warning(msg, vm_extension_name, instance_name) + if vm_extension_name == 'AHBForRHEL': + logger.warning('Please ensure that you are provisioning AHBForRHEL extension ' + 'on a Red Hat based operating system.') + if vm_extension_name == 'AHBForSLES': + logger.warning('Please ensure that you are provisioning AHBForSLES extension on a SLES based operating system.') + + auto_upgrade_extensions = ['GuestAttestation', 'CodeIntegrityAgent'] + if vm_extension_name in auto_upgrade_extensions and enable_auto_upgrade is None: + enable_auto_upgrade = True + + version = _normalize_extension_version(cmd.cli_ctx, publisher, vm_extension_name, version, vm['location']) + + from .operations.vm_extension import VMExtensionCreate as ExtensionSet + ext_args = { + 'resource_group': resource_group_name, + 'vm_name': vm_name, + 'vm_extension_name': instance_name, + 'location': vm['location'], + 'publisher': publisher, + 'type': vm_extension_name, + 'protected_settings': protected_settings, + 'type_handler_version': version, + 'settings': settings, + 'auto_upgrade_minor_version': (not no_auto_upgrade), + 'enable_automatic_upgrade': enable_auto_upgrade, + 'no_wait': no_wait + } + if force_update: + ext_args['force_update_tag'] = str(_gen_guid()) + return ExtensionSet(cli_ctx=cmd.cli_ctx)(command_args=ext_args) +# endregion + + +# region VirtualMachines Extension Images +def list_vm_extension_images( + cmd, image_location=None, publisher_name=None, name=None, version=None, latest=False): + return load_extension_images_thru_services( + cmd.cli_ctx, publisher_name, name, version, image_location, latest) +# endregion + + +# region VirtualMachines Identity +def 
_remove_identities_by_aaz(cmd, resource_group_name, name, identities, getter, setter): + from ._vm_utils import MSI_LOCAL_ID + + remove_system_assigned_identity = False + + if MSI_LOCAL_ID in identities: + remove_system_assigned_identity = True + identities.remove(MSI_LOCAL_ID) + + resource = getter(cmd, resource_group_name, name) + existing_identity = resource.get('identity') + + if existing_identity is None: + return None + + existing_emsis = [x.lower() for x in (existing_identity.get('userAssignedIdentities') or {}).keys()] + existing_identity['userAssignedIdentities'] = {} + + if identities: + emsis_to_remove = [x.lower() for x in identities] + + non_existing = [emsis for emsis in emsis_to_remove if emsis not in existing_emsis] + if non_existing: + raise CLIError("'{}' are not associated with '{}'".format(','.join(non_existing), name)) + + emsis_to_retain = [emsis for emsis in existing_emsis if emsis not in emsis_to_remove] + + if not emsis_to_retain: # if all emsis are gone, we need to update the type + if existing_identity['type'] == IdentityType.USER_ASSIGNED.value: + existing_identity['type'] = IdentityType.NONE.value + elif existing_identity['type'] == IdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED.value: + existing_identity['type'] = IdentityType.SYSTEM_ASSIGNED.value + + for emsis in identities: + existing_identity['userAssignedIdentities'][emsis] = {} + + if remove_system_assigned_identity: + if existing_identity['type'] == IdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED.value \ + or existing_identity['type'] == IdentityType.USER_ASSIGNED.value: + existing_identity['type'] = IdentityType.USER_ASSIGNED.value + else: + existing_identity['type'] = IdentityType.NONE.value + + result = LongRunningOperation(cmd.cli_ctx)(setter(resource_group_name, name, resource)) + + if not result: + return None + + return result.get('identity') or None + + +def remove_vm_identity(cmd, resource_group_name, vm_name, identities=None): + def setter(resource_group_name, vm_name, vm): + 
command_args = { + 'resource_group': resource_group_name, + 'vm_name': vm_name + } + + if vm.get('identity') and vm.get('identity').get('type') == IdentityType.USER_ASSIGNED.value: + # NOTE: The literal 'UserAssigned' is intentionally appended as a marker for + # VMIdentityRemove._format_content, which uses it to apply special handling + # for purely user-assigned identities. It is not a real identity resource ID. + command_args['mi_user_assigned'] = \ + list(vm.get('identity', {}).get('userAssignedIdentities', {}).keys()) + ['UserAssigned'] + elif vm.get('identity') and vm.get('identity').get('type') == IdentityType.SYSTEM_ASSIGNED.value: + command_args['mi_user_assigned'] = [] + command_args['mi_system_assigned'] = 'True' + elif vm.get('identity') and vm.get('identity').get('type') == IdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED.value: + command_args['mi_user_assigned'] = list(vm.get('identity', {}).get('userAssignedIdentities', {}).keys()) + command_args['mi_system_assigned'] = 'True' + else: + command_args['mi_user_assigned'] = [] + + from .operations.vm import VMIdentityRemove + return VMIdentityRemove(cli_ctx=cmd.cli_ctx)(command_args=command_args) + + if identities is None: + from ._vm_utils import MSI_LOCAL_ID + identities = [MSI_LOCAL_ID] + + return _remove_identities_by_aaz(cmd, resource_group_name, vm_name, identities, get_vm_by_aaz, setter) + + +# region VirtualMachines Images +def list_vm_images(cmd, image_location=None, publisher_name=None, offer=None, sku=None, all=False, # pylint: disable=redefined-builtin + edge_zone=None, architecture=None): + load_thru_services = all or edge_zone is not None + + if load_thru_services: + if not publisher_name and not offer and not sku and not edge_zone: + logger.warning("You are retrieving all the images from server which could take more than a minute. " + "To shorten the wait, provide '--publisher', '--offer' , '--sku' or '--edge-zone'." 
+ " Partial name search is supported.") + all_images = load_images_thru_services(cmd.cli_ctx, publisher_name, offer, sku, image_location, edge_zone, + architecture) + else: + all_images = load_images_from_aliases_doc(cmd.cli_ctx, publisher_name, offer, sku, architecture) + logger.warning('You are viewing an offline list of images, use --all to retrieve an up-to-date list') + + if edge_zone is not None: + for i in all_images: + i['urn'] = ':'.join([i['publisher'], i['offer'], i['sku'], i['edge_zone'], i['version']]) + else: + for i in all_images: + i['urn'] = ':'.join([i['publisher'], i['offer'], i['sku'], i['version']]) + return all_images + + +def list_offers(cmd, publisher_name, location, edge_zone=None): + if edge_zone is not None: + from .aaz.latest.vm.image.edge_zone import ListOffers + return ListOffers(cli_ctx=cmd.cli_ctx)(command_args={ + 'location': location, + 'edge_zone': edge_zone, + 'publisher': publisher_name + }) + else: + from .aaz.latest.vm.image import ListOffers + return ListOffers(cli_ctx=cmd.cli_ctx)(command_args={ + 'location': location, + 'publisher': publisher_name + }) + + +def list_publishers(cmd, location, edge_zone=None): + if edge_zone is not None: + from .aaz.latest.vm.image.edge_zone import ListPublishers + return ListPublishers(cli_ctx=cmd.cli_ctx)(command_args={ + 'location': location, + 'edge_zone': edge_zone, + }) + else: + from .aaz.latest.vm.image import ListPublishers + return ListPublishers(cli_ctx=cmd.cli_ctx)(command_args={ + 'location': location, + }) + + +def list_sku(cmd, location, publisher_name, offer, edge_zone=None,): + if edge_zone is not None: + from .aaz.latest.vm.image.edge_zone import ListSkus + return ListSkus(cli_ctx=cmd.cli_ctx)(command_args={ + 'location': location, + 'edge_zone': edge_zone, + 'publisher': publisher_name, + 'offer': offer, + }) + else: + from .aaz.latest.vm.image import ListSkus + return ListSkus(cli_ctx=cmd.cli_ctx)(command_args={ + 'location': location, + 'publisher': publisher_name, + 
'offer': offer, + }) + + +def show_vm_image(cmd, urn=None, publisher=None, offer=None, sku=None, version=None, location=None, edge_zone=None): + from azure.cli.core.commands.parameters import get_one_of_subscription_locations + from azure.cli.core.azclierror import (MutuallyExclusiveArgumentError, + InvalidArgumentValueError) + + location = location or get_one_of_subscription_locations(cmd.cli_ctx) + error_msg = 'Please specify all of (--publisher, --offer, --sku, --version), or --urn' + if urn: + if any([publisher, offer, sku, edge_zone, version]): + recommendation = 'Try to use --urn publisher:offer:sku:version or' \ + ' --urn publisher:offer:sku:edge_zone:version' + raise MutuallyExclusiveArgumentError(error_msg, recommendation) + items = urn.split(":") + if len(items) != 4 and len(items) != 5: + raise InvalidArgumentValueError( + '--urn should be in the format of publisher:offer:sku:version or publisher:offer:sku:edge_zone:version') + if len(items) == 5: + publisher, offer, sku, edge_zone, version = urn.split(":") + elif len(items) == 4: + publisher, offer, sku, version = urn.split(":") + if version.lower() == 'latest': + version = _get_latest_image_version(cmd.cli_ctx, location, publisher, offer, sku) + elif not publisher or not offer or not sku or not version: + raise RequiredArgumentMissingError(error_msg) + if edge_zone is not None: + from .aaz.latest.vm.image.edge_zone import Show + return Show(cli_ctx=cmd.cli_ctx)(command_args={ + 'location': location, + 'edge_zone': edge_zone, + 'publisher': publisher, + 'offer': offer, + 'sku': sku, + 'version': version, + }) + else: + from .aaz.latest.vm.image import Show + return Show(cli_ctx=cmd.cli_ctx)(command_args={ + 'location': location, + 'publisher': publisher, + 'offer': offer, + 'sku': sku, + 'version': version, + }) + + +def accept_market_ordering_terms(cmd, urn=None, publisher=None, offer=None, plan=None): + from azure.mgmt.marketplaceordering import MarketplaceOrderingAgreements + from 
azure.mgmt.marketplaceordering.models import OfferType + from azure.cli.core.azclierror import (MutuallyExclusiveArgumentError, + InvalidArgumentValueError) + + error_msg = 'Please specify all of (--plan, --offer, --publish), or --urn' + if urn: + if any([publisher, offer, plan]): + recommendation = 'Try to use --urn publisher:offer:sku:version only' + raise MutuallyExclusiveArgumentError(error_msg, recommendation) + items = urn.split(':') + if len(items) != 4: + raise InvalidArgumentValueError('--urn should be in the format of publisher:offer:sku:version') + publisher, offer, _, _ = items + image = show_vm_image(cmd, urn) + if not image.get('plan', None): + logger.warning("Image '%s' has no terms to accept.", urn) + return + plan = image['plan']['name'] + else: + if not publisher or not offer or not plan: + raise RequiredArgumentMissingError(error_msg) + + market_place_client = get_mgmt_service_client(cmd.cli_ctx, MarketplaceOrderingAgreements) + + term = market_place_client.marketplace_agreements.get(offer_type=OfferType.VIRTUALMACHINE, + publisher_id=publisher, + offer_id=offer, + plan_id=plan) + term.accepted = True + return market_place_client.marketplace_agreements.create(offer_type=OfferType.VIRTUALMACHINE, + publisher_id=publisher, + offer_id=offer, + plan_id=plan, + parameters=term) +# endregion + + +def _terms_prepare(cmd, urn, publisher, offer, plan): + if urn: + if any([publisher, offer, plan]): + raise CLIError('usage error: If using --urn, do not use any of --plan, --offer, --publisher.') + terms = urn.split(':') + if len(terms) != 4: + raise CLIError('usage error: urn should be in the format of publisher:offer:sku:version.') + publisher, offer = terms[0], terms[1] + image = show_vm_image(cmd, urn) + if not image.get('plan', None): + raise CLIError("Image '%s' has no terms to accept." 
% urn) + plan = image['plan']['name'] + else: + if not all([publisher, offer, plan]): + raise CLIError( + 'usage error: If not using --urn, all of --plan, --offer and --publisher should be provided.') + return publisher, offer, plan + + +def _accept_cancel_terms(cmd, urn, publisher, offer, plan, accept): + from azure.mgmt.marketplaceordering.models import OfferType + publisher, offer, plan = _terms_prepare(cmd, urn, publisher, offer, plan) + op = cf_vm_image_term(cmd.cli_ctx, '') + terms = op.get(offer_type=OfferType.VIRTUALMACHINE, + publisher_id=publisher, + offer_id=offer, + plan_id=plan) + terms.accepted = accept + return op.create(offer_type=OfferType.VIRTUALMACHINE, + publisher_id=publisher, + offer_id=offer, + plan_id=plan, + parameters=terms) + + +def accept_terms(cmd, urn=None, publisher=None, offer=None, plan=None): + """ + Accept Azure Marketplace image terms so that the image can be used to create VMs. + :param cmd:cmd + :param urn:URN, in the format of 'publisher:offer:sku:version'. If specified, other argument values can be omitted + :param publisher:Image publisher + :param offer:Image offer + :param plan:Image billing plan + :return: + """ + return _accept_cancel_terms(cmd, urn, publisher, offer, plan, True) + + +def cancel_terms(cmd, urn=None, publisher=None, offer=None, plan=None): + """ + Cancel Azure Marketplace image terms. + :param cmd:cmd + :param urn:URN, in the format of 'publisher:offer:sku:version'. If specified, other argument values can be omitted + :param publisher:Image publisher + :param offer:Image offer + :param plan:Image billing plan + :return: + """ + return _accept_cancel_terms(cmd, urn, publisher, offer, plan, False) + + +def get_terms(cmd, urn=None, publisher=None, offer=None, plan=None): + """ + Get the details of Azure Marketplace image terms. + :param cmd:cmd + :param urn:URN, in the format of 'publisher:offer:sku:version'. 
If specified, other argument values can be omitted + :param publisher:Image publisher + :param offer:Image offer + :param plan:Image billing plan + :return: + """ + from azure.mgmt.marketplaceordering.models import OfferType + publisher, offer, plan = _terms_prepare(cmd, urn, publisher, offer, plan) + op = cf_vm_image_term(cmd.cli_ctx, '') + terms = op.get(offer_type=OfferType.VIRTUALMACHINE, + publisher_id=publisher, + offer_id=offer, + plan_id=plan) + return terms + + +# region VirtualMachines NetworkInterfaces (NICs) +def show_vm_nic(cmd, resource_group_name, vm_name, nic): + from azure.mgmt.core.tools import parse_resource_id + + NicShow = import_aaz_by_profile(cmd.cli_ctx.cloud.profile, "network.nic").Show + + vm = get_vm_by_aaz(cmd, resource_group_name, vm_name) + + found = next( + (n for n in vm.get("networkProfile", {}).get("networkInterfaces", []) if nic.lower() == n["id"].lower()), None + # pylint: disable=no-member + ) + if found: + nic_name = parse_resource_id(found["id"])['name'] + return NicShow(cli_ctx=cmd.cli_ctx)(command_args={ + 'name': nic_name, + 'resource_group': resource_group_name + }) + raise CLIError("NIC '{}' not found on VM '{}'".format(nic, vm_name)) + + +def list_vm_nics(cmd, resource_group_name, vm_name): + vm = get_vm_by_aaz(cmd, resource_group_name, vm_name) + return vm.get("networkProfile", {}).get("networkInterfaces", []) # pylint: disable=no-member + + +def add_vm_nic(cmd, resource_group_name, vm_name, nics, primary_nic=None): + vm = get_vm_to_update_by_aaz(cmd, resource_group_name, vm_name) + new_nics = _build_nic_list(cmd, nics) + existing_nics = _get_existing_nics(vm) + return _update_vm_nics(cmd, vm, existing_nics + new_nics, primary_nic) + + +def remove_vm_nic(cmd, resource_group_name, vm_name, nics, primary_nic=None): + + def to_delete(nic_id): + return [n for n in nics_to_delete if n["id"].lower() == nic_id.lower()] + + vm = get_vm_to_update_by_aaz(cmd, resource_group_name, vm_name) + nics_to_delete = _build_nic_list(cmd, 
nics) + existing_nics = _get_existing_nics(vm) + + survived = [x for x in existing_nics if not to_delete(x["id"])] + + return _update_vm_nics(cmd, vm, survived, primary_nic) + + +def set_vm_nic(cmd, resource_group_name, vm_name, nics, primary_nic=None): + vm = get_vm_to_update_by_aaz(cmd, resource_group_name, vm_name) + nics = _build_nic_list(cmd, nics) + return _update_vm_nics(cmd, vm, nics, primary_nic) + + +def _build_nic_list(cmd, nic_ids): + NicShow = import_aaz_by_profile(cmd.cli_ctx.cloud.profile, "network.nic").Show + nic_list = [] + if nic_ids: + # pylint: disable=no-member + for nic_id in nic_ids: + rg, name = _parse_rg_name(nic_id) + nic = NicShow(cli_ctx=cmd.cli_ctx)(command_args={ + 'name': name, + 'resource_group': rg + }) + nic_list.append({"id": nic["id"], "primary": False}) + return nic_list + + +def _get_existing_nics(vm): + network_profile = vm.get("networkProfile", None) + nics = [] + if network_profile is not None: + nics = network_profile.get("networkInterfaces", []) + return nics + + +def _update_vm_nics(cmd, vm, nics, primary_nic): + from .operations.vm import convert_show_result_to_snake_case + + if primary_nic: + try: + _, primary_nic_name = _parse_rg_name(primary_nic) + except IndexError: + primary_nic_name = primary_nic + + matched = [n for n in nics if _parse_rg_name(n["id"])[1].lower() == primary_nic_name.lower()] + if not matched: + raise CLIError('Primary Nic {} is not found'.format(primary_nic)) + if len(matched) > 1: + raise CLIError('Duplicate Nic entries with name {}'.format(primary_nic)) + for n in nics: + n["primary"] = False + matched[0]["primary"] = True + elif nics: + if not [n for n in nics if n["primary"]]: + nics[0]["primary"] = True + + if "networkProfile" not in vm: + vm["networkProfile"] = {} + vm["networkProfile"]["networkInterfaces"] = nics + vm = convert_show_result_to_snake_case(vm) + result = set_vm_by_aaz(cmd, vm) + return (result.get("networkProfile") or {}).get("networkInterfaces") or [] +# endregion + + +# 
# region VirtualMachines RunCommand
def run_command_invoke(cmd, resource_group_name, vm_vmss_name, command_id, scripts=None, parameters=None, instance_id=None):  # pylint: disable=line-too-long
    """Invoke a run-command on a VM, or on a VMSS instance when instance_id is given.

    `parameters` entries of the form 'name=value' are split on the first '=';
    bare values get auto-generated names 'arg1', 'arg2', ...
    """
    parameters = parameters or []  # CLI user input arg "parameters"
    params = []  # AAZCommand arg for "parameters"
    auto_arg_name_num = 0
    for p in parameters:
        if '=' in p:
            n, v = p.split('=', 1)
        else:
            # RunCommand API requires named arguments, which doesn't make lots of sense for bash scripts
            # using positional arguments, so here we provide names just to get API happy
            # note, we don't handle mixing styles, but will consolidate by GA when API is settled
            auto_arg_name_num += 1
            n = 'arg{}'.format(auto_arg_name_num)
            v = p
        params.append({'name': n, 'value': v})

    # if instance_id, this is a vmss instance
    if instance_id:
        from .aaz.latest.vmss.run_command import Invoke
        return Invoke(cli_ctx=cmd.cli_ctx)(command_args={
            'resource_group': resource_group_name,
            'vmss_name': vm_vmss_name,
            'instance_id': instance_id,
            'command_id': command_id,
            'script': scripts,
            'parameters': params
        })

    # otherwise this is a regular vm instance
    from .aaz.latest.vm.run_command import Invoke
    return Invoke(cli_ctx=cmd.cli_ctx)(command_args={
        'resource_group': resource_group_name,
        'vm_name': vm_vmss_name,
        'command_id': command_id,
        'script': scripts,
        'parameters': params
    })


def vm_run_command_invoke(cmd, resource_group_name, vm_name, command_id, scripts=None, parameters=None):
    """VM-only wrapper around run_command_invoke (no instance_id)."""
    return run_command_invoke(cmd, resource_group_name, vm_name, command_id, scripts, parameters)


def vm_run_command_create(cmd,
                          resource_group_name,
                          vm_name,
                          run_command_name,
                          location,
                          tags=None,
                          script=None,
                          script_uri=None,
                          command_id=None,
                          parameters=None,
                          protected_parameters=None,
                          async_execution=None,
                          run_as_user=None,
                          run_as_password=None,
                          timeout_in_seconds=None,
                          output_blob_uri=None,
                          error_blob_uri=None,
                          no_wait=False):
    """Create a managed run command resource on a VM.

    Only explicitly supplied optional arguments are forwarded; async_execution
    defaults to False when unset.  Parameter strings are parsed the same way
    as in run_command_invoke ('name=value' or auto-named 'argN').
    """
    from .aaz.latest.vm.run_command import Create
    args = {}
    args['location'] = location
    args['resource_group'] = resource_group_name
    args['run_command_name'] = run_command_name
    args['vm_name'] = vm_name
    args['no_wait'] = no_wait
    if tags is not None:
        args['tags'] = tags
    if script is not None:
        args['script'] = script
    if script_uri is not None:
        args['script_uri'] = script_uri
    if command_id is not None:
        args['command_id'] = command_id
    if parameters is not None:
        auto_arg_name_num = 0
        args['parameters'] = []
        for p in parameters:
            if '=' in p:
                n, v = p.split('=', 1)
            else:
                auto_arg_name_num += 1
                n = 'arg{}'.format(auto_arg_name_num)
                v = p
            args['parameters'].append({'name': n, 'value': v})
    if protected_parameters is not None:
        # Same parsing as `parameters`, kept separate because protected
        # values are not returned by the service.
        auto_arg_name_num = 0
        args['protected_parameters'] = []
        for p in protected_parameters:
            if '=' in p:
                n, v = p.split('=', 1)
            else:
                auto_arg_name_num += 1
                n = 'arg{}'.format(auto_arg_name_num)
                v = p
            args['protected_parameters'].append({'name': n, 'value': v})
    if async_execution is not None:
        args['async_execution'] = async_execution
    else:
        args['async_execution'] = False
    if run_as_user is not None:
        args['run_as_user'] = run_as_user
    if run_as_password is not None:
        args['run_as_password'] = run_as_password
    if timeout_in_seconds is not None:
        args['timeout_in_seconds'] = timeout_in_seconds
    if output_blob_uri is not None:
        args['output_blob_uri'] = output_blob_uri
    if error_blob_uri is not None:
        args['error_blob_uri'] = error_blob_uri
    return Create(cli_ctx=cmd.cli_ctx)(command_args=args)


def vm_run_command_update(cmd,
                          resource_group_name,
                          vm_name,
                          run_command_name,
                          location,
                          tags=None,
                          script=None,
                          script_uri=None,
                          command_id=None,
                          parameters=None,
                          protected_parameters=None,
                          async_execution=None,
                          run_as_user=None,
                          run_as_password=None,
                          timeout_in_seconds=None,
                          output_blob_uri=None,
                          error_blob_uri=None,
                          no_wait=False):
    """Update an existing VM run command.

    Implemented as a GET-modify-PUT: a local Update subclass patches only the
    supplied fields onto the fetched instance in pre_instance_update.
    """
    from .aaz.latest.vm.run_command import Update as _Update

    class Update(_Update):
        def pre_instance_update(self, instance):
            if tags is not None:
                instance.tags = tags
            if location is not None:
                instance.location = location
            if script is not None:
                instance.properties.source.script = script
            if script_uri is not None:
                instance.properties.source.script_uri = script_uri
            if command_id is not None:
                instance.properties.source.command_id = command_id
            if parameters is not None:
                # 'name=value' or auto-named 'argN', as in vm_run_command_create.
                auto_arg_name_num = 0
                _params = []
                for p in parameters:
                    if '=' in p:
                        n, v = p.split('=', 1)
                    else:
                        auto_arg_name_num += 1
                        n = 'arg{}'.format(auto_arg_name_num)
                        v = p
                    _params.append({'name': n, 'value': v})
                instance.properties.parameters = _params
            if protected_parameters is not None:
                auto_arg_name_num = 0
                _params = []
                for p in protected_parameters:
                    if '=' in p:
                        n, v = p.split('=', 1)
                    else:
                        auto_arg_name_num += 1
                        n = 'arg{}'.format(auto_arg_name_num)
                        v = p
                    _params.append({'name': n, 'value': v})
                instance.properties.protected_parameters = _params
            if async_execution is not None:
                instance.properties.async_execution = async_execution
            else:
                instance.properties.async_execution = False
            if run_as_user is not None:
                instance.properties.run_as_user = run_as_user
            if run_as_password is not None:
                instance.properties.run_as_password = run_as_password
            if timeout_in_seconds is not None:
                instance.properties.timeout_in_seconds = timeout_in_seconds
            if output_blob_uri is not None:
                instance.properties.output_blob_uri = output_blob_uri
            if error_blob_uri is not None:
                instance.properties.error_blob_uri = error_blob_uri

    args = {}
    args['resource_group'] = resource_group_name
    args['run_command_name'] = run_command_name
    args['vm_name'] = vm_name
    args['no_wait'] = no_wait
    return Update(cli_ctx=cmd.cli_ctx)(command_args=args)


def vm_run_command_list(cmd,
                        resource_group_name=None,
                        vm_name=None,
                        expand=None,
                        location=None):
    """List run commands for a VM, or all built-in commands for a location."""

    if not location and not (resource_group_name and vm_name):
        raise RequiredArgumentMissingError("Please specify --location or specify --vm-name and --resource-group")

    from .aaz.latest.vm.run_command import List, ListBySubscription

    if vm_name:
        return List(cli_ctx=cmd.cli_ctx)(command_args={
            "resource_group": resource_group_name,
            "vm_name": vm_name,
            "expand": expand
        })

    return ListBySubscription(cli_ctx=cmd.cli_ctx)(command_args={
        "location": location
    })


def vm_run_command_show(cmd,
                        resource_group_name=None,
                        vm_name=None,
                        run_command_name=None,
                        expand=None,
                        instance_view=False,
                        location=None,
                        command_id=None):
    """Show a run command on a VM, or a built-in command by location + id."""

    if not (resource_group_name and vm_name and run_command_name) and not (location and command_id):
        raise RequiredArgumentMissingError(
            "Please specify --location and --command-id or specify --vm-name, --resource-group and --run-command-name")

    from .aaz.latest.vm.run_command import Show, ShowById

    if vm_name:
        if instance_view:
            # --instance-view is sugar for the API's expand=instanceView.
            expand = 'instanceView'
        return Show(cli_ctx=cmd.cli_ctx)(command_args={
            "resource_group": resource_group_name,
            "vm_name": vm_name,
            "expand": expand,
            "run_command_name": run_command_name
        })

    return ShowById(cli_ctx=cmd.cli_ctx)(command_args={
        "location": location,
        "command_id": command_id
    })

# endregion


# region VirtualMachines Secrets
def _get_vault_id_from_name(cli_ctx, client, vault_name):
    """Resolve a key vault name to its full resource id within the subscription."""
    group_name = _get_resource_group_from_vault_name(cli_ctx, vault_name)
    if not group_name:
        raise CLIError("unable to find vault '{}' in current subscription.".format(vault_name))
    vault = client.get(group_name, vault_name)
    return vault.id


def get_vm_format_secret(cmd, secrets, certificate_store=None, keyvault=None, resource_group_name=None):
    """Transform raw secret URLs into the VM osProfile.secrets format,
    grouping certificates by their source vault."""
    from azure.keyvault.secrets._shared import parse_key_vault_id
    import re
    client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_KEYVAULT).vaults
    grouped_secrets = {}

    # Each input item may contain several URLs, one per line.
    merged_secrets = []
    for s in secrets:
        merged_secrets += s.splitlines()

    # group secrets by source vault
    for secret in merged_secrets:
        parsed = parse_key_vault_id(secret)
        # Vault name is the first host label of the vault URL, e.g.
        # 'myvault' from 'https://myvault.vault.azure.net'.
        match = re.search('://(.+?)\\.', parsed.vault_url)
        vault_name = match.group(1)
        if vault_name not in grouped_secrets:
            grouped_secrets[vault_name] = {
                'vaultCertificates': [],
                'id': keyvault or _get_vault_id_from_name(cmd.cli_ctx, client, vault_name)
            }

        vault_cert = {'certificateUrl': secret}
        if certificate_store:
            vault_cert['certificateStore'] = certificate_store

        grouped_secrets[vault_name]['vaultCertificates'].append(vault_cert)

    # transform the reduced map to vm format
    formatted = [{'sourceVault': {'id': value['id']},
                  'vaultCertificates': value['vaultCertificates']}
                 for _, value in list(grouped_secrets.items())]

    return formatted


def add_vm_secret(cmd, resource_group_name, vm_name, keyvault, certificate, certificate_store=None):
    """Add a key-vault certificate to the VM's osProfile secrets.

    `certificate` may be a full secret URL or a bare certificate name
    (resolved against `keyvault`).  `certificate_store` is Windows-only.
    """
    from azure.mgmt.core.tools import parse_resource_id
    from ._vm_utils import create_data_plane_keyvault_certificate_client, get_key_vault_base_url
    from .operations.vm import convert_show_result_to_snake_case
    vm = get_vm_to_update_by_aaz(cmd, resource_group_name, vm_name)
    vm = convert_show_result_to_snake_case(vm)

    if '://' not in certificate:  # has a cert name rather a full url?
+ keyvault_client = create_data_plane_keyvault_certificate_client( + cmd.cli_ctx, get_key_vault_base_url(cmd.cli_ctx, parse_resource_id(keyvault)['name'])) + cert_info = keyvault_client.get_certificate(certificate) + certificate = cert_info.secret_id + + if not _is_linux_os_by_aaz(vm): + certificate_store = certificate_store or 'My' + elif certificate_store: + raise CLIError('Usage error: --certificate-store is only applicable on Windows VM') + vault_cert = { + 'certificate_store': certificate_store, + 'certificate_url': certificate + } + vault_secret_group = next((x for x in vm.get('os_profile', {}).get('secrets', []) + if x.get('source_vault', {}).get('id', '').lower() == keyvault.lower()), None) + if vault_secret_group: + certs = vault_secret_group.get('vault_certificates', []) + certs.append(vault_cert) + vault_secret_group['vault_certificates'] = certs + else: + vault_secret_group = { + 'source_vault': { + 'id': keyvault + }, + 'vault_certificates': [vault_cert] + } + + if not vm.get('os_profile'): + vm['os_profile'] = {'secret': []} + + if not vm.get('os_profile').get('secrets'): + vm['os_profile']['secrets'] = [] + + vm['os_profile']['secrets'].append(vault_secret_group) + + vm = set_vm_by_aaz(cmd, vm) + return vm.get('osProfile', {}).get('secrets', []) + + +def list_vm_secrets(cmd, resource_group_name, vm_name): + vm = get_vm_by_aaz(cmd, resource_group_name, vm_name) + + if vm.get('osProfile', {}).get('secrets', []): + for secret in vm['osProfile']['secrets']: + for cert in secret.get('vaultCertificates', []): + if not cert.get('certificateStore'): + cert['certificateStore'] = None + + return vm.get('osProfile', {}).get('secrets', []) + + +def remove_vm_secret(cmd, resource_group_name, vm_name, keyvault, certificate=None): + from .operations.vm import convert_show_result_to_snake_case + vm = get_vm_to_update_by_aaz(cmd, resource_group_name, vm_name) + + # support 2 kinds of filter: + # a. if only keyvault is supplied, we delete its whole vault group. + # b. 
    # if both keyvault and certificate are supplied, we only delete the specific cert entry.

    to_keep = vm.get('osProfile', {}).get('secrets', [])
    keyvault_matched = []
    if keyvault:
        keyvault = keyvault.lower()
        keyvault_matched = [x for x in to_keep if x.get('sourceVault', {}).get('id', '').lower() == keyvault]

    if keyvault and not certificate:
        # Case (a): drop every group sourced from this vault.
        to_keep = [x for x in to_keep if x not in keyvault_matched]
    elif certificate:
        # Case (b): drop matching cert entries (within the vault's groups if
        # keyvault was given, otherwise across all groups).
        temp = keyvault_matched if keyvault else to_keep
        cert_url_pattern = certificate.lower()
        if '://' not in cert_url_pattern:  # just a cert name?
            cert_url_pattern = '/' + cert_url_pattern + '/'
        for x in temp:
            x['vaultCertificates'] = [v for v in x.get('vaultCertificates')
                                      if not (v.get('certificateUrl') and
                                              cert_url_pattern in v.get('certificateUrl', '').lower())]
        to_keep = [x for x in to_keep if x.get('vaultCertificates')]  # purge all groups w/o any cert entries

    vm['osProfile']['secrets'] = to_keep
    # set_vm_by_aaz expects snake_case keys.
    vm = convert_show_result_to_snake_case(vm)
    vm = set_vm_by_aaz(cmd, vm)
    return vm.get('osProfile', {}).get('secrets', [])
# endregion


# region VirtualMachines UnmanagedDisks
def attach_unmanaged_data_disk(cmd, resource_group_name, vm_name, new=False, vhd_uri=None, lun=None,
                               disk_name=None, size_gb=1023, caching=None):
    """Attach an unmanaged (VHD-based) data disk to the VM.

    With new=True an empty disk of size_gb is created; otherwise an existing
    disk (disk_name required) is attached.  The VHD URI defaults to a sibling
    blob of the OS disk's VHD.
    """
    from .operations.vm import convert_show_result_to_snake_case
    from ._vm_utils import DiskCreateOptionTypes
    if not new and not disk_name:
        raise CLIError('Please provide the name of the existing disk to attach')

    vm = get_vm_to_update_by_aaz(cmd, resource_group_name, vm_name)
    vm = convert_show_result_to_snake_case(vm)
    if disk_name is None:
        import datetime
        # Timestamped default name for new disks.
        disk_name = vm_name + '-' + datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
    # pylint: disable=no-member
    if vhd_uri is None:
        # Only VMs whose OS disk is itself a VHD can take unmanaged data disks.
        if not vm.get('storage_profile', {}).get('os_disk', {}).get('vhd'):
            raise CLIError('Adding unmanaged disks to a VM with managed disks is not supported')
        blob_uri = vm['storage_profile']['os_disk']['vhd']['uri']
        # Place the new VHD in the same container as the OS disk's VHD.
        vhd_uri = blob_uri[0:blob_uri.rindex('/') + 1] + disk_name + '.vhd'

    if lun is None:
        lun = _get_disk_lun_by_aaz(vm.get('storage_profile', {}).get('data_disks'))
    disk = {
        'caching': caching,
        'create_option': DiskCreateOptionTypes.EMPTY.value if new else DiskCreateOptionTypes.ATTACH.value,
        'disk_size_gb': size_gb if new else None,
        'lun': lun,
        'name': disk_name,
        'vhd': {
            'uri': vhd_uri
        }
    }
    if not vm.get('storage_profile', {}).get('data_disks'):
        vm['storage_profile']['data_disks'] = []
    vm['storage_profile']['data_disks'].append(disk)
    return set_vm_by_aaz(cmd, vm)


def list_unmanaged_disks(cmd, resource_group_name, vm_name):
    """List the VM's data disks from its storage profile."""
    vm = get_vm_by_aaz(cmd, resource_group_name, vm_name)
    return vm.get('storageProfile', {}).get('dataDisks')
# endregion


# region VirtualMachines Users
def _update_linux_access_extension(cmd, vm_instance, resource_group_name, protected_settings,
                                   no_wait=False):
    """Install/update the Linux VMAccess extension with the given protected settings."""
    from .operations.vm_extension import VMExtensionCreate

    # pylint: disable=no-member
    instance_name = _get_extension_instance_name_aaz(vm_instance.get('instanceView', {}),
                                                     extension_mappings[_LINUX_ACCESS_EXT]['publisher'],
                                                     _LINUX_ACCESS_EXT,
                                                     _ACCESS_EXT_HANDLER_NAME)

    publisher, version, auto_upgrade = _get_access_extension_upgrade_info_aaz(
        vm_instance.get('resources', []), _LINUX_ACCESS_EXT)

    poller = VMExtensionCreate(cli_ctx=cmd.cli_ctx)(command_args={
        'resource_group': resource_group_name,
        'vm_name': vm_instance['name'],
        'vm_extension_name': instance_name,
        'location': vm_instance['location'],
        'publisher': publisher,
        'type': _LINUX_ACCESS_EXT,
        'type_handler_version': version,
        'settings': {},
        'protected_settings': protected_settings,
        'auto_upgrade_minor_version': auto_upgrade,
        'no_wait': no_wait
    })
    return poller


def _set_linux_user(cmd, vm_instance, resource_group_name, username,
                    password=None, ssh_key_value=None, no_wait=False):
    """Create/update a Linux user via the VMAccess extension.

    With neither password nor key supplied, falls back to ~/.ssh/id_rsa.pub.
    """
    protected_settings = {}
    protected_settings['username'] = username
    if password:
        protected_settings['password'] = password
    elif not ssh_key_value and not password:  # default to ssh
        ssh_key_value = os.path.join(os.path.expanduser('~'), '.ssh', 'id_rsa.pub')

    if ssh_key_value:
        # ssh_key_value may be key material or a path to a key file.
        protected_settings['ssh_key'] = read_content_if_is_file(ssh_key_value)

    if no_wait:
        return _update_linux_access_extension(cmd, vm_instance, resource_group_name,
                                              protected_settings, no_wait)

    poller = _update_linux_access_extension(cmd, vm_instance, resource_group_name,
                                            protected_settings)
    return ExtensionUpdateLongRunningOperation(cmd.cli_ctx, 'setting user', 'done')(poller)


def _reset_windows_admin(cmd, vm_instance, resource_group_name, username, password, no_wait=False):
    '''Update the password. You can only change the password. Adding a new user is not supported. '''
    from .operations.vm_extension import VMExtensionCreate

    publisher, version, auto_upgrade = _get_access_extension_upgrade_info_aaz(
        vm_instance.get('resources', []), _WINDOWS_ACCESS_EXT)
    # pylint: disable=no-member
    instance_name = _get_extension_instance_name_aaz(vm_instance.get('instanceView', {}),
                                                     publisher,
                                                     _WINDOWS_ACCESS_EXT,
                                                     _ACCESS_EXT_HANDLER_NAME)

    poller = VMExtensionCreate(cli_ctx=cmd.cli_ctx)(command_args={
        'location': vm_instance['location'],
        'resource_group': resource_group_name,
        'vm_name': vm_instance['name'],
        'vm_extension_name': instance_name,
        'publisher': publisher,
        'type': _WINDOWS_ACCESS_EXT,
        'type_handler_version': version,
        'auto_upgrade_minor_version': auto_upgrade,
        'settings': {'UserName': username},
        'protected_settings': {'Password': password},
        'no_wait': no_wait
    })

    if no_wait:
        return poller

    return ExtensionUpdateLongRunningOperation(cmd.cli_ctx, 'resetting admin', 'done')(poller)


def set_user(cmd, resource_group_name, vm_name, username, password=None, ssh_key_value=None,
             no_wait=False):
    """Set/reset a user account: VMAccess user on Linux, admin password on Windows."""
    from .operations.vm import VMShow
    vm = VMShow(cli_ctx=cmd.cli_ctx)(command_args={
        'resource_group': resource_group_name,
        'vm_name': vm_name,
        'expand': 'instanceView'
    })
    if _is_linux_os_aaz(vm):
        return _set_linux_user(cmd, vm, resource_group_name, username, password, ssh_key_value, no_wait)
    if ssh_key_value:
        # NOTE(review): 'appliable' looks like a typo for 'applicable' in this
        # user-facing message — left untouched here; confirm before changing.
        raise CLIError('SSH key is not appliable on a Windows VM')
    return _reset_windows_admin(cmd, vm, resource_group_name, username, password, no_wait)


def delete_user(cmd, resource_group_name, vm_name, username, no_wait=False):
    """Remove a user account via the Linux VMAccess extension (Linux only)."""
    from .operations.vm import VMShow
    vm = VMShow(cli_ctx=cmd.cli_ctx)(command_args={
        'resource_group': resource_group_name,
        'vm_name': vm_name,
        'expand': 'instanceView'
    })
    if not _is_linux_os_aaz(vm):
        raise CLIError('Deleting a user is not supported on Windows VM')
    if no_wait:
        return _update_linux_access_extension(cmd, vm, resource_group_name,
                                              {'remove_user': username}, no_wait)

    poller = _update_linux_access_extension(cmd, vm, resource_group_name,
                                            {'remove_user': username})
    return ExtensionUpdateLongRunningOperation(cmd.cli_ctx, 'deleting user', 'done')(poller)


def reset_linux_ssh(cmd, resource_group_name, vm_name, no_wait=False):
    """Reset the SSH configuration via the Linux VMAccess extension (Linux only)."""
    from .operations.vm import VMShow
    vm = VMShow(cli_ctx=cmd.cli_ctx)(command_args={
        'resource_group': resource_group_name,
        'vm_name': vm_name,
        'expand': 'instanceView'
    })
    if not _is_linux_os_aaz(vm):
        raise CLIError('Resetting SSH is not supported in Windows VM')
    if no_wait:
        return _update_linux_access_extension(cmd, vm, resource_group_name,
                                              {'reset_ssh': True}, no_wait)

    poller = _update_linux_access_extension(cmd, vm, resource_group_name,
                                            {'reset_ssh': True})
    return ExtensionUpdateLongRunningOperation(cmd.cli_ctx, 'resetting SSH', 'done')(poller)
# endregion


# region VirtualMachineScaleSets
def assign_vmss_identity(cmd, resource_group_name, vmss_name, assign_identity=None, identity_role=None,
                         identity_role_id=None, identity_scope=None):
    """Assign system- and/or user-assigned managed identities to a VMSS,
    optionally creating a role assignment at identity_scope."""
    identity, _, external_identities, enable_local_identity = _build_identities_info(assign_identity)
    from ._vm_utils import assign_identity as assign_identity_helper, UpgradeMode

    command_args = {'resource_group': resource_group_name, 'vm_scale_set_name': vmss_name}

    def getter():
        return get_vmss_by_aaz(cmd, resource_group_name, vmss_name)

    def setter(vmss, external_identities=external_identities):
        # Merge the requested identities with what the VMSS already has to
        # decide the resulting identity type.
        if vmss.get('identity', {}).get('type', None) == IdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED.value:
            identity_types = IdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED.value
        elif vmss.get('identity', {}).get('type', None) == IdentityType.SYSTEM_ASSIGNED.value and external_identities:
            identity_types = IdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED.value
        elif vmss.get('identity', {}).get('type', None) == IdentityType.USER_ASSIGNED.value and enable_local_identity:
            identity_types = IdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED.value
        elif external_identities and enable_local_identity:
            identity_types = IdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED.value
        elif external_identities:
            identity_types = IdentityType.USER_ASSIGNED.value
        else:
            identity_types = IdentityType.SYSTEM_ASSIGNED.value

        if identity_types == IdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED.value:
            command_args['mi_system_assigned'] = "True"
            command_args['mi_user_assigned'] = []
        elif identity_types == IdentityType.USER_ASSIGNED.value:
            command_args['mi_user_assigned'] = []
        else:
            command_args['mi_system_assigned'] = "True"
            command_args['mi_user_assigned'] = []

        # Keep user-assigned identities already on the VMSS ...
        if vmss.get('identity', {}).get('userAssignedIdentities', None):
            for key in vmss.get('identity').get('userAssignedIdentities').keys():
                command_args['mi_user_assigned'].append(key)

        # ... and add the newly requested ones, de-duplicated.
        if identity.get('userAssignedIdentities'):
            for key in identity.get('userAssignedIdentities', {}).keys():
                if key not in command_args['mi_user_assigned']:
                    command_args['mi_user_assigned'].append(key)

        from .operations.vmss import VMSSPatch
        update_vmss_identity = VMSSPatch(cli_ctx=cmd.cli_ctx)(command_args=command_args)
        LongRunningOperation(cmd.cli_ctx)(update_vmss_identity)
        result = update_vmss_identity.result()
        return result

    assign_identity_helper(cmd.cli_ctx, getter, setter, identity_role=identity_role_id, identity_scope=identity_scope)

    vmss = getter()
    if vmss.get('upgradePolicy', {}).get('mode', '') == UpgradeMode.MANUAL.value:
        # Manual upgrade mode: instances don't pick up model changes automatically.
        logger.warning("With manual upgrade mode, you will need to run 'az vmss update-instances -g %s -n %s "
                       "--instance-ids *' to propagate the change", resource_group_name, vmss_name)

    return _construct_identity_info(
        identity_scope,
        identity_role,
        vmss.get('identity').get('principalId') if vmss.get('identity') else None,
        vmss.get('identity').get('userAssignedIdentities') if vmss.get('identity') else None)


# pylint: disable=too-many-locals, too-many-statements
def create_vmss(cmd, vmss_name, resource_group_name, image=None,
                disable_overprovision=None, instance_count=2,
                location=None, tags=None, upgrade_policy_mode='manual', validate=False,
                admin_username=None, admin_password=None, authentication_type=None,
                vm_sku=None, no_wait=False,
                ssh_dest_key_path=None, ssh_key_value=None, generate_ssh_keys=False, ssh_key_type=None,
                load_balancer=None, load_balancer_sku=None, application_gateway=None,
                app_gateway_subnet_address_prefix=None,
                app_gateway_sku='Standard_Large', app_gateway_capacity=10,
                backend_pool_name=None, nat_pool_name=None, backend_port=None, health_probe=None,
                public_ip_address=None, public_ip_address_allocation=None,
                public_ip_address_dns_name=None, accelerated_networking=None,
                public_ip_per_vm=False, vm_domain_name=None, dns_servers=None, nsg=None,
                os_caching=None, data_caching=None,
                storage_container_name='vhds', storage_sku=None,
                os_type=None, os_disk_name=None,
                use_unmanaged_disk=False, data_disk_sizes_gb=None, disk_info=None,
                vnet_name=None, vnet_address_prefix='10.0.0.0/16',
                subnet=None, 
subnet_address_prefix=None, + os_offer=None, os_publisher=None, os_sku=None, os_version=None, + load_balancer_type=None, app_gateway_type=None, vnet_type=None, + public_ip_address_type=None, storage_profile=None, + single_placement_group=None, custom_data=None, secrets=None, platform_fault_domain_count=None, + plan_name=None, plan_product=None, plan_publisher=None, plan_promotion_code=None, license_type=None, + assign_identity=None, identity_scope=None, identity_role=None, encryption_identity=None, + identity_role_id=None, zones=None, priority=None, eviction_policy=None, + application_security_groups=None, ultra_ssd_enabled=None, + ephemeral_os_disk=None, ephemeral_os_disk_placement=None, + proximity_placement_group=None, aux_subscriptions=None, terminate_notification_time=None, + max_price=None, computer_name_prefix=None, orchestration_mode=None, scale_in_policy=None, + os_disk_encryption_set=None, data_disk_encryption_sets=None, data_disk_iops=None, data_disk_mbps=None, + automatic_repairs_grace_period=None, specialized=None, os_disk_size_gb=None, encryption_at_host=None, + host_group=None, max_batch_instance_percent=None, max_unhealthy_instance_percent=None, + max_unhealthy_upgraded_instance_percent=None, pause_time_between_batches=None, + enable_cross_zone_upgrade=None, prioritize_unhealthy_instances=None, edge_zone=None, + user_data=None, network_api_version=None, enable_spot_restore=None, spot_restore_timeout=None, + capacity_reservation_group=None, enable_auto_update=None, patch_mode=None, enable_agent=None, + security_type=None, enable_secure_boot=None, enable_vtpm=None, automatic_repairs_action=None, + v_cpus_available=None, v_cpus_per_core=None, accept_term=None, + disable_integrity_monitoring=None, # Unused + enable_integrity_monitoring=False, enable_auto_os_upgrade=None, + os_disk_security_encryption_type=None, os_disk_secure_vm_disk_encryption_set=None, + os_disk_delete_option=None, data_disk_delete_option=None, regular_priority_count=None, + 
regular_priority_percentage=None, disk_controller_type=None, nat_rule_name=None, + enable_osimage_notification=None, max_surge=None, disable_integrity_monitoring_autoupgrade=False, + enable_hibernation=None, enable_proxy_agent=None, proxy_agent_mode=None, + security_posture_reference_id=None, security_posture_reference_exclude_extensions=None, + enable_resilient_creation=None, enable_resilient_deletion=None, + additional_scheduled_events=None, enable_user_reboot_scheduled_events=None, + enable_user_redeploy_scheduled_events=None, skuprofile_vmsizes=None, + skuprofile_allostrat=None, skuprofile_rank=None, + security_posture_reference_is_overridable=None, zone_balance=None, wire_server_mode=None, + imds_mode=None, add_proxy_agent_extension=None, wire_server_access_control_profile_reference_id=None, + imds_access_control_profile_reference_id=None, enable_automatic_zone_balancing=None, + automatic_zone_balancing_strategy=None, automatic_zone_balancing_behavior=None, + enable_automatic_repairs=None): + from azure.cli.core.commands.client_factory import get_subscription_id + from azure.cli.core.util import random_string, hash_string + from azure.cli.core.commands.arm import ArmTemplateBuilder + from azure.cli.command_modules.vm._template_builder import (StorageProfile, build_vmss_resource, + build_vnet_resource, build_public_ip_resource, + build_load_balancer_resource, + build_vmss_storage_account_pool_resource, + build_application_gateway_resource, + build_msi_role_assignment, build_nsg_resource, + build_nat_rule_v2) + + # The default load balancer will be expected to be changed from Basic to Standard, and Basic will be removed. + # In order to avoid breaking change which has a big impact to users, + # we use the hint to guide users to use Standard load balancer to create VMSS in the first stage. 
+ if load_balancer_sku == 'Basic': + logger.warning(remove_basic_option_msg, "--lb-sku Standard") + + # Build up the ARM template + master_template = ArmTemplateBuilder() + + uniform_str = 'Uniform' + flexible_str = 'Flexible' + if orchestration_mode: + from azure.mgmt.core.tools import resource_id, is_valid_resource_id + + if disk_info: + storage_sku = disk_info['os'].get('storageAccountType') + + subscription_id = get_subscription_id(cmd.cli_ctx) + + if os_disk_encryption_set is not None and not is_valid_resource_id(os_disk_encryption_set): + os_disk_encryption_set = resource_id( + subscription=subscription_id, resource_group=resource_group_name, + namespace='Microsoft.Compute', type='diskEncryptionSets', name=os_disk_encryption_set) + if os_disk_secure_vm_disk_encryption_set is not None and\ + not is_valid_resource_id(os_disk_secure_vm_disk_encryption_set): + os_disk_secure_vm_disk_encryption_set = resource_id( + subscription=subscription_id, resource_group=resource_group_name, + namespace='Microsoft.Compute', type='diskEncryptionSets', name=os_disk_secure_vm_disk_encryption_set) + + if data_disk_encryption_sets is None: + data_disk_encryption_sets = [] + for i, des in enumerate(data_disk_encryption_sets): + if des is not None and not is_valid_resource_id(des): + data_disk_encryption_sets[i] = resource_id( + subscription=subscription_id, resource_group=resource_group_name, + namespace='Microsoft.Compute', type='diskEncryptionSets', name=des) + + network_id_template = resource_id( + subscription=subscription_id, resource_group=resource_group_name, + namespace='Microsoft.Network') + + vmss_id = resource_id( + subscription=subscription_id, resource_group=resource_group_name, + namespace='Microsoft.Compute', type='virtualMachineScaleSets', name=vmss_name) + + scrubbed_name = vmss_name.replace('-', '').lower()[:5] + naming_prefix = '{}{}'.format(scrubbed_name, + hash_string(vmss_id, + length=(9 - len(scrubbed_name)), + force_lower=True)) + + # determine final 
defaults and calculated values + tags = tags or {} + os_disk_name = os_disk_name or ('osdisk_{}'.format(hash_string(vmss_id, length=10)) + if use_unmanaged_disk else None) + load_balancer = load_balancer or '{}LB'.format(vmss_name) + app_gateway = application_gateway or '{}AG'.format(vmss_name) + backend_pool_name = backend_pool_name or '{}BEPool'.format(load_balancer or application_gateway) + + vmss_dependencies = [] + + # VNET will always be a dependency + if vnet_type == 'new': + vnet_name = vnet_name or '{}VNET'.format(vmss_name) + subnet = subnet or '{}Subnet'.format(vmss_name) + vmss_dependencies.append('Microsoft.Network/virtualNetworks/{}'.format(vnet_name)) + vnet = build_vnet_resource( + cmd, vnet_name, location, tags, vnet_address_prefix, subnet, subnet_address_prefix, edge_zone=edge_zone) + if app_gateway_type: + vnet['properties']['subnets'].append({ + 'name': 'appGwSubnet', + 'properties': { + 'addressPrefix': app_gateway_subnet_address_prefix + } + }) + master_template.add_resource(vnet) + if subnet: + subnet_id = subnet if is_valid_resource_id(subnet) else \ + '{}/virtualNetworks/{}/subnets/{}'.format(network_id_template, vnet_name, subnet) + else: + subnet_id = None + + if vnet_name: + gateway_subnet_id = ('{}/virtualNetworks/{}/subnets/appGwSubnet'.format(network_id_template, vnet_name) + if app_gateway_type == 'new' else None) + else: + gateway_subnet_id = None + + # public IP is used by either load balancer/application gateway + public_ip_address_id = None + if public_ip_address: + public_ip_address_id = (public_ip_address if is_valid_resource_id(public_ip_address) + else '{}/publicIPAddresses/{}'.format(network_id_template, + public_ip_address)) + + def _get_public_ip_address_allocation(value, sku): + if not value: + value = 'Static' if (sku and sku.lower() == 'standard') else 'Dynamic' + return value + + # Handle load balancer creation + if load_balancer_type == 'new': + 
vmss_dependencies.append('Microsoft.Network/loadBalancers/{}'.format(load_balancer)) + + lb_dependencies = [] + if vnet_type == 'new': + lb_dependencies.append('Microsoft.Network/virtualNetworks/{}'.format(vnet_name)) + if public_ip_address_type == 'new': + public_ip_address = public_ip_address or '{}PublicIP'.format(load_balancer) + lb_dependencies.append( + 'Microsoft.Network/publicIpAddresses/{}'.format(public_ip_address)) + master_template.add_resource(build_public_ip_resource( + cmd, public_ip_address, location, tags, + _get_public_ip_address_allocation(public_ip_address_allocation, load_balancer_sku), + public_ip_address_dns_name, load_balancer_sku, zones, edge_zone=edge_zone)) + public_ip_address_id = '{}/publicIPAddresses/{}'.format(network_id_template, + public_ip_address) + + if nat_rule_name and nat_pool_name: + from azure.cli.core.azclierror import MutuallyExclusiveArgumentError + raise MutuallyExclusiveArgumentError( + 'Please do not pass in both "--nat-pool-name" and "--nat-rule-name" parameters at the same time.' 
+ '"--nat-rule-name" parameter is recommended') + + is_basic_lb_sku = not load_balancer_sku or load_balancer_sku.lower() != 'standard' + # calculate default names if not provided + if orchestration_mode.lower() == flexible_str.lower(): + # inbound nat pools are not supported on VMSS Flex + nat_pool_name = None + elif nat_pool_name or (not nat_rule_name and is_basic_lb_sku): + nat_pool_name = nat_pool_name or '{}NatPool'.format(load_balancer) + + if not backend_port: + backend_port = 3389 if os_type == 'windows' else 22 + + frontend_ip_name = 'loadBalancerFrontEnd' + lb_resource = build_load_balancer_resource( + cmd, load_balancer, location, tags, backend_pool_name, nat_pool_name, backend_port, + frontend_ip_name, public_ip_address_id, subnet_id, private_ip_address='', + private_ip_allocation='Dynamic', sku=load_balancer_sku, instance_count=instance_count, + disable_overprovision=disable_overprovision, edge_zone=edge_zone) + lb_resource['dependsOn'] = lb_dependencies + master_template.add_resource(lb_resource) + + # Per https://learn.microsoft.com/azure/load-balancer/load-balancer-standard-overview#nsg + if load_balancer_sku and load_balancer_sku.lower() == 'standard' and nsg is None and os_type: + nsg_name = '{}NSG'.format(vmss_name) + master_template.add_resource(build_nsg_resource( + None, nsg_name, location, tags, 'rdp' if os_type.lower() == 'windows' else 'ssh')) + nsg = "[resourceId('Microsoft.Network/networkSecurityGroups', '{}')]".format(nsg_name) + vmss_dependencies.append('Microsoft.Network/networkSecurityGroups/{}'.format(nsg_name)) + + # Since NAT rule V2 can work for both Uniform and Flex VMSS, but basic LB SKU cannot fully support it + # So when users use Standard LB SKU, CLI uses NAT rule V2 by default + if not nat_pool_name: + + if nat_rule_name and is_basic_lb_sku: + logger.warning( + 'Since the basic SKU of load balancer cannot fully support NAT rule V2, ' + 'it is recommended to specify "--lb-sku Standard" to use standard SKU instead.') + + 
nat_rule_name = nat_rule_name or 'NatRule' + # The nested resource must follow the pattern parent_resource_name/nested_res_name + nat_rule_name = '{}/{}'.format(load_balancer, nat_rule_name) + nat_rule = build_nat_rule_v2(cmd, nat_rule_name, location, load_balancer, frontend_ip_name, + backend_pool_name, backend_port, instance_count, disable_overprovision) + master_template.add_resource(nat_rule) + + # Or handle application gateway creation + if app_gateway_type == 'new': + vmss_dependencies.append('Microsoft.Network/applicationGateways/{}'.format(app_gateway)) + + ag_dependencies = [] + if vnet_type == 'new': + ag_dependencies.append('Microsoft.Network/virtualNetworks/{}'.format(vnet_name)) + if public_ip_address_type == 'new': + public_ip_address = public_ip_address or '{}PublicIP'.format(app_gateway) + ag_dependencies.append( + 'Microsoft.Network/publicIpAddresses/{}'.format(public_ip_address)) + master_template.add_resource(build_public_ip_resource( + cmd, public_ip_address, location, tags, + _get_public_ip_address_allocation(public_ip_address_allocation, None), public_ip_address_dns_name, + None, zones)) + public_ip_address_id = '{}/publicIPAddresses/{}'.format(network_id_template, + public_ip_address) + + # calculate default names if not provided + backend_port = backend_port or 80 + + ag_resource = build_application_gateway_resource( + cmd, app_gateway, location, tags, backend_pool_name, backend_port, 'appGwFrontendIP', + public_ip_address_id, subnet_id, gateway_subnet_id, private_ip_address='', + private_ip_allocation='Dynamic', sku=app_gateway_sku, capacity=app_gateway_capacity) + ag_resource['dependsOn'] = ag_dependencies + master_template.add_variable( + 'appGwID', + "[resourceId('Microsoft.Network/applicationGateways', '{}')]".format(app_gateway)) + master_template.add_resource(ag_resource) + + # create storage accounts if needed for unmanaged disk storage + if storage_profile == StorageProfile.SAPirImage: + 
master_template.add_resource(build_vmss_storage_account_pool_resource( + cmd, 'storageLoop', location, tags, storage_sku, edge_zone)) + master_template.add_variable('storageAccountNames', [ + '{}{}'.format(naming_prefix, x) for x in range(5) + ]) + master_template.add_variable('vhdContainers', [ + "[concat('https://', variables('storageAccountNames')[{}], '.blob.{}/{}')]".format( + x, cmd.cli_ctx.cloud.suffixes.storage_endpoint, storage_container_name) for x in range(5) + ]) + vmss_dependencies.append('storageLoop') + + backend_address_pool_id = None + inbound_nat_pool_id = None + if load_balancer_type or app_gateway_type: + network_balancer = load_balancer if load_balancer_type else app_gateway + balancer_type = 'loadBalancers' if load_balancer_type else 'applicationGateways' + + if is_valid_resource_id(network_balancer): + # backend address pool needed by load balancer or app gateway + backend_address_pool_id = '{}/backendAddressPools/{}'.format(network_balancer, backend_pool_name) + if nat_pool_name: + inbound_nat_pool_id = '{}/inboundNatPools/{}'.format(network_balancer, nat_pool_name) + else: + # backend address pool needed by load balancer or app gateway + backend_address_pool_id = '{}/{}/{}/backendAddressPools/{}'.format( + network_id_template, balancer_type, network_balancer, backend_pool_name) + if nat_pool_name: + inbound_nat_pool_id = '{}/{}/{}/inboundNatPools/{}'.format( + network_id_template, balancer_type, network_balancer, nat_pool_name) + + if health_probe and not is_valid_resource_id(health_probe): + health_probe = '{}/loadBalancers/{}/probes/{}'.format(network_id_template, load_balancer, health_probe) + + ip_config_name = '{}IPConfig'.format(naming_prefix) + nic_name = '{}Nic'.format(naming_prefix) + + if custom_data: + custom_data = read_content_if_is_file(custom_data) + + if user_data: + user_data = read_content_if_is_file(user_data) + + if secrets: + secrets = _merge_secrets([validate_file_or_dict(secret) for secret in secrets]) + + if 
computer_name_prefix is not None and isinstance(computer_name_prefix, str): + naming_prefix = computer_name_prefix + + if orchestration_mode.lower() == uniform_str.lower(): + computer_name_prefix = naming_prefix + + if os_version and os_version != 'latest': + logger.warning('You are deploying VMSS pinned to a specific image version from Azure Marketplace. ' + 'Consider using "latest" as the image version.') + + vmss_resource = build_vmss_resource( + cmd=cmd, name=vmss_name, computer_name_prefix=computer_name_prefix, location=location, tags=tags, + overprovision=not disable_overprovision if orchestration_mode.lower() == uniform_str.lower() else None, + upgrade_policy_mode=upgrade_policy_mode, vm_sku=vm_sku, + instance_count=instance_count, ip_config_name=ip_config_name, nic_name=nic_name, subnet_id=subnet_id, + public_ip_per_vm=public_ip_per_vm, vm_domain_name=vm_domain_name, dns_servers=dns_servers, nsg=nsg, + accelerated_networking=accelerated_networking, admin_username=admin_username, + authentication_type=authentication_type, storage_profile=storage_profile, os_disk_name=os_disk_name, + disk_info=disk_info, os_type=os_type, image=image, admin_password=admin_password, + ssh_key_values=ssh_key_value, ssh_key_path=ssh_dest_key_path, os_publisher=os_publisher, os_offer=os_offer, + os_sku=os_sku, os_version=os_version, backend_address_pool_id=backend_address_pool_id, + inbound_nat_pool_id=inbound_nat_pool_id, health_probe=health_probe, + single_placement_group=single_placement_group, platform_fault_domain_count=platform_fault_domain_count, + custom_data=custom_data, secrets=secrets, license_type=license_type, zones=zones, priority=priority, + eviction_policy=eviction_policy, application_security_groups=application_security_groups, + ultra_ssd_enabled=ultra_ssd_enabled, proximity_placement_group=proximity_placement_group, + terminate_notification_time=terminate_notification_time, max_price=max_price, + scale_in_policy=scale_in_policy, 
os_disk_encryption_set=os_disk_encryption_set, + data_disk_encryption_sets=data_disk_encryption_sets, data_disk_iops=data_disk_iops, + data_disk_mbps=data_disk_mbps, automatic_repairs_grace_period=automatic_repairs_grace_period, + specialized=specialized, os_disk_size_gb=os_disk_size_gb, encryption_at_host=encryption_at_host, + host_group=host_group, max_batch_instance_percent=max_batch_instance_percent, + max_unhealthy_instance_percent=max_unhealthy_instance_percent, + max_unhealthy_upgraded_instance_percent=max_unhealthy_upgraded_instance_percent, + pause_time_between_batches=pause_time_between_batches, enable_cross_zone_upgrade=enable_cross_zone_upgrade, + prioritize_unhealthy_instances=prioritize_unhealthy_instances, edge_zone=edge_zone, user_data=user_data, + orchestration_mode=orchestration_mode, network_api_version=network_api_version, + enable_spot_restore=enable_spot_restore, spot_restore_timeout=spot_restore_timeout, + capacity_reservation_group=capacity_reservation_group, enable_auto_update=enable_auto_update, + patch_mode=patch_mode, enable_agent=enable_agent, security_type=security_type, + enable_secure_boot=enable_secure_boot, enable_vtpm=enable_vtpm, + automatic_repairs_action=automatic_repairs_action, v_cpus_available=v_cpus_available, + v_cpus_per_core=v_cpus_per_core, os_disk_security_encryption_type=os_disk_security_encryption_type, + os_disk_secure_vm_disk_encryption_set=os_disk_secure_vm_disk_encryption_set, + os_disk_delete_option=os_disk_delete_option, regular_priority_count=regular_priority_count, + regular_priority_percentage=regular_priority_percentage, disk_controller_type=disk_controller_type, + enable_osimage_notification=enable_osimage_notification, max_surge=max_surge, + enable_hibernation=enable_hibernation, enable_auto_os_upgrade=enable_auto_os_upgrade, + enable_proxy_agent=enable_proxy_agent, proxy_agent_mode=proxy_agent_mode, + security_posture_reference_id=security_posture_reference_id, + 
security_posture_reference_exclude_extensions=security_posture_reference_exclude_extensions, + enable_resilient_vm_creation=enable_resilient_creation, + enable_resilient_vm_deletion=enable_resilient_deletion, + additional_scheduled_events=additional_scheduled_events, + enable_user_reboot_scheduled_events=enable_user_reboot_scheduled_events, + enable_user_redeploy_scheduled_events=enable_user_redeploy_scheduled_events, + skuprofile_vmsizes=skuprofile_vmsizes, skuprofile_allostrat=skuprofile_allostrat, + skuprofile_rank=skuprofile_rank, + security_posture_reference_is_overridable=security_posture_reference_is_overridable, + zone_balance=zone_balance, wire_server_mode=wire_server_mode, imds_mode=imds_mode, + add_proxy_agent_extension=add_proxy_agent_extension, + wire_server_access_control_profile_reference_id=wire_server_access_control_profile_reference_id, + imds_access_control_profile_reference_id=imds_access_control_profile_reference_id, + enable_automatic_zone_balancing=enable_automatic_zone_balancing, + automatic_zone_balancing_strategy=automatic_zone_balancing_strategy, + automatic_zone_balancing_behavior=automatic_zone_balancing_behavior, + enable_automatic_repairs=enable_automatic_repairs) + + vmss_resource['dependsOn'] = vmss_dependencies + + if plan_name: + vmss_resource['plan'] = { + 'name': plan_name, + 'publisher': plan_publisher, + 'product': plan_product, + 'promotionCode': plan_promotion_code + } + + enable_local_identity = None + if assign_identity is not None: + vmss_resource['identity'], _, _, enable_local_identity = _build_identities_info( + assign_identity) + if identity_scope: + role_assignment_guid = str(_gen_guid()) + master_template.add_resource(build_msi_role_assignment(vmss_name, vmss_id, identity_role_id, + role_assignment_guid, identity_scope, False)) + if encryption_identity: + if 'identity' in vmss_resource and 'userAssignedIdentities' in vmss_resource['identity'] \ + and encryption_identity.lower() in \ + (k.lower() for k in 
vmss_resource['identity']['userAssignedIdentities'].keys()): + + if 'virtualMachineProfile' not in vmss_resource['properties']: + vmss_resource['properties']['virtualMachineProfile'] = {} + if 'securityProfile' not in vmss_resource['properties']['virtualMachineProfile']: + vmss_resource['properties']['virtualMachineProfile']['securityProfile'] = {} + if 'encryptionIdentity' not in vmss_resource['properties']['virtualMachineProfile']['securityProfile']: + vmss_resource['properties']['virtualMachineProfile']['securityProfile']['encryptionIdentity'] = {} + + vmss_securityProfile_EncryptionIdentity \ + = vmss_resource['properties']['virtualMachineProfile']['securityProfile']['encryptionIdentity'] + + if 'userAssignedIdentityResourceId' not in vmss_securityProfile_EncryptionIdentity or \ + vmss_securityProfile_EncryptionIdentity['userAssignedIdentityResourceId'] \ + != encryption_identity: + vmss_securityProfile_EncryptionIdentity['userAssignedIdentityResourceId'] = encryption_identity + vmss_resource['properties']['virtualMachineProfile']['securityProfile']['encryptionIdentity'] \ + = vmss_securityProfile_EncryptionIdentity + else: + raise ArgumentUsageError("Encryption Identity should be an ARM Resource ID of one of the " + "user assigned identities associated to the resource") + else: + raise CLIError('usage error: --orchestration-mode (Uniform | Flexible)') + + master_template.add_resource(vmss_resource) + master_template.add_output('VMSS', vmss_name, 'Microsoft.Compute', 'virtualMachineScaleSets', + output_type='object') + + if admin_password: + master_template.add_secure_parameter('adminPassword', admin_password) + + template = master_template.build() + parameters = master_template.build_parameters() + + # deploy ARM template + deployment_name = 'vmss_deploy_' + random_string(32) + client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES, + aux_subscriptions=aux_subscriptions).deployments + + DeploymentProperties = 
cmd.get_models('DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES) + properties = DeploymentProperties(template=template, parameters=parameters, mode='incremental') + + if validate: + from azure.cli.command_modules.vm._vm_utils import log_pprint_template + log_pprint_template(template) + log_pprint_template(parameters) + + Deployment = cmd.get_models('Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES) + deployment = Deployment(properties=properties) + if validate: + if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES): + validation_poller = client.begin_validate(resource_group_name, deployment_name, deployment) + return LongRunningOperation(cmd.cli_ctx)(validation_poller) + + return client.validate(resource_group_name, deployment_name, deployment) + + # creates the VMSS deployment + deployment_result = DeploymentOutputLongRunningOperation(cmd.cli_ctx)( + sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, deployment_name, deployment)) + + if orchestration_mode.lower() == uniform_str.lower() and assign_identity is not None: + vmss_info = get_vmss(cmd, resource_group_name, vmss_name) + if enable_local_identity and not identity_scope: + _show_missing_access_warning(resource_group_name, vmss_name, 'vmss') + deployment_result['vmss']['identity'] = _construct_identity_info(identity_scope, identity_role, + vmss_info.identity.principal_id, + vmss_info.identity.user_assigned_identities) + # Guest Attestation Extension and enable System Assigned MSI by default + is_trusted_launch = security_type and security_type.lower() == 'trustedlaunch' and\ + enable_vtpm and enable_secure_boot + is_confidential_vm = security_type and security_type.lower() == 'confidentialvm' + if (is_trusted_launch or is_confidential_vm) and enable_integrity_monitoring: + client = _compute_client_factory(cmd.cli_ctx) + vmss = client.virtual_machine_scale_sets.get(resource_group_name, vmss_name) + 
vmss.virtual_machine_profile.storage_profile.image_reference = None + VirtualMachineScaleSetExtension, VirtualMachineScaleSetExtensionProfile = cmd.get_models( + 'VirtualMachineScaleSetExtension', 'VirtualMachineScaleSetExtensionProfile') + if vmss.virtual_machine_profile.storage_profile.os_disk.os_type == 'Linux': + publisher = 'Microsoft.Azure.Security.LinuxAttestation' + if vmss.virtual_machine_profile.storage_profile.os_disk.os_type == 'Windows': + publisher = 'Microsoft.Azure.Security.WindowsAttestation' + version = _normalize_extension_version(cmd.cli_ctx, publisher, 'GuestAttestation', None, vmss.location) + ext = VirtualMachineScaleSetExtension(name='GuestAttestation', + publisher=publisher, + type_properties_type='GuestAttestation', + protected_settings=None, + type_handler_version=version, + settings=None, + auto_upgrade_minor_version=True, + provision_after_extensions=None, + enable_automatic_upgrade=not disable_integrity_monitoring_autoupgrade) + if not vmss.virtual_machine_profile.extension_profile: + vmss.virtual_machine_profile.extension_profile = VirtualMachineScaleSetExtensionProfile(extensions=[]) + vmss.virtual_machine_profile.extension_profile.extensions.append(ext) + try: + LongRunningOperation(cmd.cli_ctx)(client.virtual_machine_scale_sets.begin_create_or_update( + resource_group_name, vmss_name, vmss)) + logger.info('Guest Attestation Extension has been successfully installed by default' + 'when Trusted Launch configuration is met') + VirtualMachineScaleSetVMInstanceRequiredIDs = cmd.get_models('VirtualMachineScaleSetVMInstanceRequiredIDs') + instance_ids = VirtualMachineScaleSetVMInstanceRequiredIDs(instance_ids=['*']) + LongRunningOperation(cmd.cli_ctx)(client.virtual_machine_scale_sets.begin_update_instances( + resource_group_name, vmss_name, instance_ids)) + except Exception as e: + error_type = "Trusted Launch" if is_trusted_launch else "Confidential VM" + logger.error('Failed to install Guest Attestation Extension for %s. 
%s', error_type, e)

    return deployment_result


def _build_identities_info(identities):
    """Convert a list of identity ids into the ARM ``identity`` payload for a VM/VMSS.

    `MSI_LOCAL_ID` (or an empty list) selects SystemAssigned; every other entry is
    treated as a user-assigned identity resource id.

    Returns a 4-tuple: (identity dict for the ARM template, comma-joined type string,
    list of external identity ids, bool: SystemAssigned requested).
    """
    from ._vm_utils import MSI_LOCAL_ID
    identities = identities or []
    identity_types = []
    # An empty list means "--assign-identity" with no values: default to SystemAssigned.
    if not identities or MSI_LOCAL_ID in identities:
        identity_types.append('SystemAssigned')
    external_identities = [x for x in identities if x != MSI_LOCAL_ID]
    if external_identities:
        identity_types.append('UserAssigned')
    identity_types = ','.join(identity_types)
    info = {'type': identity_types}
    if external_identities:
        # ARM expects a map of identity resource id -> (empty) properties object.
        info['userAssignedIdentities'] = {e: {} for e in external_identities}
    return (info, identity_types, external_identities, 'SystemAssigned' in identity_types)


def deallocate_vmss(cmd, resource_group_name, vm_scale_set_name, instance_ids=None, no_wait=False, hibernate=None):
    """Deallocate a VM scale set, or a single instance when exactly one instance id is given.

    ``hibernate`` is only forwarded on the scale-set-level operation (see workaround note below).
    """
    from .aaz.latest.vmss import Deallocate as VmssDeallocate
    from .aaz.latest.vmss.vms import Deallocate as VmssVmsDeallocate
    # This is a workaround because the REST service of `VirtualMachineScaleSetVMs#begin_deallocate`
    # does not accept `hibernate` at present
    if instance_ids and len(instance_ids) == 1 and hibernate is None:
        command_args = {
            'instance_id': instance_ids[0],
            'resource_group': resource_group_name,
            'vm_scale_set_name': vm_scale_set_name,
            'no_wait': no_wait
        }
        return VmssVmsDeallocate(cli_ctx=cmd.cli_ctx)(command_args=command_args)

    command_args = {
        'resource_group': resource_group_name,
        'vm_scale_set_name': vm_scale_set_name,
        'instance_ids': instance_ids,
        'no_wait': no_wait
    }
    if hibernate is not None:
        command_args['hibernate'] = hibernate

    return VmssDeallocate(cli_ctx=cmd.cli_ctx)(command_args=command_args)


def get_vmss(cmd, resource_group_name, name, instance_id=None, include_user_data=False):
    """Fetch a VMSS, or one VMSS VM when ``instance_id`` is given.

    ``include_user_data`` requests the ``userData`` expansion; it is only passed through
    when the resolved API version supports the ``expand`` parameter.
    """
    client = _compute_client_factory(cmd.cli_ctx)

    expand = None
    if include_user_data:
        expand = 'userData'

    if instance_id is not None:
        # 'expand' on the instance-level GET requires api-version >= 2020-12-01.
        if cmd.supported_api_version(min_api='2020-12-01', operation_group='virtual_machine_scale_sets'):
            return client.virtual_machine_scale_set_vms.get(resource_group_name=resource_group_name,
                                                            vm_scale_set_name=name, instance_id=instance_id,
                                                            expand=expand)
        return client.virtual_machine_scale_set_vms.get(resource_group_name=resource_group_name,
                                                        vm_scale_set_name=name, instance_id=instance_id)

    # 'expand' on the scale-set-level GET requires api-version >= 2021-03-01.
    if cmd.supported_api_version(min_api='2021-03-01', operation_group='virtual_machine_scale_sets'):
        return client.virtual_machine_scale_sets.get(resource_group_name, name, expand=expand)
    return client.virtual_machine_scale_sets.get(resource_group_name, name)


def get_vmss_by_aaz(cmd, resource_group_name, name, instance_id=None, include_user_data=False):
    """aaz-based counterpart of get_vmss: returns the show result as a dict-like payload."""
    from .operations.vmss import VMSSShow
    from .operations.vmss_vms import VMSSVMSShow

    command_args = {
        'resource_group': resource_group_name,
        'vm_scale_set_name': name,
    }

    if include_user_data:
        command_args['expand'] = 'userData'

    if instance_id is not None:
        command_args['instance_id'] = instance_id
        return VMSSVMSShow(cli_ctx=cmd.cli_ctx)(command_args=command_args)
    return VMSSShow(cli_ctx=cmd.cli_ctx)(command_args=command_args)


def _check_vmss_hyper_v_generation(cli_ctx, vmss):
    """Warn or fail before switching a VMSS (SDK model) to the TrustedLaunch security type.

    Warns when the image is Hyper-V Generation V1 (or V2 with no security type set);
    raises when the VMSS is already ConfidentialVM, which cannot be downgraded.
    """
    hyper_v_generation = get_hyper_v_generation_from_vmss(
        cli_ctx, vmss.virtual_machine_profile.storage_profile.image_reference, vmss.location)
    security_profile = vmss.virtual_machine_profile.security_profile
    security_type = security_profile.security_type if security_profile else None

    if hyper_v_generation == "V1" or (hyper_v_generation == "V2" and security_type is None):
        logger.warning("Trusted Launch security type is supported on Hyper-V Generation 2 OS Images. "
                       "To know more please visit "
                       "https://learn.microsoft.com/en-us/azure/virtual-machines/trusted-launch")
    elif hyper_v_generation == "V2" and security_type == "ConfidentialVM":
        from azure.cli.core.azclierror import InvalidArgumentValueError
        raise InvalidArgumentValueError("{} is already configured with {}. "
                                        "Security Configuration cannot be updated from ConfidentialVM to "
                                        "TrustedLaunch.".format(vmss.name, security_type))


def _check_vmss_hyper_v_generation_by_aaz(cli_ctx, vmss):
    """Dict-payload (aaz) variant of _check_vmss_hyper_v_generation; same warn/raise rules."""
    hyper_v_generation = get_hyper_v_generation_from_vmss_by_aaz(
        cli_ctx, vmss.get("virtualMachineProfile", {}).get("storageProfile", {}).get("imageReference", {}), vmss["location"])  # pylint: disable=line-too-long
    security_profile = vmss.get("virtualMachineProfile", {}).get("securityProfile", {})
    security_type = security_profile.get("securityType", None)

    if hyper_v_generation == "V1" or (hyper_v_generation == "V2" and security_type is None):
        logger.warning("Trusted Launch security type is supported on Hyper-V Generation 2 OS Images. "
                       "To know more please visit "
                       "https://learn.microsoft.com/en-us/azure/virtual-machines/trusted-launch")
    elif hyper_v_generation == "V2" and security_type == "ConfidentialVM":
        from azure.cli.core.azclierror import InvalidArgumentValueError
        raise InvalidArgumentValueError("{} is already configured with {}. "
                                        "Security Configuration cannot be updated from ConfidentialVM to "
                                        "TrustedLaunch.".format(vmss["name"], security_type))


def get_vmss_modified(cmd, resource_group_name, name, instance_id=None, security_type=None):
    """Fetch a VMSS (or one instance) with its image reference blanked out.

    Clearing ``image_reference`` avoids an unnecessary image permission check on the
    subsequent update; TrustedLaunch requests are validated first.
    """
    client = _compute_client_factory(cmd.cli_ctx)
    if instance_id is not None:
        vms = client.virtual_machine_scale_set_vms.get(resource_group_name=resource_group_name,
                                                       vm_scale_set_name=name, instance_id=instance_id)
        # To avoid unnecessary permission check of image
        if hasattr(vms, "storage_profile") and vms.storage_profile:
            vms.storage_profile.image_reference = None
        return vms

    vmss = client.virtual_machine_scale_sets.get(resource_group_name, name)
    if security_type == 'TrustedLaunch':
        _check_vmss_hyper_v_generation(cmd.cli_ctx, vmss)
    # To avoid unnecessary permission check of image
    if hasattr(vmss, "virtual_machine_profile") and vmss.virtual_machine_profile \
            and vmss.virtual_machine_profile.storage_profile:
        vmss.virtual_machine_profile.storage_profile.image_reference = None
    return vmss


def get_vmss_modified_by_aaz(cmd, resource_group_name, name, instance_id=None, security_type=None):
    """aaz/dict-payload variant of get_vmss_modified (same image-reference clearing)."""
    if instance_id is not None:
        from .operations.vmss_vms import VMSSVMSShow
        vms = VMSSVMSShow(cli_ctx=cmd.cli_ctx)(command_args={
            'resource_group': resource_group_name,
            "vm_scale_set_name": name,
            "instance_id": instance_id
        })

        # To avoid unnecessary permission check of image
        if vms.get("storageProfile", None) is not None:
            vms["storageProfile"]["imageReference"] = None
        return vms

    from .operations.vmss import VMSSShow
    vmss = VMSSShow(cli_ctx=cmd.cli_ctx)(command_args={
        'resource_group': resource_group_name,
        "vm_scale_set_name": name,
    })

    if security_type == 'TrustedLaunch':
        _check_vmss_hyper_v_generation_by_aaz(cmd.cli_ctx, vmss)
    # To avoid unnecessary permission check of image
    if vmss.get("virtualMachineProfile", {}).get("storageProfile", None) is not None:
        vmss["virtualMachineProfile"]["storageProfile"]["imageReference"] = None
    return vmss


def get_instances_list(cmd, resource_group_name, virtual_machine_scale_set_name, expand=None, filter=None,
                       select=None, pagination_limit=None, pagination_token=None, resiliency_view=False, **kwargs):
    """List VMSS instances; with ``resiliency_view`` return each instance's resiliency view instead.

    NOTE(review): ``filter`` shadows the builtin, but is kept — it mirrors the CLI argument name.
    """
    # kwargs is reused as the aaz command_args dict; the explicit arguments are merged in.
    get_list_args = kwargs
    get_list_args['resource_group'] = resource_group_name
    get_list_args['virtual_machine_scale_set_name'] = virtual_machine_scale_set_name
    get_list_args['expand'] = expand
    get_list_args['filter'] = filter
    get_list_args['select'] = select
    get_list_args['pagination_limit'] = pagination_limit
    get_list_args['pagination_token'] = pagination_token

    from .operations.vmss import VMSSListInstances
    instances = VMSSListInstances(cli_ctx=cmd.cli_ctx)(command_args=get_list_args)

    if not resiliency_view:
        return instances

    instances_id = [instance['instanceId'] for instance in instances]

    # One extra call per instance: the resiliency view is only exposed per-VM.
    from .operations.vmss_vms import VMSSGetResiliencyView
    return [VMSSGetResiliencyView(cli_ctx=cmd.cli_ctx)(command_args={
        'instance_id': instance_id,
        'resource_group': resource_group_name,
        'vm_scale_set_name': virtual_machine_scale_set_name,
    }) for instance_id in instances_id]


def get_vmss_instance_view(cmd, resource_group_name, vm_scale_set_name, instance_id=None):
    """Return the instance view of a VMSS, of one instance, or of all instances ('*')."""
    if instance_id:
        if instance_id == '*':
            # Instance views of every VM: list instances expanded with their instanceView.
            from .aaz.latest.vmss import ListInstances as VMSSListInstances
            result = VMSSListInstances(cli_ctx=cmd.cli_ctx)(command_args={
                'resource_group': resource_group_name,
                'virtual_machine_scale_set_name': vm_scale_set_name,
                'select': 'instanceView',
                'expand': 'instanceView',
            })
            return [x.get("instanceView", None) for x in result if x is not None]

        from .aaz.latest.vmss.vms.instance_view import Show as VMSSVMSInstanceViewShow
        return VMSSVMSInstanceViewShow(cli_ctx=cmd.cli_ctx)(command_args={
            'resource_group': resource_group_name,
            'vm_scale_set_name': vm_scale_set_name,
            'instance_id': instance_id,
        })

    from .aaz.latest.vmss.instance_view import Show as VMSSInstanceViewShow
    return VMSSInstanceViewShow(cli_ctx=cmd.cli_ctx)(command_args={
        'resource_group': resource_group_name,
        'vm_scale_set_name': vm_scale_set_name,
    })


def list_vmss_instance_connection_info(cmd, resource_group_name, vm_scale_set_name):
    """Map VMSS instance ids to 'public-ip:port' endpoints via the attached load balancer.

    Supports both inbound NAT pools and NAT rule V2 (resolved from
    loadBalancerBackendAddresses). Uniform-mode only; raises for Flex mode and when no
    load balancer or no public frontend IP is attached.
    """
    from azure.mgmt.core.tools import parse_resource_id
    from .operations.vmss import VMSSShow

    LBShow = import_aaz_by_profile(cmd.cli_ctx.cloud.profile, "network.lb").Show
    PublicIPAddress = import_aaz_by_profile(cmd.cli_ctx.cloud.profile, "network.public_ip").Show

    command_args = {
        'resource_group': resource_group_name,
        'vm_scale_set_name': vm_scale_set_name
    }
    vmss = VMSSShow(cli_ctx=cmd.cli_ctx)(command_args=command_args)

    from ._vm_utils import raise_unsupported_error_for_flex_vmss_by_aaz
    raise_unsupported_error_for_flex_vmss_by_aaz(
        vmss, 'This command is not available for VMSS in Flex mode. '
              'Please use the "az network public-ip list/show" to retrieve networking information.')

    # find the load balancer
    nic_configs = \
        vmss.get('virtualMachineProfile', {}).get('networkProfile', {}).get('networkInterfaceConfigurations', [])
    primary_nic_config = next((n for n in nic_configs if n.get('primary')), {})
    # NOTE(review): the next() default is {} so this None-check can never fire — TODO confirm intent.
    if primary_nic_config is None:
        raise CLIError('could not find a primary NIC which is needed to search to load balancer')

    # The NAT pool / backend pool id on the primary NIC points at the balancer resource.
    res_id = None
    for ip in primary_nic_config.get('ipConfigurations', []):
        if len(ip.get('loadBalancerInboundNatPools', [])) > 0:
            res_id = ip['loadBalancerInboundNatPools'][0].get('id')
            break
        if len(ip.get('loadBalancerBackendAddressPools', [])) > 0:
            res_id = ip['loadBalancerBackendAddressPools'][0].get('id')
            break

    if not res_id:
        raise ResourceNotFoundError('No load balancer exists to retrieve public IP address')

    lb_info = parse_resource_id(res_id)
    lb_name = lb_info['name']
    lb_rg = lb_info['resource_group']

    # get public ip
    lb = LBShow(cli_ctx=cmd.cli_ctx)(command_args={
        'name': lb_name,
        'resource_group': lb_rg
    })
    if 'publicIPAddress' in lb['frontendIPConfigurations'][0]:
        res_id = lb['frontendIPConfigurations'][0]['publicIPAddress']['id']
        public_ip_info = parse_resource_id(res_id)
        public_ip_name = public_ip_info['name']
        public_ip_rg = public_ip_info['resource_group']
        public_ip = PublicIPAddress(cli_ctx=cmd.cli_ctx)(command_args={
            'name': public_ip_name,
            'resource_group': public_ip_rg
        })
        public_ip_address = public_ip['ipAddress'] if 'ipAddress' in public_ip else None
        # For NAT pool, get the frontend port and VMSS instance from inboundNatRules
        is_nat_pool = True
        instance_addresses = {}
        for rule in lb['inboundNatRules']:
            # If backend_ip_configuration does not exist, it means that NAT rule V2 is used
            if 'backendIPConfiguration' not in rule or not rule['backendIPConfiguration']:
                is_nat_pool = False
                break
            # child_name_1 of the backend ip-config id is the VMSS instance id segment.
            instance_id = parse_resource_id(rule['backendIPConfiguration']['id'])['child_name_1']
            instance_addresses['instance ' + instance_id] = '{}:{}'.format(public_ip_address,
                                                                           rule['frontendPort'])
        if is_nat_pool:
            return instance_addresses

        # For NAT rule V2, get the frontend port and VMSS instance from loadBalancerBackendAddresses
        for backend_address_pool in lb['backendAddressPools']:
            if 'loadBalancerBackendAddresses' not in backend_address_pool or \
                    not backend_address_pool['loadBalancerBackendAddresses']:
                raise CLIError('There is no connection information. '
                               'If you are using NAT rule V2, please confirm whether the load balancer SKU is Standard')

            for load_balancer_backend_addresse in backend_address_pool['loadBalancerBackendAddresses']:

                network_interface_ip_configuration = load_balancer_backend_addresse['networkInterfaceIPConfiguration']
                if not network_interface_ip_configuration or 'id' not in network_interface_ip_configuration:
                    continue
                instance_id = parse_resource_id(network_interface_ip_configuration['id'])['child_name_1']

                if not load_balancer_backend_addresse['inboundNatRulesPortMapping']:
                    continue
                frontend_port = load_balancer_backend_addresse['inboundNatRulesPortMapping'][0]['frontendPort']
                instance_addresses['instance ' + instance_id] = '{}:{}'.format(public_ip_address, frontend_port)

        return instance_addresses
    raise CLIError('The VM scale-set uses an internal load balancer, hence no connection information')


def list_vmss_instance_public_ips(cmd, resource_group_name, vm_scale_set_name):
    """List the public IPs of VMSS instances (Uniform mode only; raises for Flex)."""
    from .operations.vmss import VMSSShow
    ListInstancePublicIps = import_aaz_by_profile(cmd.cli_ctx.cloud.profile, "vmss").ListInstancePublicIps

    command_args = {
        'resource_group': resource_group_name,
        'vm_scale_set_name': vm_scale_set_name
    }
    vmss = VMSSShow(cli_ctx=cmd.cli_ctx)(command_args=command_args)
    from ._vm_utils import raise_unsupported_error_for_flex_vmss_by_aaz
    raise_unsupported_error_for_flex_vmss_by_aaz(
        vmss, 'This command is not available for VMSS in Flex mode. '
              'Please use the "az network public-ip list/show" to retrieve networking information.')

    result = ListInstancePublicIps(cli_ctx=cmd.cli_ctx)(command_args={
        'vmss_name': vm_scale_set_name,
        'resource_group': resource_group_name
    })
    # filter away over-provisioned instances which are deleted after 'create/update' returns
    return [r for r in result if 'ipAddress' in r and r['ipAddress']]


def reimage_vmss(cmd, resource_group_name, vm_scale_set_name, instance_ids=None,
                 force_update_os_disk_for_ephemeral=None, no_wait=False):
    """Reimage specific VMSS instances (ReimageAll) or the whole scale set (Reimage).

    ``force_update_os_disk_for_ephemeral`` is only forwarded on the scale-set-level call.
    """
    from .aaz.latest.vmss import Reimageall as VmssReimageAll, Reimage as VmssReimage
    if instance_ids:
        command_args = {
            'resource_group': resource_group_name,
            'vm_scale_set_name': vm_scale_set_name,
            'instance_ids': instance_ids,
            'no_wait': no_wait
        }
        return VmssReimageAll(cli_ctx=cmd.cli_ctx)(command_args=command_args)

    command_args = {
        'resource_group': resource_group_name,
        'vm_scale_set_name': vm_scale_set_name,
        'no_wait': no_wait
    }
    if force_update_os_disk_for_ephemeral is not None:
        command_args['force_update_os_disk_for_ephemeral'] = force_update_os_disk_for_ephemeral
    return VmssReimage(cli_ctx=cmd.cli_ctx)(command_args=command_args)


def restart_vmss(cmd, resource_group_name, vm_scale_set_name, instance_ids=None, no_wait=False):
    """Restart VMSS instances; no instance ids means all instances ('*')."""
    from .aaz.latest.vmss import Restart as VmssRestart
    if not instance_ids:
        instance_ids = ['*']
    command_args = {
        'resource_group': resource_group_name,
        'vm_scale_set_name': vm_scale_set_name,
        'instance_ids': instance_ids,
        'no_wait': no_wait
    }
    return VmssRestart(cli_ctx=cmd.cli_ctx)(command_args=command_args)


# pylint: disable=inconsistent-return-statements
def scale_vmss(cmd, resource_group_name, vm_scale_set_name, new_capacity, no_wait=False):
    """Set the VMSS sku capacity; no-op (returns None) when already at the target capacity.

    Only location, sku and (when present) extendedLocation are sent on the update,
    keeping the PUT payload minimal.
    """
    VirtualMachineScaleSet = cmd.get_models('VirtualMachineScaleSet')
    client = _compute_client_factory(cmd.cli_ctx)
    vmss = client.virtual_machine_scale_sets.get(resource_group_name, vm_scale_set_name)
    # pylint: disable=no-member
    if vmss.sku.capacity == new_capacity:
        return

    vmss.sku.capacity = new_capacity
    vmss_new = VirtualMachineScaleSet(location=vmss.location, sku=vmss.sku)
    if vmss.extended_location is not None:
        vmss_new.extended_location = vmss.extended_location
    return sdk_no_wait(no_wait, client.virtual_machine_scale_sets.begin_create_or_update,
                       resource_group_name, vm_scale_set_name, vmss_new)


def stop_vmss(cmd, resource_group_name, vm_scale_set_name, instance_ids=None, no_wait=False, skip_shutdown=False):
    """Power off VMSS instances (all, '*', when none given).

    ``skip_shutdown`` is only forwarded when the resolved api-version (>= 2020-06-01)
    supports it.
    """
    client = _compute_client_factory(cmd.cli_ctx)
    VirtualMachineScaleSetVMInstanceRequiredIDs = cmd.get_models('VirtualMachineScaleSetVMInstanceRequiredIDs')
    if instance_ids is None:
        instance_ids = ['*']
    instance_ids = VirtualMachineScaleSetVMInstanceRequiredIDs(instance_ids=instance_ids)
    if cmd.supported_api_version(min_api='2020-06-01', operation_group='virtual_machine_scale_sets'):
        return sdk_no_wait(
            no_wait, client.virtual_machine_scale_sets.begin_power_off, resource_group_name, vm_scale_set_name,
            vm_instance_i_ds=instance_ids, skip_shutdown=skip_shutdown)
    return sdk_no_wait(no_wait, client.virtual_machine_scale_sets.begin_power_off, resource_group_name,
                       vm_scale_set_name, vm_instance_i_ds=instance_ids)


def update_vmss_instances(cmd, resource_group_name, vm_scale_set_name, instance_ids, no_wait=False):
    """Manually upgrade the given VMSS instances to the latest scale-set model."""
    from .aaz.latest.vmss import Manualupgrade
    return Manualupgrade(cli_ctx=cmd.cli_ctx)(command_args={
        'resource_group': resource_group_name,
        'vm_scale_set_name': vm_scale_set_name,
        'instance_ids': instance_ids,
        'no_wait': no_wait,
    })


def update_vmss(cmd, resource_group_name, name, license_type=None, no_wait=False, instance_id=None,
                protect_from_scale_in=None, protect_from_scale_set_actions=None,
                enable_terminate_notification=None, terminate_notification_time=None, ultra_ssd_enabled=None,
                scale_in_policy=None, priority=None, max_price=None, proximity_placement_group=None,
enable_automatic_repairs=None, automatic_repairs_grace_period=None, max_batch_instance_percent=None, + max_unhealthy_instance_percent=None, max_unhealthy_upgraded_instance_percent=None, + pause_time_between_batches=None, enable_cross_zone_upgrade=None, prioritize_unhealthy_instances=None, + user_data=None, enable_spot_restore=None, spot_restore_timeout=None, capacity_reservation_group=None, + vm_sku=None, ephemeral_os_disk_placement=None, force_deletion=None, enable_secure_boot=None, + enable_vtpm=None, automatic_repairs_action=None, v_cpus_available=None, v_cpus_per_core=None, + regular_priority_count=None, regular_priority_percentage=None, disk_controller_type=None, + enable_osimage_notification=None, custom_data=None, enable_hibernation=None, + security_type=None, enable_proxy_agent=None, proxy_agent_mode=None, + security_posture_reference_id=None, security_posture_reference_exclude_extensions=None, + max_surge=None, enable_resilient_creation=None, enable_resilient_deletion=None, + ephemeral_os_disk=None, ephemeral_os_disk_option=None, zones=None, additional_scheduled_events=None, + enable_user_reboot_scheduled_events=None, enable_user_redeploy_scheduled_events=None, + upgrade_policy_mode=None, enable_auto_os_upgrade=None, skuprofile_vmsizes=None, + skuprofile_allostrat=None, skuprofile_rank=None, + security_posture_reference_is_overridable=None, zone_balance=None, + wire_server_mode=None, imds_mode=None, add_proxy_agent_extension=None, + wire_server_access_control_profile_reference_id=None, + imds_access_control_profile_reference_id=None, enable_automatic_zone_balancing=None, + automatic_zone_balancing_strategy=None, automatic_zone_balancing_behavior=None, **kwargs): + from .operations.vmss_vms import convert_show_result_to_snake_case as vmss_vms_convert_show_result_to_snake_case + from .operations.vmss import convert_show_result_to_snake_case as vmss_convert_show_result_to_snake_case + vmss = kwargs['parameters'] + if instance_id: + vmss = 
vmss_vms_convert_show_result_to_snake_case(vmss) + else: + vmss = vmss_convert_show_result_to_snake_case(vmss) + + if wire_server_access_control_profile_reference_id is not None or \ + imds_access_control_profile_reference_id is not None: + from .aaz.latest.vmss import Patch as VMSSPatchUpdate + + class VMSSUpdateReferenceId(VMSSPatchUpdate): + def _output(self, *args, **kwargs): + result = self.deserialize_output(self.ctx.vars.instance, client_flatten=False) + return result + + security_profile = {'proxy_agent_settings': {}} + if wire_server_access_control_profile_reference_id: + security_profile['proxy_agent_settings']['wire_server'] = { + 'in_vm_access_control_profile_reference_id': wire_server_access_control_profile_reference_id} + if imds_access_control_profile_reference_id: + security_profile['proxy_agent_settings']['imds'] = { + 'in_vm_access_control_profile_reference_id': imds_access_control_profile_reference_id} + + LongRunningOperation(cmd.cli_ctx)(VMSSUpdateReferenceId(cli_ctx=cmd.cli_ctx)(command_args={ + 'vm_scale_set_name': name, + 'resource_group': resource_group_name, + 'virtual_machine_profile': { + 'security_profile': security_profile + } + })) + vmss = get_vmss_modified_by_aaz(cmd, resource_group_name, name, instance_id, security_type) + if instance_id: + vmss = vmss_vms_convert_show_result_to_snake_case(vmss) + else: + vmss = vmss_convert_show_result_to_snake_case(vmss) + + if add_proxy_agent_extension is not None: + if instance_id: + if vmss.get("security_profile", None) is None: + vmss["security_profile"] = {} + if vmss["security_profile"].get("proxy_agent_settings", None) is None: + vmss["security_profile"]["proxy_agent_settings"] = {} + + vmss["security_profile"]["proxy_agent_settings"]["add_proxy_agent_extension"] = add_proxy_agent_extension + else: + if vmss.get("virtual_machine_profile", None) is None: + vmss["virtual_machine_profile"] = {} + if vmss["virtual_machine_profile"].get("security_profile", None) is None: + 
vmss["virtual_machine_profile"]["security_profile"] = {} + if vmss["virtual_machine_profile"]["security_profile"].get("proxy_agent_settings", None) is None: + vmss["virtual_machine_profile"]["security_profile"]["proxy_agent_settings"] = {} + + vmss["virtual_machine_profile"]["security_profile"]["proxy_agent_settings"]["add_proxy_agent_extension"] = \ + add_proxy_agent_extension + + # handle vmss instance update + from azure.cli.core.util import b64encode + + if instance_id is not None: + if license_type is not None: + vmss["license_type"] = license_type + + if user_data is not None: + vmss["user_data"] = b64encode(user_data) + + if vmss.get("protection_policy", None) is None: + vmss["protection_policy"] = {} + + if protect_from_scale_in is not None: + vmss["protection_policy"]["protect_from_scale_in"] = protect_from_scale_in + + if protect_from_scale_set_actions is not None: + vmss["protection_policy"]["protect_from_scale_set_actions"] = protect_from_scale_set_actions + + vmss["resource_group"] = resource_group_name + vmss["vm_scale_set_name"] = name + vmss["instance_id"] = instance_id + vmss["no_wait"] = no_wait + + from .operations.vmss_vms import VMSSVMSCreate + return VMSSVMSCreate(cli_ctx=cmd.cli_ctx)(command_args=vmss) + + # else handle vmss update + if license_type is not None: + if vmss.get("virtual_machine_profile", None) is None: + vmss["virtual_machine_profile"] = {} + vmss["virtual_machine_profile"]["license_type"] = license_type + + if user_data is not None: + if vmss.get("virtual_machine_profile", None) is None: + vmss["virtual_machine_profile"] = {} + vmss["virtual_machine_profile"]["user_data"] = b64encode(user_data) + + if v_cpus_available is not None or v_cpus_per_core is not None: + if vmss.get("virtual_machine_profile", None) is None: + vmss["virtual_machine_profile"] = {} + if vmss["virtual_machine_profile"].get("hardware_profile", None) is None: + vmss["virtual_machine_profile"]["hardware_profile"] = {} + if 
vmss["virtual_machine_profile"]["hardware_profile"].get("vm_size_properties", None) is None: + vmss["virtual_machine_profile"]["hardware_profile"]["vm_size_properties"] = {} + + if v_cpus_available is not None: + vmss["virtual_machine_profile"]["hardware_profile"]["vm_size_properties"]["v_cp_us_available"] = v_cpus_available # pylint: disable=line-too-long + if v_cpus_per_core is not None: + vmss["virtual_machine_profile"]["hardware_profile"]["vm_size_properties"]["v_cp_us_per_core"] = v_cpus_per_core # pylint: disable=line-too-long + + if capacity_reservation_group is not None: + if vmss.get("virtual_machine_profile", None) is None: + vmss["virtual_machine_profile"] = {} + if capacity_reservation_group == 'None': + capacity_reservation_group = None + + sub_resource = {"id": capacity_reservation_group} + capacity_reservation = {"capacity_reservation_group": sub_resource} + vmss["virtual_machine_profile"]["capacity_reservation"] = capacity_reservation + + if enable_terminate_notification is not None or terminate_notification_time is not None: + if vmss.get("virtual_machine_profile", None) is None: + vmss["virtual_machine_profile"] = {} + if vmss["virtual_machine_profile"].get("scheduled_events_profile", None) is None: + vmss["virtual_machine_profile"]["scheduled_events_profile"] = {} + vmss["virtual_machine_profile"]["scheduled_events_profile"]["terminate_notification_profile"] = \ + {"not_before_timeout": terminate_notification_time, + "enable": enable_terminate_notification} + + if additional_scheduled_events is not None or \ + enable_user_reboot_scheduled_events is not None or enable_user_redeploy_scheduled_events is not None: + if vmss.get("scheduled_events_policy", None) is None: + vmss["scheduled_events_policy"] = {} + + if additional_scheduled_events is not None: + if vmss["scheduled_events_policy"].get("scheduled_events_additional_publishing_targets", None) is None: + vmss["scheduled_events_policy"]["scheduled_events_additional_publishing_targets"] = {} + if 
vmss["scheduled_events_policy"]["scheduled_events_additional_publishing_targets"].get("event_grid_and_resource_graph", None) is None: # pylint: disable=line-too-long + vmss["scheduled_events_policy"]["scheduled_events_additional_publishing_targets"]["event_grid_and_resource_graph"] = {} # pylint: disable=line-too-long + vmss["scheduled_events_policy"]["scheduled_events_additional_publishing_targets"][ + "event_grid_and_resource_graph"]["enable"] = additional_scheduled_events + + if enable_user_redeploy_scheduled_events is not None: + if vmss["scheduled_events_policy"].get("user_initiated_redeploy", None) is None: + vmss["scheduled_events_policy"]["user_initiated_redeploy"] = {} + vmss["scheduled_events_policy"]["user_initiated_redeploy"]["automatically_approve"] \ + = enable_user_redeploy_scheduled_events + + if enable_user_reboot_scheduled_events is not None: + if vmss["scheduled_events_policy"].get("user_initiated_reboot", None) is None: + vmss["scheduled_events_policy"]["user_initiated_reboot"] = {} + vmss["scheduled_events_policy"]["user_initiated_reboot"][ + "automatically_approve"] = enable_user_reboot_scheduled_events + + if enable_osimage_notification is not None: + if vmss.get("virtual_machine_profile", None) is None: + vmss["virtual_machine_profile"] = {} + if vmss["virtual_machine_profile"].get("scheduled_events_profile", None) is None: + vmss["virtual_machine_profile"]["scheduled_events_profile"] = {} + vmss["virtual_machine_profile"]["scheduled_events_profile"]["os_image_notification_profile"] = { + "enable": enable_osimage_notification + } + + if enable_automatic_repairs is not None or automatic_repairs_grace_period is not None or automatic_repairs_action is not None: # pylint: disable=line-too-long + if vmss.get("automatic_repairs_policy", None) is None: + vmss["automatic_repairs_policy"] = {} + if enable_automatic_repairs is not None: + vmss["automatic_repairs_policy"]["enabled"] = enable_automatic_repairs + if automatic_repairs_grace_period is not 
None: + vmss["automatic_repairs_policy"]["grace_period"] = automatic_repairs_grace_period + if automatic_repairs_action is not None: + vmss["automatic_repairs_policy"]["repair_action"] = automatic_repairs_action + + if ultra_ssd_enabled is not None: + if vmss.get("additional_capabilities", None) is None: + vmss["additional_capabilities"] = {"ultra_ssd_enabled": ultra_ssd_enabled} + else: + vmss["additional_capabilities"]["ultra_ssd_enabled"] = ultra_ssd_enabled + + if scale_in_policy is not None or force_deletion is not None: + if vmss.get("scale_in_policy", None) is None: + vmss["scale_in_policy"] = {} + if scale_in_policy is not None: + vmss["scale_in_policy"]["rules"] = scale_in_policy + if force_deletion is not None: + vmss["scale_in_policy"]["force_deletion"] = force_deletion + + if enable_spot_restore is not None: + if vmss.get("spot_restore_policy", None) is None: + vmss["spot_restore_policy"] = {} + vmss["spot_restore_policy"]["enabled"] = enable_spot_restore + + if spot_restore_timeout is not None: + if vmss.get("spot_restore_policy", None) is None: + vmss["spot_restore_policy"] = {} + vmss["spot_restore_policy"]["restore_timeout"] = spot_restore_timeout + + if priority is not None: + if vmss.get("virtual_machine_profile", None) is None: + vmss["virtual_machine_profile"] = {} + vmss["virtual_machine_profile"]["priority"] = priority + + if max_price is not None: + if vmss.get("virtual_machine_profile", None) is None: + vmss["virtual_machine_profile"] = {} + if vmss["virtual_machine_profile"].get("billing_profile", None) is None: + vmss["virtual_machine_profile"]["billing_profile"] = {} + vmss["virtual_machine_profile"]["billing_profile"]["max_price"] = max_price + + if security_type is not None or enable_secure_boot is not None or enable_vtpm is not None: + if vmss.get("virtual_machine_profile", None) is None: + vmss["virtual_machine_profile"] = {} + + security_profile = vmss["virtual_machine_profile"].get("security_profile", {}) + prev_security_type = 
security_profile.get("security_type", None) + # At present, `SecurityType` has options `TrustedLaunch` and `Standard` + if security_type == 'TrustedLaunch' and prev_security_type != security_type: + vmss["virtual_machine_profile"]["security_profile"] = { + 'security_type': security_type, + 'uefi_settings': { + 'secure_boot_enabled': enable_secure_boot if enable_secure_boot is not None else False, + 'v_tpm_enabled': enable_vtpm if enable_vtpm is not None else True + } + } + elif security_type == 'Standard': + if prev_security_type == 'TrustedLaunch': + logger.warning('Turning off Trusted launch disables foundational security for your VMs. ' + 'For more information, visit https://aka.ms/TrustedLaunch') + vmss["virtual_machine_profile"]["security_profile"] = { + 'security_type': security_type, + 'uefi_settings': None + } + else: + vmss["virtual_machine_profile"]["security_profile"] = { + 'uefi_settings': { + 'secure_boot_enabled': enable_secure_boot, + 'v_tpm_enabled': enable_vtpm + }} + + if enable_proxy_agent is not None or wire_server_mode is not None or imds_mode is not None: + if vmss.get("virtual_machine_profile", None) is None: + vmss["virtual_machine_profile"] = {} + + if vmss["virtual_machine_profile"].get("security_profile", None) is None: + vmss["virtual_machine_profile"]["security_profile"] = { + "proxy_agent_settings": { + "wire_server": {}, + "imds": {} + } + } + elif vmss["virtual_machine_profile"]["security_profile"].get("proxy_agent_settings", None) is None: + vmss["virtual_machine_profile"]["security_profile"]["proxy_agent_settings"] = { + "wire_server": {}, "imds": {} + } + else: + if vmss["virtual_machine_profile"]["security_profile"]["proxy_agent_settings"].get("wire_server", None) is None: # pylint: disable=line-too-long + vmss["virtual_machine_profile"]["security_profile"]["proxy_agent_settings"]["wire_server"] = {} + if vmss["virtual_machine_profile"]["security_profile"]["proxy_agent_settings"].get("imds", None) is None: + 
vmss["virtual_machine_profile"]["security_profile"]["proxy_agent_settings"]["imds"] = {} + + if enable_proxy_agent is not None: + vmss["virtual_machine_profile"]["security_profile"]["proxy_agent_settings"]["enabled"] = enable_proxy_agent + if wire_server_mode is not None: + vmss["virtual_machine_profile"]["security_profile"]["proxy_agent_settings"]["wire_server"]["mode"] \ + = wire_server_mode + if imds_mode is not None: + vmss["virtual_machine_profile"]["security_profile"]["proxy_agent_settings"]["imds"]["mode"] = imds_mode + + if regular_priority_count is not None or regular_priority_percentage is not None: + if vmss.get("orchestration_mode", None) != 'Flexible': + raise ValidationError("--regular-priority-count/--regular-priority-percentage is only available for" + " VMSS with flexible orchestration mode") + if vmss.get("priority_mix_policy", None) is None: + vmss["priority_mix_policy"] = { + 'base_regular_priority_count': regular_priority_count, + 'regular_priority_percentage_above_base': regular_priority_percentage + } + else: + if regular_priority_count is not None: + vmss["priority_mix_policy"]["base_regular_priority_count"] = regular_priority_count + if regular_priority_percentage is not None: + vmss["priority_mix_policy"]["regular_priority_percentage_above_base"] = regular_priority_percentage + + if proximity_placement_group is not None: + vmss["proximity_placement_group"] = {'id': proximity_placement_group} + + # pylint: disable=too-many-boolean-expressions + if max_batch_instance_percent is not None or max_unhealthy_instance_percent is not None \ + or max_unhealthy_upgraded_instance_percent is not None or pause_time_between_batches is not None \ + or enable_cross_zone_upgrade is not None or prioritize_unhealthy_instances is not None \ + or max_surge is not None: + if vmss.get("upgrade_policy", None) is None: + vmss["upgrade_policy"] = {"rolling_upgrade_policy": None} + if vmss["upgrade_policy"].get("rolling_upgrade_policy", None) is None: + 
vmss["upgrade_policy"]["rolling_upgrade_policy"] = { + 'max_batch_instance_percent': max_batch_instance_percent, + 'max_unhealthy_instance_percent': max_unhealthy_instance_percent, + 'max_unhealthy_upgraded_instance_percent': max_unhealthy_upgraded_instance_percent, + 'pause_time_between_batches': pause_time_between_batches, + 'enable_cross_zone_upgrade': enable_cross_zone_upgrade, + 'prioritize_unhealthy_instances': prioritize_unhealthy_instances, + 'max_surge': max_surge + } + else: + if max_batch_instance_percent is not None: + vmss["upgrade_policy"]["rolling_upgrade_policy"]["max_batch_instance_percent"] \ + = max_batch_instance_percent + if max_unhealthy_instance_percent is not None: + vmss["upgrade_policy"]["rolling_upgrade_policy"]["max_unhealthy_instance_percent"] \ + = max_unhealthy_instance_percent + if max_unhealthy_upgraded_instance_percent is not None: + vmss["upgrade_policy"]["rolling_upgrade_policy"]["max_unhealthy_upgraded_instance_percent"] \ + = max_unhealthy_upgraded_instance_percent + if pause_time_between_batches is not None: + vmss["upgrade_policy"]["rolling_upgrade_policy"]["pause_time_between_batches"] \ + = pause_time_between_batches + if enable_cross_zone_upgrade is not None: + vmss["upgrade_policy"]["rolling_upgrade_policy"]["enable_cross_zone_upgrade"] \ + = enable_cross_zone_upgrade + if prioritize_unhealthy_instances is not None: + vmss["upgrade_policy"]["rolling_upgrade_policy"]["prioritize_unhealthy_instances"] \ + = prioritize_unhealthy_instances + if max_surge is not None: + vmss["upgrade_policy"]["rolling_upgrade_policy"]["max_surge"] = max_surge + + if upgrade_policy_mode is not None: + if vmss.get("upgrade_policy", None) is None: + vmss["upgrade_policy"] = {} + vmss["upgrade_policy"]["mode"] = upgrade_policy_mode + + if enable_auto_os_upgrade is not None: + if vmss.get("upgrade_policy", None) is None: + vmss["upgrade_policy"] = {} + if vmss["upgrade_policy"].get("automatic_os_upgrade_policy", None) is None: + 
vmss["upgrade_policy"]["automatic_os_upgrade_policy"] \ + = {'enable_automatic_os_upgrade': enable_auto_os_upgrade} + else: + vmss["upgrade_policy"]["automatic_os_upgrade_policy"]["enable_automatic_os_upgrade"] \ + = enable_auto_os_upgrade + + if vm_sku is not None: + if vmss.get("sku", {}).get("name", None) == vm_sku: + logger.warning("VMSS sku is already %s", vm_sku) + else: + if vmss.get("sku", None) is None: + vmss["sku"] = {} + vmss["sku"]["name"] = vm_sku + + sku_profile = {} + if skuprofile_vmsizes is not None or skuprofile_allostrat is not None: + if skuprofile_vmsizes is not None: + sku_profile_vmsizes_list = [] + for vm_size in skuprofile_vmsizes: + vmsize_obj = { + 'name': vm_size + } + sku_profile_vmsizes_list.append(vmsize_obj) + sku_profile['vm_sizes'] = sku_profile_vmsizes_list + + if skuprofile_rank: + for vm_size, rank in zip(sku_profile_vmsizes_list, skuprofile_rank): + vm_size['rank'] = rank + + if skuprofile_allostrat is not None: + sku_profile['allocation_strategy'] = skuprofile_allostrat + vmss["sku_profile"] = sku_profile + + if ephemeral_os_disk_placement is not None or ephemeral_os_disk_option is not None or ephemeral_os_disk is not None: + if vmss.get("virtual_machine_profile", None) is None: + vmss["virtual_machine_profile"] = {} + if vmss["virtual_machine_profile"].get("storage_profile", None) is None: + vmss["virtual_machine_profile"]["storage_profile"] = {} + if vmss["virtual_machine_profile"]["storage_profile"].get("os_disk", None) is None: + vmss["virtual_machine_profile"]["storage_profile"]["os_disk"] = {} + if vmss["virtual_machine_profile"]["storage_profile"]["os_disk"].get("diff_disk_settings", None) is None: + vmss["virtual_machine_profile"]["storage_profile"]["os_disk"]["diff_disk_settings"] = {} + + if ephemeral_os_disk_placement is not None: + vmss["virtual_machine_profile"]["storage_profile"]["os_disk"]["diff_disk_settings"]["placement"] \ + = ephemeral_os_disk_placement + if ephemeral_os_disk_option is not None: + 
vmss["virtual_machine_profile"]["storage_profile"]["os_disk"]["diff_disk_settings"]["option"] \ + = ephemeral_os_disk_option + if ephemeral_os_disk is False: + vmss["virtual_machine_profile"]["storage_profile"]["os_disk"]["diff_disk_settings"] = {} + + if disk_controller_type is not None: + if vmss.get("virtual_machine_profile", None) is None: + vmss["virtual_machine_profile"] = {} + if vmss["virtual_machine_profile"].get("storage_profile", None) is None: + vmss["virtual_machine_profile"]["storage_profile"] = {} + vmss["virtual_machine_profile"]["storage_profile"]["disk_controller_type"] = disk_controller_type + + if custom_data is not None: + if vmss.get("virtual_machine_profile", None) is None: + vmss["virtual_machine_profile"] = {} + if vmss["virtual_machine_profile"].get("os_profile", None) is None: + vmss["virtual_machine_profile"]["os_profile"] = {} + custom_data = read_content_if_is_file(custom_data) + vmss["virtual_machine_profile"]["os_profile"]["custom_data"] = b64encode(custom_data) + + if enable_hibernation is not None: + if vmss.get("additional_capabilities", None) is None: + vmss["additional_capabilities"] = {"hibernation_enabled": enable_hibernation} + else: + vmss["additional_capabilities"]["hibernation_enabled"] = enable_hibernation + + if security_posture_reference_id is not None or security_posture_reference_exclude_extensions is not None or \ + security_posture_reference_is_overridable is not None: + if vmss.get("virtual_machine_profile", None) is None: + vmss["virtual_machine_profile"] = {} + if vmss["virtual_machine_profile"].get("security_posture_reference", None) is None: + vmss["virtual_machine_profile"]["security_posture_reference"] = {} + + if security_posture_reference_id is not None: + vmss["virtual_machine_profile"]["security_posture_reference"]["id"] = security_posture_reference_id + if security_posture_reference_exclude_extensions is not None: + vmss["virtual_machine_profile"]["security_posture_reference"]["exclude_extensions"] \ + = 
security_posture_reference_exclude_extensions + if security_posture_reference_is_overridable is not None: + vmss["virtual_machine_profile"]["security_posture_reference"]["is_overridable"] \ + = security_posture_reference_is_overridable + + if enable_resilient_creation is not None or enable_resilient_deletion is not None: + if vmss.get("resiliency_policy", None) is None: + vmss["resiliency_policy"] = {} + if enable_resilient_creation is not None: + vmss["resiliency_policy"]["resilient_vm_creation_policy"] = {'enabled': enable_resilient_creation} + if enable_resilient_deletion is not None: + vmss["resiliency_policy"]["resilient_vm_deletion_policy"] = {'enabled': enable_resilient_deletion} + + if enable_automatic_zone_balancing is not None or automatic_zone_balancing_strategy is not None or \ + automatic_zone_balancing_behavior is not None: + if vmss.get("resiliency_policy", None) is None: + vmss["resiliency_policy"] = {} + if vmss["resiliency_policy"].get("automatic_zone_rebalancing_policy", None) is None: + vmss["resiliency_policy"]["automatic_zone_rebalancing_policy"] = {} + + if enable_automatic_zone_balancing is not None: + vmss["resiliency_policy"]["automatic_zone_rebalancing_policy"]["enabled"] = enable_automatic_zone_balancing + + if automatic_zone_balancing_strategy is not None: + vmss["resiliency_policy"]["automatic_zone_rebalancing_policy"]["rebalance_strategy"] \ + = automatic_zone_balancing_strategy + + if automatic_zone_balancing_behavior is not None: + vmss["resiliency_policy"]["automatic_zone_rebalancing_policy"]["rebalance_behavior"] \ + = automatic_zone_balancing_behavior + + if zones is not None: + vmss["zones"] = zones + + if zone_balance is not None: + vmss["zone_balance"] = zone_balance + + vmss["resource_group"] = resource_group_name + vmss["vm_scale_set_name"] = name + vmss["no_wait"] = no_wait + + from .operations.vmss import VMSSCreate + return VMSSCreate(cli_ctx=cmd.cli_ctx)(command_args=vmss) + +# endregion + + +# region 
VirtualMachineScaleSets Diagnostics +def set_vmss_diagnostics_extension( + cmd, resource_group_name, vmss_name, settings, protected_settings=None, version=None, + no_auto_upgrade=False): + client = _compute_client_factory(cmd.cli_ctx) + vmss = client.virtual_machine_scale_sets.get(resource_group_name, vmss_name) + # pylint: disable=no-member + is_linux_os = _is_linux_os(vmss.virtual_machine_profile) + vm_extension_name = _LINUX_DIAG_EXT if is_linux_os else _WINDOWS_DIAG_EXT + if is_linux_os and vmss.virtual_machine_profile.extension_profile: # check incompatibles + exts = vmss.virtual_machine_profile.extension_profile.extensions or [] + major_ver = extension_mappings[_LINUX_DIAG_EXT]['version'].split('.', maxsplit=1)[0] + # For VMSS, we don't do auto-removal like VM because there is no reliable API to wait for + # the removal done before we can install the newer one + if next((e for e in exts if e.name == _LINUX_DIAG_EXT and + not e.type_handler_version.startswith(major_ver + '.')), None): + delete_cmd = 'az vmss extension delete -g {} --vmss-name {} -n {}'.format( + resource_group_name, vmss_name, vm_extension_name) + raise CLIError("There is an incompatible version of diagnostics extension installed. " + "Please remove it by running '{}', and retry. 
'az vmss update-instances'" + " might be needed if with manual upgrade policy".format(delete_cmd)) + + poller = set_vmss_extension(cmd, resource_group_name, vmss_name, vm_extension_name, + extension_mappings[vm_extension_name]['publisher'], + version or extension_mappings[vm_extension_name]['version'], + settings, + protected_settings, + no_auto_upgrade) + + result = LongRunningOperation(cmd.cli_ctx)(poller) + UpgradeMode = cmd.get_models('UpgradeMode') + if vmss.upgrade_policy.mode == UpgradeMode.manual: + poller2 = update_vmss_instances(cmd, resource_group_name, vmss_name, ['*']) + LongRunningOperation(cmd.cli_ctx)(poller2) + return result +# endregion + + +# region VirtualMachineScaleSets Disks (Managed) +def attach_managed_data_disk_to_vmss(cmd, resource_group_name, vmss_name, size_gb=None, instance_id=None, lun=None, + caching=None, disk=None, sku=None): + + def _init_data_disk(storage_profile, lun, existing_disk=None): + data_disks = storage_profile.data_disks or [] + if lun is None: + lun = _get_disk_lun(data_disks) + if existing_disk is None: + data_disk = DataDisk(lun=lun, create_option=DiskCreateOptionTypes.empty, disk_size_gb=size_gb, + caching=caching, managed_disk=ManagedDiskParameters(storage_account_type=sku)) + else: + data_disk = DataDisk(lun=lun, create_option=DiskCreateOptionTypes.attach, caching=caching, + managed_disk=ManagedDiskParameters(id=existing_disk, storage_account_type=sku)) + + data_disks.append(data_disk) + storage_profile.data_disks = data_disks + + DiskCreateOptionTypes, ManagedDiskParameters = cmd.get_models( + 'DiskCreateOptionTypes', 'ManagedDiskParameters') + if disk is None: + DataDisk = cmd.get_models('VirtualMachineScaleSetDataDisk') + else: + DataDisk = cmd.get_models('DataDisk') + + client = _compute_client_factory(cmd.cli_ctx) + if instance_id is None: + vmss = client.virtual_machine_scale_sets.get(resource_group_name, vmss_name) + # Avoid unnecessary permission error + 
vmss.virtual_machine_profile.storage_profile.image_reference = None + # pylint: disable=no-member + _init_data_disk(vmss.virtual_machine_profile.storage_profile, lun) + return client.virtual_machine_scale_sets.begin_create_or_update(resource_group_name, vmss_name, vmss) + + vmss_vm = client.virtual_machine_scale_set_vms.get(resource_group_name, vmss_name, instance_id) + # Avoid unnecessary permission error + vmss_vm.storage_profile.image_reference = None + _init_data_disk(vmss_vm.storage_profile, lun, disk) + return client.virtual_machine_scale_set_vms.begin_update(resource_group_name, vmss_name, instance_id, vmss_vm) + + +def detach_disk_from_vmss(cmd, resource_group_name, vmss_name, lun, instance_id=None): + client = _compute_client_factory(cmd.cli_ctx) + if instance_id is None: + vmss = client.virtual_machine_scale_sets.get(resource_group_name, vmss_name) + # Avoid unnecessary permission error + vmss.virtual_machine_profile.storage_profile.image_reference = None + # pylint: disable=no-member + data_disks = vmss.virtual_machine_profile.storage_profile.data_disks + else: + vmss_vm = client.virtual_machine_scale_set_vms.get(resource_group_name, vmss_name, instance_id) + # Avoid unnecessary permission error + vmss_vm.storage_profile.image_reference = None + data_disks = vmss_vm.storage_profile.data_disks + + if not data_disks: + raise CLIError("Data disk doesn't exist") + + leftovers = [d for d in data_disks if d.lun != lun] + if len(data_disks) == len(leftovers): + raise CLIError("Could not find the data disk with lun '{}'".format(lun)) + + if instance_id is None: + vmss.virtual_machine_profile.storage_profile.data_disks = leftovers + return client.virtual_machine_scale_sets.begin_create_or_update(resource_group_name, vmss_name, vmss) + vmss_vm.storage_profile.data_disks = leftovers + return client.virtual_machine_scale_set_vms.begin_update(resource_group_name, vmss_name, instance_id, vmss_vm) +# endregion + + +# region VirtualMachineScaleSets Extensions +def 
delete_vmss_extension(cmd, resource_group_name, vmss_name, extension_name): + client = _compute_client_factory(cmd.cli_ctx) + vmss = client.virtual_machine_scale_sets.get(resource_group_name=resource_group_name, vm_scale_set_name=vmss_name) + # Avoid unnecessary permission error + vmss.virtual_machine_profile.storage_profile.image_reference = None + # pylint: disable=no-member + if not vmss.virtual_machine_profile.extension_profile: + raise CLIError('Scale set has no extensions to delete') + + keep_list = [e for e in vmss.virtual_machine_profile.extension_profile.extensions + if e.name != extension_name] + if len(keep_list) == len(vmss.virtual_machine_profile.extension_profile.extensions): + raise CLIError('Extension {} not found'.format(extension_name)) + + vmss.virtual_machine_profile.extension_profile.extensions = keep_list + + return client.virtual_machine_scale_sets.begin_create_or_update(resource_group_name=resource_group_name, + vm_scale_set_name=vmss_name, parameters=vmss) + + +# pylint: disable=inconsistent-return-statements +def get_vmss_extension(cmd, resource_group_name, vmss_name, extension_name): + client = _compute_client_factory(cmd.cli_ctx) + vmss = client.virtual_machine_scale_sets.get(resource_group_name=resource_group_name, vm_scale_set_name=vmss_name) + # pylint: disable=no-member + if not vmss.virtual_machine_profile.extension_profile: + return + return next((e for e in vmss.virtual_machine_profile.extension_profile.extensions + if e.name == extension_name), None) + + +def list_vmss_extensions(cmd, resource_group_name, vmss_name): + client = _compute_client_factory(cmd.cli_ctx) + vmss = client.virtual_machine_scale_sets.get(resource_group_name=resource_group_name, vm_scale_set_name=vmss_name) + # pylint: disable=no-member + if vmss.virtual_machine_profile and vmss.virtual_machine_profile.extension_profile: + return vmss.virtual_machine_profile.extension_profile.extensions + return None + + +def set_vmss_extension(cmd, resource_group_name, 
vmss_name, extension_name, publisher, version=None, + settings=None, protected_settings=None, no_auto_upgrade=False, force_update=False, + no_wait=False, extension_instance_name=None, provision_after_extensions=None, + enable_auto_upgrade=None): + if not extension_instance_name: + extension_instance_name = extension_name + + auto_upgrade_extensions = ['CodeIntegrityAgent'] + if extension_name in auto_upgrade_extensions and enable_auto_upgrade is None: + enable_auto_upgrade = True + + client = _compute_client_factory(cmd.cli_ctx) + vmss = client.virtual_machine_scale_sets.get(resource_group_name=resource_group_name, vm_scale_set_name=vmss_name) + # Avoid unnecessary permission error + vmss.virtual_machine_profile.storage_profile.image_reference = None + VirtualMachineScaleSetExtension, VirtualMachineScaleSetExtensionProfile = cmd.get_models( + 'VirtualMachineScaleSetExtension', 'VirtualMachineScaleSetExtensionProfile') + + # pylint: disable=no-member + version = _normalize_extension_version(cmd.cli_ctx, publisher, extension_name, version, vmss.location) + extension_profile = vmss.virtual_machine_profile.extension_profile + if extension_profile: + extensions = extension_profile.extensions + if extensions: + extension_profile.extensions = [x for x in extensions if + x.type_properties_type.lower() != extension_name.lower() or x.publisher.lower() != publisher.lower()] # pylint: disable=line-too-long + + if cmd.supported_api_version(min_api='2019-07-01', operation_group='virtual_machine_scale_sets'): + ext = VirtualMachineScaleSetExtension(name=extension_instance_name, + publisher=publisher, + type_properties_type=extension_name, + protected_settings=protected_settings, + type_handler_version=version, + settings=settings, + auto_upgrade_minor_version=(not no_auto_upgrade), + provision_after_extensions=provision_after_extensions, + enable_automatic_upgrade=enable_auto_upgrade) + else: + ext = VirtualMachineScaleSetExtension(name=extension_instance_name, + 
publisher=publisher, + type=extension_name, + protected_settings=protected_settings, + type_handler_version=version, + settings=settings, + auto_upgrade_minor_version=(not no_auto_upgrade), + provision_after_extensions=provision_after_extensions, + enable_automatic_upgrade=enable_auto_upgrade) + + if force_update: + ext.force_update_tag = str(_gen_guid()) + + if not vmss.virtual_machine_profile.extension_profile: + vmss.virtual_machine_profile.extension_profile = VirtualMachineScaleSetExtensionProfile(extensions=[]) + vmss.virtual_machine_profile.extension_profile.extensions.append(ext) + + return sdk_no_wait(no_wait, client.virtual_machine_scale_sets.begin_create_or_update, + resource_group_name, vmss_name, vmss) + + +def set_orchestration_service_state(cmd, resource_group_name, vm_scale_set_name, service_name, action, no_wait=False): + # currently service_name has only one available value "AutomaticRepairs". And SDK does not accept service_name, + # instead SDK assign it to "AutomaticRepairs" in its own logic. As there may be more service name to be supported, + # we define service_name as a required parameter here to avoid introducing a breaking change in the future. 
+ client = _compute_client_factory(cmd.cli_ctx) + OrchestrationServiceStateInput = cmd.get_models('OrchestrationServiceStateInput') + state_input = OrchestrationServiceStateInput(service_name=service_name, action=action) + return sdk_no_wait(no_wait, client.virtual_machine_scale_sets.begin_set_orchestration_service_state, + resource_group_name, vm_scale_set_name, state_input) + + +def upgrade_vmss_extension(cmd, resource_group_name, vm_scale_set_name, no_wait=False): + client = _compute_client_factory(cmd.cli_ctx) + return sdk_no_wait(no_wait, client.virtual_machine_scale_set_rolling_upgrades.begin_start_extension_upgrade, + resource_group_name, vm_scale_set_name) +# endregion + + +# region VirtualMachineScaleSets RunCommand +def vmss_run_command_invoke(cmd, resource_group_name, vmss_name, command_id, instance_id, scripts=None, parameters=None): # pylint: disable=line-too-long + return run_command_invoke(cmd, resource_group_name, vmss_name, command_id, scripts, parameters, instance_id) + + +def vmss_run_command_create(cmd, + resource_group_name, + vmss_name, + instance_id, + run_command_name, + location, + tags=None, + script=None, + script_uri=None, + command_id=None, + parameters=None, + protected_parameters=None, + async_execution=None, + run_as_user=None, + run_as_password=None, + timeout_in_seconds=None, + output_blob_uri=None, + error_blob_uri=None, + no_wait=False): + from .aaz.latest.vmss.run_command import Create + args = {} + args['location'] = location + args['resource_group'] = resource_group_name + args['run_command_name'] = run_command_name + args['instance_id'] = instance_id + args['vmss_name'] = vmss_name + args['no_wait'] = no_wait + if tags is not None: + args['tags'] = tags + if script is not None: + args['script'] = script + if script_uri is not None: + args['script_uri'] = script_uri + if command_id is not None: + args['command_id'] = command_id + if parameters is not None: + auto_arg_name_num = 0 + args['parameters'] = [] + for p in 
parameters: + if '=' in p: + n, v = p.split('=', 1) + else: + auto_arg_name_num += 1 + n = 'arg{}'.format(auto_arg_name_num) + v = p + args['parameters'].append({'name': n, 'value': v}) + if protected_parameters is not None: + auto_arg_name_num = 0 + args['protected_parameters'] = [] + for p in protected_parameters: + if '=' in p: + n, v = p.split('=', 1) + else: + auto_arg_name_num += 1 + n = 'arg{}'.format(auto_arg_name_num) + v = p + args['protected_parameters'].append({'name': n, 'value': v}) + if async_execution is not None: + args['async_execution'] = async_execution + else: + args['async_execution'] = False + if run_as_user is not None: + args['run_as_user'] = run_as_user + if run_as_password is not None: + args['run_as_password'] = run_as_password + if timeout_in_seconds is not None: + args['timeout_in_seconds'] = timeout_in_seconds + if output_blob_uri is not None: + args['output_blob_uri'] = output_blob_uri + if error_blob_uri is not None: + args['error_blob_uri'] = error_blob_uri + return Create(cli_ctx=cmd.cli_ctx)(command_args=args) + + +def vmss_run_command_update(cmd, + resource_group_name, + vmss_name, + instance_id, + run_command_name, + location, + tags=None, + script=None, + script_uri=None, + command_id=None, + parameters=None, + protected_parameters=None, + async_execution=None, + run_as_user=None, + run_as_password=None, + timeout_in_seconds=None, + output_blob_uri=None, + error_blob_uri=None, + no_wait=False): + from .aaz.latest.vmss.run_command import Update as _Update + + class Update(_Update): + def pre_instance_update(self, instance): + if tags is not None: + instance.tags = tags + if location is not None: + instance.location = location + if script is not None: + instance.properties.source.script = script + if script_uri is not None: + instance.properties.source.script_uri = script_uri + if command_id is not None: + instance.properties.source.command_id = command_id + if parameters is not None: + auto_arg_name_num = 0 + _params = [] + for 
p in parameters: + if '=' in p: + n, v = p.split('=', 1) + else: + auto_arg_name_num += 1 + n = 'arg{}'.format(auto_arg_name_num) + v = p + _params.append({'name': n, 'value': v}) + instance.properties.parameters = _params + if protected_parameters is not None: + auto_arg_name_num = 0 + _params = [] + for p in protected_parameters: + if '=' in p: + n, v = p.split('=', 1) + else: + auto_arg_name_num += 1 + n = 'arg{}'.format(auto_arg_name_num) + v = p + _params.append({'name': n, 'value': v}) + instance.properties.protected_parameters = _params + if async_execution is not None: + instance.properties.async_execution = async_execution + else: + instance.properties.async_execution = False + if run_as_user is not None: + instance.properties.run_as_user = run_as_user + if run_as_password is not None: + instance.properties.run_as_password = run_as_password + if timeout_in_seconds is not None: + instance.properties.timeout_in_seconds = timeout_in_seconds + if output_blob_uri is not None: + instance.properties.output_blob_uri = output_blob_uri + if error_blob_uri is not None: + instance.properties.error_blob_uri = error_blob_uri + + args = {} + args['resource_group'] = resource_group_name + args['run_command_name'] = run_command_name + args['instance_id'] = instance_id + args['vmss_name'] = vmss_name + args['no_wait'] = no_wait + + return Update(cli_ctx=cmd.cli_ctx)(command_args=args) + + +def vmss_run_command_show(cmd, + resource_group_name, + vmss_name, + instance_id, + run_command_name, + expand=None, + instance_view=False): + from .aaz.latest.vmss.run_command import Show + if instance_view: + expand = 'instanceView' + return Show(cli_ctx=cmd.cli_ctx)(command_args={ + 'resource_group': resource_group_name, + 'vmss_name': vmss_name, + 'instance_id': instance_id, + 'run_command_name': run_command_name, + 'expand': expand + }) +# endregion + + +# region VirtualMachineScaleSets Identity +def remove_vmss_identity(cmd, resource_group_name, vmss_name, identities=None): + def 
setter(resource_group_name, vmss_name, vmss): + command_args = { + 'resource_group': resource_group_name, + 'vm_scale_set_name': vmss_name + } + + if vmss.get('identity') and vmss['identity'].get('type') == IdentityType.USER_ASSIGNED.value: + # NOTE: The literal 'UserAssigned' is intentionally appended as a marker for + # VMSSIdentityRemove._format_content, which uses it to apply special handling + # for purely user-assigned identities. It is not a real identity resource ID. + command_args['mi_user_assigned'] = \ + list(vmss.get('identity', {}).get('userAssignedIdentities', {}).keys()) + ['UserAssigned'] + elif vmss.get('identity') and vmss['identity'].get('type') == IdentityType.SYSTEM_ASSIGNED.value: + command_args['mi_user_assigned'] = [] + command_args['mi_system_assigned'] = 'True' + elif vmss.get('identity') and vmss['identity'].get('type') == IdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED.value: + command_args['mi_user_assigned'] = list(vmss.get('identity', {}).get('userAssignedIdentities', {}).keys()) + command_args['mi_system_assigned'] = 'True' + else: + command_args['mi_user_assigned'] = [] + + from .operations.vmss import VMSSIdentityRemove + return VMSSIdentityRemove(cli_ctx=cmd.cli_ctx)(command_args=command_args) + + if identities is None: + from ._vm_utils import MSI_LOCAL_ID + identities = [MSI_LOCAL_ID] + + return _remove_identities_by_aaz(cmd, resource_group_name, vmss_name, identities, get_vmss_by_aaz, setter) +# endregion + + +def create_gallery_image(cmd, resource_group_name, gallery_name, gallery_image_name, os_type, publisher, offer, sku, + os_state='Generalized', end_of_life_date=None, privacy_statement_uri=None, + release_note_uri=None, eula=None, description=None, location=None, + minimum_cpu_core=None, maximum_cpu_core=None, minimum_memory=None, maximum_memory=None, + disallowed_disk_types=None, plan_name=None, plan_publisher=None, plan_product=None, tags=None, + hyper_v_generation='V2', features=None, architecture=None): + logger.warning( + 
"Starting Build (May) 2024, \"az sig image-definition create\" command will use the new default values " + "Hyper-V Generation: V2 and SecurityType: TrustedLaunchSupported." + ) + + location = location or _get_resource_group_location(cmd.cli_ctx, resource_group_name) + + end_of_life_date = fix_gallery_image_date_info(end_of_life_date) + recommendation = None + if any([minimum_cpu_core, maximum_cpu_core, minimum_memory, maximum_memory]): + cpu_recommendation, memory_recommendation = None, None + if any([minimum_cpu_core, maximum_cpu_core]): + cpu_recommendation = { + "min": minimum_cpu_core, + "max": maximum_cpu_core, + } + if any([minimum_memory, maximum_memory]): + memory_recommendation = { + "min": minimum_memory, + "max": maximum_memory, + } + + recommendation = { + "v_cp_us": cpu_recommendation, + "memory": memory_recommendation + } + purchase_plan = None + if any([plan_name, plan_publisher, plan_product]): + purchase_plan = { + "name": plan_name, + "publisher": plan_publisher, + "product": plan_product, + } + + feature_list = None + if features: + from ._constants import COMPATIBLE_SECURITY_TYPE_VALUE, UPGRADE_SECURITY_HINT + feature_list = [] + security_type = None + for item in features.split(): + try: + key, value = item.split('=', 1) + # create Non-Trusted Launch VM Image + # The `Standard` is used for backward compatibility to allow customers to keep their current behavior + # after changing the default values to Trusted Launch VMs in the future. 
+ if key == 'SecurityType': + security_type = True + if key == 'SecurityType' and value == COMPATIBLE_SECURITY_TYPE_VALUE: + logger.warning(UPGRADE_SECURITY_HINT) + continue + feature_list.append({ + "name": key, + "value": value, + }) + except ValueError: + raise CLIError('usage error: --features KEY=VALUE [KEY=VALUE ...]') + if security_type is None and hyper_v_generation == 'V2': + feature_list.append({ + "name": "SecurityType", + "value": "TrustedLaunchSupported", + }) + if features is None and cmd.cli_ctx.cloud.profile == 'latest' and hyper_v_generation == 'V2': + feature_list = [{ + "name": "SecurityType", + "value": "TrustedLaunchSupported", + }] + + args = { + "identifier": {"publisher": publisher, "offer": offer, "sku": sku}, + "os_type": os_type, + "os_state": os_state, + "end_of_life_date": end_of_life_date, + "recommended": recommendation, + "disallowed": {"disk_types": disallowed_disk_types}, + "purchase_plan": purchase_plan, + "location": location, + "eula": eula, + "tags": tags or {}, + "hyper_v_generation": hyper_v_generation, + "features": feature_list, + "architecture": architecture, + "resource_group": resource_group_name, + "gallery_name": gallery_name, + "gallery_image_name": gallery_image_name, + } + + from .aaz.latest.sig.image_definition import Create + return Create(cli_ctx=cmd.cli_ctx)(command_args=args) + + +def _add_aux_subscription(aux_subscriptions, resource_id): + if resource_id: + aux_subs = _parse_aux_subscriptions(resource_id) + if aux_subs and aux_subs[0] not in aux_subscriptions: + aux_subscriptions.extend(aux_subs) + + +def _get_image_version_aux_subscription(managed_image, os_snapshot, data_snapshots): + aux_subscriptions = [] + _add_aux_subscription(aux_subscriptions, managed_image) + _add_aux_subscription(aux_subscriptions, os_snapshot) + if data_snapshots: + for data_snapshot in data_snapshots: + _add_aux_subscription(aux_subscriptions, data_snapshot) + return aux_subscriptions if aux_subscriptions else None + + +def 
create_image_version(cmd, resource_group_name, gallery_name, gallery_image_name, gallery_image_version, + location=None, target_regions=None, storage_account_type=None, + end_of_life_date=None, exclude_from_latest=None, replica_count=None, tags=None, + os_snapshot=None, data_snapshots=None, managed_image=None, data_snapshot_luns=None, + target_region_encryption=None, os_vhd_uri=None, os_vhd_storage_account=None, + data_vhds_uris=None, data_vhds_luns=None, data_vhds_storage_accounts=None, + replication_mode=None, target_region_cvm_encryption=None, virtual_machine=None, + image_version=None, target_zone_encryption=None, target_edge_zones=None, + allow_replicated_location_deletion=None, block_deletion_before_end_of_life=None, + no_wait=False): + from azure.mgmt.core.tools import resource_id, is_valid_resource_id + from azure.cli.core.commands.client_factory import get_subscription_id + + location = location or _get_resource_group_location(cmd.cli_ctx, resource_group_name) + end_of_life_date = fix_gallery_image_date_info(end_of_life_date) + if managed_image and not is_valid_resource_id(managed_image): + managed_image = resource_id(subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name, + namespace='Microsoft.Compute', type='images', name=managed_image) + if os_snapshot and not is_valid_resource_id(os_snapshot): + os_snapshot = resource_id(subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name, + namespace='Microsoft.Compute', type='snapshots', name=os_snapshot) + if data_snapshots: + for i, s in enumerate(data_snapshots): + if not is_valid_resource_id(data_snapshots[i]): + data_snapshots[i] = resource_id( + subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name, + namespace='Microsoft.Compute', type='snapshots', name=s) + + profile = { + "exclude_from_latest": exclude_from_latest, + "end_of_life_date": end_of_life_date, + "target_regions": target_regions or [{"name": location}], + 
"replica_count": replica_count, + "storage_account_type": storage_account_type + } + + if target_edge_zones: + profile["target_extended_locations"] = target_edge_zones + + if replication_mode is not None: + profile["replication_mode"] = replication_mode + if not cmd.supported_api_version(min_api='2022-03-03', operation_group='gallery_image_versions'): + source = {"managed_image": {"id": managed_image}} + profile["source"] = source + + if cmd.supported_api_version(min_api='2019-07-01', operation_group='gallery_image_versions'): + if managed_image is None and os_snapshot is None and os_vhd_uri is None: + raise RequiredArgumentMissingError('usage error: Please provide --managed-image or --os-snapshot or --vhd') + + source = os_disk_image = data_disk_images = None + if virtual_machine is not None and cmd.supported_api_version(min_api='2023-07-03', + operation_group='gallery_image_versions'): + source = {"virtual_machine_id": virtual_machine} + elif managed_image is not None: + source = {"id": managed_image} + if os_snapshot is not None: + os_disk_image = {"source": {"id": os_snapshot}} + if data_snapshot_luns and not data_snapshots: + raise ArgumentUsageError('usage error: --data-snapshot-luns must be used together with --data-snapshots') + if data_snapshots: + if data_snapshot_luns and len(data_snapshots) != len(data_snapshot_luns): + raise ArgumentUsageError('usage error: Length of --data-snapshots and ' + '--data-snapshot-luns should be equal.') + if not data_snapshot_luns: + data_snapshot_luns = list(range(len(data_snapshots))) + data_disk_images = [] + for i, s in enumerate(data_snapshots): + data_disk_images.append({"source": {"id": s}, "lun": int(data_snapshot_luns[i])}) + # from vhd, only support os image now + if cmd.supported_api_version(min_api='2020-09-30', operation_group='gallery_image_versions'): + # OS disk + if os_vhd_uri and os_vhd_storage_account is None or os_vhd_uri is None and os_vhd_storage_account: + raise ArgumentUsageError('--os-vhd-uri and 
--os-vhd-storage-account should be used together.') + if os_vhd_uri and os_vhd_storage_account: + if not is_valid_resource_id(os_vhd_storage_account): + os_vhd_storage_account = resource_id( + subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name, + namespace='Microsoft.Storage', type='storageAccounts', name=os_vhd_storage_account) + os_disk_image = { + "source": { + "storage_account_id": os_vhd_storage_account, + "uri": os_vhd_uri + } + } + + # Data disks + if data_vhds_uris and data_vhds_storage_accounts is None or \ + data_vhds_uris is None and data_vhds_storage_accounts: + raise ArgumentUsageError('--data-vhds-uris and --data-vhds-storage-accounts should be used together.') + if data_vhds_luns and data_vhds_uris is None: + raise ArgumentUsageError('--data-vhds-luns must be used together with --data-vhds-uris') + if data_vhds_uris: + # Generate LUNs + if data_vhds_luns is None: + # 0, 1, 2, ... + data_vhds_luns = list(range(len(data_vhds_uris))) + # Check length + len_data_vhds_uris = len(data_vhds_uris) + len_data_vhds_luns = len(data_vhds_luns) + len_data_vhds_storage_accounts = len(data_vhds_storage_accounts) + if len_data_vhds_uris != len_data_vhds_luns or len_data_vhds_uris != len_data_vhds_storage_accounts: + raise ArgumentUsageError( + 'Length of --data-vhds-uris, --data-vhds-luns, --data-vhds-storage-accounts must be same.') + # Generate full storage account ID + for i, storage_account in enumerate(data_vhds_storage_accounts): + if not is_valid_resource_id(storage_account): + data_vhds_storage_accounts[i] = resource_id( + subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name, + namespace='Microsoft.Storage', type='storageAccounts', name=storage_account) + if data_disk_images is None: + data_disk_images = [] + for uri, lun, account in zip(data_vhds_uris, data_vhds_luns, data_vhds_storage_accounts): + data_disk_images.append({ + "source": {"storage_account_id": account, "uri": uri}, + "lun": lun + }) 
+ + storage_profile = {"source": source, "os_disk_image": os_disk_image, "data_disk_images": data_disk_images} + args = { + "publishing_profile": profile, + "location": location, + "tags": tags or {}, + "storage_profile": storage_profile + } + if allow_replicated_location_deletion is not None: + args["safety_profile"] = { + "allow_deletion_of_replicated_locations": allow_replicated_location_deletion + } + if block_deletion_before_end_of_life is not None: + if "safety_profile" not in args: + args["safety_profile"] = {} + + args["safety_profile"]["block_deletion_before_end_of_life"] = block_deletion_before_end_of_life + else: + if managed_image is None: + raise RequiredArgumentMissingError('usage error: Please provide --managed-image') + args = {"publishing_profile": profile, "location": location, "tags": tags or {}} + + args["resource_group"] = resource_group_name + args["gallery_name"] = gallery_name + args["gallery_image_definition"] = gallery_image_name + args["gallery_image_version_name"] = gallery_image_version + args["no_wait"] = no_wait + + from .aaz.latest.sig.image_version import Create + return Create(cli_ctx=cmd.cli_ctx)(command_args=args) + + +def undelete_image_version(cmd, resource_group_name, gallery_name, gallery_image_name, gallery_image_version, + location=None, tags=None, allow_replicated_location_deletion=None): + location = location or _get_resource_group_location(cmd.cli_ctx, resource_group_name) + + from .aaz.latest.sig import Show as _SigShow + gallery = _SigShow(cli_ctx=cmd.cli_ctx)(command_args={ + "resource_group": resource_group_name, + "gallery_name": gallery_name + }) + + if gallery.get("softDeletePolicy", None) and gallery["softDeletePolicy"].get("isSoftDeleteEnabled", None): + soft_delete = gallery["softDeletePolicy"]["isSoftDeleteEnabled"] + else: + soft_delete = None + + if not soft_delete: + from azure.cli.core.azclierror import InvalidArgumentValueError + raise InvalidArgumentValueError('soft-deletion is not enabled in Gallery 
\'{}\''.format(gallery_name)) + + args = { + "publishing_profile": None, + "location": location, + "tags": tags or {}, + "storage_profile": None, + } + if allow_replicated_location_deletion is not None: + args["safety_profile"] = { + "allow_deletion_of_replicated_locations": allow_replicated_location_deletion + } + + args["resource_group"] = resource_group_name + args["gallery_name"] = gallery_name + args["gallery_image_definition"] = gallery_image_name + args["gallery_image_version_name"] = gallery_image_version + + from .aaz.latest.sig.image_version import Create + return Create(cli_ctx=cmd.cli_ctx)(command_args=args) + + +def fix_gallery_image_date_info(date_info): + # here we add needed time, if only date is provided, so the setting can be accepted by servie end + if date_info and 't' not in date_info.lower(): + date_info += 'T12:59:59Z' + return date_info + + +# pylint: disable=line-too-long +def get_image_version_to_update(cmd, resource_group_name, gallery_name, gallery_image_name, gallery_image_version_name): + from .aaz.latest.sig.image_version import Show as SigImageVersionShow + version = SigImageVersionShow(cli_ctx=cmd.cli_ctx)(command_args={ + "resource_group": resource_group_name, + "gallery_name": gallery_name, + "gallery_image_definition": gallery_image_name, + "gallery_image_version_name": gallery_image_version_name, + }) + + # To avoid unnecessary permission check of image + if "storageProfile" not in version: + version["storageProfile"] = {} + version["storageProfile"]["source"] = None + if version["storageProfile"].get("osDiskImage", None) and \ + version["storageProfile"]["osDiskImage"].get("source", None): + version["storageProfile"]["osDiskImage"]["source"] = None + if version["storageProfile"].get("dataDiskImages", None): + for v in version["storageProfile"]["dataDiskImages"]: + if v.get("source", None): + v["source"] = None + + return version + + +def update_image_version(cmd, resource_group_name, gallery_name, gallery_image_name, 
gallery_image_version_name, + target_regions=None, replica_count=None, allow_replicated_location_deletion=None, + target_edge_zones=None, block_deletion_before_end_of_life=None, no_wait=False, **kwargs): + args = kwargs['gallery_image_version'] + + from .operations.sig_image_version import convert_show_result_to_snake_case + args = convert_show_result_to_snake_case(args) + + if target_regions: + if "publishing_profile" not in args: + args["publishing_profile"] = {} + args["publishing_profile"]["target_regions"] = target_regions + if replica_count: + if "publishing_profile" not in args: + args["publishing_profile"] = {} + args["publishing_profile"]["replica_count"] = replica_count + if args.get("storage_profile", None) and args["storage_profile"].get("source", None) is not None: + args["storage_profile"]["os_disk_image"] = args["storage_profile"]["data_disk_images"] = None + # target extended locations will be updated when --target-edge-zones is specified + if target_edge_zones is not None: + if "publishing_profile" not in args: + args["publishing_profile"] = {} + args["publishing_profile"]["target_extended_locations"] = target_edge_zones \ + if len(target_edge_zones) > 0 else None + if allow_replicated_location_deletion is not None: + if "safety_profile" not in args: + args["safety_profile"] = {} + args["safety_profile"]["allow_deletion_of_replicated_locations"] = allow_replicated_location_deletion + if block_deletion_before_end_of_life is not None: + if "safety_profile" not in args: + args["safety_profile"] = {} + args["safety_profile"]["block_deletion_before_end_of_life"] = block_deletion_before_end_of_life + + args["resource_group"] = resource_group_name + args["gallery_name"] = gallery_name + args["gallery_image_definition"] = gallery_image_name + args["gallery_image_version_name"] = gallery_image_version_name + args["no_wait"] = no_wait + + from .aaz.latest.sig.image_version import Create + return Create(cli_ctx=cmd.cli_ctx)(command_args=args) +# endregion + + 
# region dedicated host
def create_dedicated_host_group(cmd, client, host_group_name, resource_group_name, platform_fault_domain_count,
                                automatic_placement=None, location=None, zones=None, tags=None, ultra_ssd_enabled=None):
    """Create a dedicated host group.

    :param platform_fault_domain_count: number of fault domains the group can span.
    :param ultra_ssd_enabled: when not None, sets the 'ultraSSDEnabled' additional capability.
    :return: poller/result of the create_or_update call on the host-group client.
    """
    DedicatedHostGroup = cmd.get_models('DedicatedHostGroup')
    # Default the group's location to the resource group's location.
    location = location or _get_resource_group_location(cmd.cli_ctx, resource_group_name)

    host_group_params = DedicatedHostGroup(location=location, platform_fault_domain_count=platform_fault_domain_count,
                                           support_automatic_placement=automatic_placement, zones=zones, tags=tags)
    # 'is not None' so an explicit False still gets sent to the service.
    if ultra_ssd_enabled is not None:
        host_group_params.additional_capabilities = {'ultraSSDEnabled': ultra_ssd_enabled}

    return client.create_or_update(resource_group_name, host_group_name, parameters=host_group_params)


def get_dedicated_host_group_instance_view(client, host_group_name, resource_group_name):
    """Return the dedicated host group expanded with its instance view."""
    return client.get(resource_group_name, host_group_name, expand="instanceView")


def create_dedicated_host(cmd, host_group_name, host_name, resource_group_name, sku, platform_fault_domain=None,
                          auto_replace_on_failure=None, license_type=None, location=None, tags=None):
    """Create a dedicated host inside a host group via the aaz-generated Create command.

    :param sku: SKU name string; only the SKU name is sent.
    :param platform_fault_domain: fault domain index for the host; 0 is a valid value.
    """
    from .aaz.latest.vm.host import Create as VmHostCreate
    location = location or _get_resource_group_location(cmd.cli_ctx, resource_group_name)
    command_args = {
        'host_group_name': host_group_name,
        'host_name': host_name,
        'resource_group': resource_group_name,
        'location': location,
        'sku': {
            'name': sku
        }
    }

    if tags:
        command_args['tags'] = tags

    if auto_replace_on_failure is not None:
        command_args['auto_replace_on_failure'] = auto_replace_on_failure

    if license_type:
        command_args['license_type'] = license_type

    # BUGFIX: fault domains are 0-indexed, so a truthiness check ('if platform_fault_domain:')
    # silently dropped the valid value 0. Test explicitly against None instead.
    if platform_fault_domain is not None:
        command_args['platform_fault_domain'] = platform_fault_domain

    return VmHostCreate(cli_ctx=cmd.cli_ctx)(command_args=command_args)


def get_dedicated_host_instance_view(cmd, host_group_name, host_name, resource_group_name):
    """Return a dedicated host expanded with its instance view."""
    from .aaz.latest.vm.host import Show as VmHostShow
    command_args = {
        'host_group_name': host_group_name,
        'host_name': host_name,
        'resource_group': resource_group_name,
        'expand': 'instanceView'
    }
    return VmHostShow(cli_ctx=cmd.cli_ctx)(command_args=command_args)

# endregion


# region VMMonitor
def _get_log_analytics_client(cmd):
    """Build a Log Analytics management client scoped to the current subscription."""
    from ._client_factory import cf_log_analytics
    from azure.cli.core.commands.client_factory import get_subscription_id
    subscription_id = get_subscription_id(cmd.cli_ctx)
    return cf_log_analytics(cmd.cli_ctx, subscription_id)


def _prepare_workspace(cmd, resource_group_name, workspace):
    """Resolve *workspace* (name or resource id) to a workspace resource id.

    If a name is given and the workspace does not exist (lookup raises
    HttpResponseError), a PerGB2018 workspace with 30-day retention is created
    in the resource group's location.
    """
    from azure.mgmt.core.tools import is_valid_resource_id

    from azure.core.exceptions import HttpResponseError

    workspace_id = None
    if not is_valid_resource_id(workspace):
        workspace_name = workspace
        log_client = _get_log_analytics_client(cmd)
        workspace_result = None
        try:
            workspace_result = log_client.workspaces.get(resource_group_name, workspace_name)
        except HttpResponseError:
            # Workspace not found (or lookup failed): create one with default settings.
            from azure.mgmt.loganalytics.models import Workspace, WorkspaceSku, WorkspaceSkuNameEnum
            sku = WorkspaceSku(name=WorkspaceSkuNameEnum.per_gb2018.value)
            retention_time = 30  # default value
            location = _get_resource_group_location(cmd.cli_ctx, resource_group_name)
            workspace_instance = Workspace(location=location,
                                           sku=sku,
                                           retention_in_days=retention_time)
            workspace_result = LongRunningOperation(cmd.cli_ctx)(log_client.workspaces.begin_create_or_update(
                resource_group_name,
                workspace_name,
                workspace_instance))
        workspace_id = workspace_result.id
    else:
        workspace_id = workspace
    return workspace_id


def _set_data_source_for_workspace(cmd, os_type, resource_group_name, workspace_name):
    """Create the default data sources for a Log Analytics workspace by OS type.

    Per-data-source failures are logged and skipped (best effort), not raised.
    Unsupported OS types only produce a warning.
    """
    from ._client_factory import cf_log_analytics_data_sources
    from azure.cli.core.commands.client_factory import get_subscription_id
    from azure.mgmt.loganalytics.models import DataSource
    from azure.core.exceptions import HttpResponseError

    subscription_id = get_subscription_id(cmd.cli_ctx)
    data_sources_client = cf_log_analytics_data_sources(cmd.cli_ctx, subscription_id)
    data_source_name_template = "DataSource_{}_{}"

    default_data_sources = None
    if os_type.lower() == 'linux':
        from ._workspace_data_source_settings import default_linux_data_sources
        default_data_sources = default_linux_data_sources
    elif os_type.lower() == 'windows':
        from ._workspace_data_source_settings import default_windows_data_sources
        default_data_sources = default_windows_data_sources

    if default_data_sources is not None:
        for data_source_kind, data_source_settings in default_data_sources.items():
            for data_source_setting in data_source_settings:
                data_source = DataSource(kind=data_source_kind,
                                         properties=data_source_setting)
                # GUID suffix keeps names unique across repeated runs.
                data_source_name = data_source_name_template.format(data_source_kind, _gen_guid())
                try:
                    data_sources_client.create_or_update(resource_group_name,
                                                         workspace_name,
                                                         data_source_name,
                                                         data_source)
                except HttpResponseError as ex:
                    # Deliberate best-effort: keep going and let the user fix it manually.
                    logger.warning("Failed to set data source due to %s. "
                                   "Skip this step and need manual work later.", ex.message)
    else:
        logger.warning("Unsupported OS type. Skip the default settings for log analytics workspace.")


def execute_query_for_vm(cmd, client, resource_group_name, vm_name, analytics_query, timespan=None):
    """Executes a query against the Log Analytics workspace linked with a vm.

    The workspace id is discovered from the VM's OMS agent extension settings;
    raises CLIError when no OMS agent extension is attached.
    """
    vm = get_vm_by_aaz(cmd, resource_group_name, vm_name)
    workspace = None
    extension_resources = vm.get('resources', [])
    for resource in extension_resources:
        if resource.get('name') in (_WINDOWS_OMS_AGENT_EXT, _LINUX_OMS_AGENT_EXT):
            workspace = resource.get('settings', {}).get('workspaceId', None)
    if workspace is None:
        raise CLIError('Cannot find the corresponding log analytics workspace. '
                       'Please check the status of log analytics workspace.')
    return client.query_workspace(workspace, analytics_query, timespan=timespan)


def _set_log_analytics_workspace_extension(cmd, resource_group_name, vm, vm_name, workspace_name):
    """Install the OS-appropriate OMS agent extension wired to *workspace_name*.

    The workspace key goes into protected settings so it is not stored in clear text.
    """
    is_linux_os = _is_linux_os_by_aaz(vm)
    vm_extension_name = _LINUX_OMS_AGENT_EXT if is_linux_os else _WINDOWS_OMS_AGENT_EXT
    log_client = _get_log_analytics_client(cmd)
    customer_id = log_client.workspaces.get(resource_group_name, workspace_name).customer_id
    settings = {
        'workspaceId': customer_id,
        # Extension expects string 'true', not a boolean.
        'stopOnMultipleConnections': 'true'
    }
    primary_shared_key = log_client.shared_keys.get_shared_keys(resource_group_name, workspace_name).primary_shared_key
    protected_settings = {
        'workspaceKey': primary_shared_key,
    }
    return set_extension(cmd, resource_group_name, vm_name, vm_extension_name,
                         extension_mappings[vm_extension_name]['publisher'],
                         extension_mappings[vm_extension_name]['version'],
                         settings,
                         protected_settings)
# endregion


# disk encryption set
def show_disk_encryption_set_identity(cmd, resource_group_name, disk_encryption_set_name):
    """Return the 'identity' section of a disk encryption set ({} when absent)."""
    from .aaz.latest.disk_encryption_set import Show as _Show
    des = _Show(cli_ctx=cmd.cli_ctx)(command_args={
        "disk_encryption_set_name": disk_encryption_set_name,
        "resource_group": resource_group_name
    })
    return des.get('identity', {})
# endregion


# region install patches
def install_vm_patches(cmd, resource_group_name, vm_name, maximum_duration, reboot_setting,
                       classifications_to_include_win=None, classifications_to_include_linux=None,
                       kb_numbers_to_include=None, kb_numbers_to_exclude=None, exclude_kbs_requiring_reboot=None,
                       package_name_masks_to_include=None, package_name_masks_to_exclude=None,
                       max_patch_publish_date=None, no_wait=False):
    """Invoke InstallPatches on a VM.

    Both the Linux and Windows parameter groups are always sent; the service
    uses whichever matches the VM's OS.
    """
    from .aaz.latest.vm import InstallPatches as VmInstallPatches

    command_args = {
        'resource_group': resource_group_name,
        'name': vm_name,
        'maximum_duration': maximum_duration,
        'reboot_setting': reboot_setting,
        'linux_parameters': {
            'classifications_to_include': classifications_to_include_linux,
            'package_name_masks_to_exclude': package_name_masks_to_exclude,
            'package_name_masks_to_include': package_name_masks_to_include
        },
        'windows_parameters': {
            'classifications_to_include': classifications_to_include_win,
            'exclude_kbs_requiring_reboot': exclude_kbs_requiring_reboot,
            'kb_numbers_to_exclude': kb_numbers_to_exclude,
            'kb_numbers_to_include': kb_numbers_to_include,
            'max_patch_publish_date': max_patch_publish_date
        },
        'no_wait': no_wait
    }

    return VmInstallPatches(cli_ctx=cmd.cli_ctx)(command_args=command_args)
# endregion


def get_page_result(generator, marker, show_next_marker=None):
    """Collect one result window from a pageable *generator*.

    :param marker: continuation token to resume from (None for the start).
    :param show_next_marker: when truthy, append {"nextMarker": token} to the
        result; otherwise a remaining token is only logged as a warning.
    :return: list of items (plus the optional next-marker dict).
    """
    pages = generator.by_page(continuation_token=marker)  # ContainerPropertiesPaged
    result = list_generator(pages=pages)

    if show_next_marker:
        next_marker = {"nextMarker": pages.continuation_token}
        result.append(next_marker)
    else:
        if pages.continuation_token:
            logger.warning('Next Marker:')
            logger.warning(pages.continuation_token)

    return result


def list_generator(pages, num_results=50):
    """Drain *pages* into a list, stopping once *num_results* items are collected.

    The first page is always fetched; further pages are fetched only while a
    continuation token remains and the cap (when not None) is not yet reached.
    Note the cap is checked per page, so the result may exceed *num_results*
    by up to one page.
    """
    result = []

    # get first page items
    page = list(next(pages))
    result += page

    while True:
        if not pages.continuation_token:
            break

        # handle num results
        if num_results is not None:
            if num_results <= len(result):
                break

        page = list(next(pages))
        result += page

    return result


def gallery_application_version_create(client,
                                       resource_group_name,
                                       gallery_name,
                                       gallery_application_name,
                                       gallery_application_version_name,
                                       location,
                                       package_file_link,
                                       install_command,
                                       remove_command,
                                       tags=None,
                                       update_command=None,
                                       target_regions=None,
                                       default_file_link=None,
                                       end_of_life_date=None,
                                       package_file_name=None,
                                       config_file_name=None,
                                       exclude_from=None,
                                       no_wait=False):
    gallery_application_version = {}
    gallery_application_version['publishing_profile'] = {}
gallery_application_version['location'] = location + if tags is not None: + gallery_application_version['tags'] = tags + source = {} + source['media_link'] = package_file_link + if default_file_link is not None: + source['default_configuration_link'] = default_file_link + gallery_application_version['publishing_profile']['source'] = source + manage_actions = {} + manage_actions['install'] = install_command + manage_actions['remove'] = remove_command + if update_command is not None: + manage_actions['update'] = update_command + gallery_application_version['publishing_profile']['manage_actions'] = manage_actions + if target_regions is not None: + gallery_application_version['publishing_profile']['target_regions'] = target_regions + if exclude_from is not None: + gallery_application_version['publishing_profile']['exclude_from_latest'] = exclude_from + if end_of_life_date is not None: + gallery_application_version['publishing_profile']['end_of_life_date'] = end_of_life_date + settings = {} + if package_file_name is not None: + settings['package_file_name'] = package_file_name + if config_file_name is not None: + settings['config_file_name'] = config_file_name + if settings: + gallery_application_version['publishing_profile']['settings'] = settings + return sdk_no_wait(no_wait, + client.begin_create_or_update, + resource_group_name=resource_group_name, + gallery_name=gallery_name, + gallery_application_name=gallery_application_name, + gallery_application_version_name=gallery_application_version_name, + gallery_application_version=gallery_application_version) + + +def gallery_application_version_update(client, + resource_group_name, + gallery_name, + gallery_application_name, + gallery_application_version_name, + location, + package_file_link, + tags=None, + target_regions=None, + default_file_link=None, + end_of_life_date=None, + exclude_from=None, + no_wait=False): + gallery_application_version = {} + gallery_application_version['publishing_profile'] = {} + 
gallery_application_version['location'] = location + if tags is not None: + gallery_application_version['tags'] = tags + source = {} + source['media_link'] = package_file_link + if default_file_link is not None: + source['default_configuration_link'] = default_file_link + gallery_application_version['publishing_profile']['source'] = source + if target_regions is not None: + gallery_application_version['publishing_profile']['target_regions'] = [target_regions] + if exclude_from is not None: + gallery_application_version['publishing_profile']['exclude_from_latest'] = exclude_from + if end_of_life_date is not None: + gallery_application_version['publishing_profile']['end_of_life_date'] = end_of_life_date + return sdk_no_wait(no_wait, + client.begin_create_or_update, + resource_group_name=resource_group_name, + gallery_name=gallery_name, + gallery_application_name=gallery_application_name, + gallery_application_version_name=gallery_application_version_name, + gallery_application_version=gallery_application_version) + + +def create_capacity_reservation_group(cmd, client, resource_group_name, capacity_reservation_group_name, location=None, + tags=None, zones=None, sharing_profile=None): + CapacityReservationGroup = cmd.get_models('CapacityReservationGroup') + if sharing_profile is not None: + subscription_ids = [{'id': sub_id} for sub_id in sharing_profile] + sharing_profile = {'subscriptionIds': subscription_ids} + capacity_reservation_group = CapacityReservationGroup(location=location, tags=tags, + zones=zones, sharing_profile=sharing_profile) + return client.create_or_update(resource_group_name=resource_group_name, + capacity_reservation_group_name=capacity_reservation_group_name, + parameters=capacity_reservation_group) + + +def update_capacity_reservation_group(cmd, client, resource_group_name, capacity_reservation_group_name, tags=None, + sharing_profile=None): + CapacityReservationGroupUpdate = cmd.get_models('CapacityReservationGroupUpdate') + if sharing_profile 
is not None: + subscription_ids = [{'id': sub_id} for sub_id in sharing_profile] + sharing_profile = {'subscriptionIds': subscription_ids} + capacity_reservation_group = CapacityReservationGroupUpdate(tags=tags, sharing_profile=sharing_profile) + return client.update(resource_group_name=resource_group_name, + capacity_reservation_group_name=capacity_reservation_group_name, + parameters=capacity_reservation_group) + + +def show_capacity_reservation_group(client, resource_group_name, capacity_reservation_group_name, + instance_view=None): + expand = None + if instance_view: + expand = 'instanceView' + return client.get(resource_group_name=resource_group_name, + capacity_reservation_group_name=capacity_reservation_group_name, + expand=expand) + + +def set_vm_applications(cmd, vm_name, resource_group_name, application_version_ids, order_applications=False, application_configuration_overrides=None, treat_deployment_as_failure=None, enable_automatic_upgrade=None, no_wait=False): + from .aaz.latest.vm import Update as _VMUpdate + + class SetVMApplications(_VMUpdate): + def pre_operations(self): + args = self.ctx.args + args.no_wait = no_wait + + def pre_instance_update(self, instance): + instance.properties.application_profile.gallery_applications = [{"package_reference_id": avid} for avid in application_version_ids] + + if order_applications: + index = 1 + for app in instance.properties.application_profile.gallery_applications: + app["order"] = index + index += 1 + + if application_configuration_overrides: + index = 0 + for over_ride in application_configuration_overrides: + if over_ride or over_ride.lower() != 'null': + instance.properties.application_profile.gallery_applications[index]["configuration_reference"] = over_ride + index += 1 + + if treat_deployment_as_failure: + index = 0 + for treat_as_failure in treat_deployment_as_failure: + instance.properties.application_profile.gallery_applications[index]["treat_failure_as_deployment_failure"] = \ + 
treat_as_failure.lower() == 'true' + index += 1 + + if enable_automatic_upgrade: + index = 0 + for enable_auto_upgrade in enable_automatic_upgrade: + instance.properties.application_profile.gallery_applications[index]["enable_automatic_upgrade"] = \ + enable_auto_upgrade.lower() == 'true' + index += 1 + + def _output(self, *args, **kwargs): + from azure.cli.core.aaz import AAZUndefined, has_value + + # Resolve flatten conflict + # When the type field conflicts, the type in inner layer is ignored and the outer layer is applied + if has_value(self.ctx.vars.instance.resources): + for resource in self.ctx.vars.instance.resources: + if has_value(resource.type): + resource.type = AAZUndefined + + result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True) + return result + + return SetVMApplications(cli_ctx=cmd.cli_ctx)(command_args={ + "resource_group": resource_group_name, + "vm_name": vm_name, + }) + + +def list_vm_applications(cmd, vm_name, resource_group_name): + try: + from .operations.vm import VMShow + vm = VMShow(cli_ctx=cmd.cli_ctx)(command_args={ + "resource_group": resource_group_name, + "vm_name": vm_name + }) + except ResourceNotFoundError: + raise ResourceNotFoundError('Could not find vm {}.'.format(vm_name)) + return vm.get("applicationProfile", {}) + + +def set_vmss_applications(cmd, vmss_name, resource_group_name, application_version_ids, order_applications=False, application_configuration_overrides=None, treat_deployment_as_failure=None, enable_automatic_upgrade=None, no_wait=False): + from .aaz.latest.vmss import Update as _VMSSUpdate + + class SetVMSSApplications(_VMSSUpdate): + def pre_operations(self): + args = self.ctx.args + args.no_wait = no_wait + + def pre_instance_update(self, instance): + instance.properties.virtualMachineProfile.application_profile.gallery_applications = [{"package_reference_id": avid} for avid in application_version_ids] + + if order_applications: + index = 1 + for app in 
instance.properties.virtualMachineProfile.application_profile.gallery_applications: + app["order"] = index + index += 1 + + if application_configuration_overrides: + index = 0 + for over_ride in application_configuration_overrides: + if over_ride or over_ride.lower() != 'null': + instance.properties.virtual_machine_profile.application_profile.gallery_applications[index]["configuration_reference"] = over_ride + index += 1 + + if treat_deployment_as_failure: + index = 0 + for treat_as_failure in treat_deployment_as_failure: + instance.properties.virtual_machine_profile.application_profile.gallery_applications[index]["treat_failure_as_deployment_failure"] = \ + treat_as_failure.lower() == 'true' + index += 1 + + if enable_automatic_upgrade: + index = 0 + for enable_auto_upgrade in enable_automatic_upgrade: + instance.properties.virtual_machine_profile.application_profile.gallery_applications[index]["enable_automatic_upgrade"] = \ + enable_auto_upgrade.lower() == 'true' + index += 1 + + def _output(self, *args, **kwargs): + from azure.cli.core.aaz import AAZUndefined, has_value + + # Resolve flatten conflict + # When the type field conflicts, the type in inner layer is ignored and the outer layer is applied + print(self.ctx.vars.instance.properties.virtual_machine_profile.extension_profile.extensions) + if has_value(self.ctx.vars.instance.properties.virtual_machine_profile.extension_profile.extensions): + for extension in self.ctx.vars.instance.properties.virtual_machine_profile.extension_profile.extensions: + if has_value(extension.type): + extension.type = AAZUndefined + + result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True) + return result + + return SetVMSSApplications(cli_ctx=cmd.cli_ctx)(command_args={ + "resource_group": resource_group_name, + "vm_scale_set_name": vmss_name, + }) + + +def list_vmss_applications(cmd, vmss_name, resource_group_name): + from .operations.vmss import VMSSShow + try: + command_args = { + 'resource_group': 
resource_group_name, + 'vm_scale_set_name': vmss_name + } + vmss = VMSSShow(cli_ctx=cmd.cli_ctx)(command_args=command_args) + except ResourceNotFoundError: + raise ResourceNotFoundError('Could not find vmss {}.'.format(vmss_name)) + return vmss.get('virtualMachineProfile', {}).get('applicationProfile', {}) + + +# region Restore point collection +def restore_point_create(cmd, + resource_group_name, + restore_point_collection_name, + restore_point_name, + exclude_disks=None, + source_restore_point=None, + consistency_mode=None, + source_os_resource=None, + os_restore_point_encryption_set=None, + os_restore_point_encryption_type=None, + source_data_disk_resource=None, + data_disk_restore_point_encryption_set=None, + data_disk_restore_point_encryption_type=None, + no_wait=False): + parameters = { + 'restore_point_collection_name': restore_point_collection_name, + 'restore_point_name': restore_point_name, + 'resource_group': resource_group_name, + 'no_wait': no_wait + } + if exclude_disks is not None: + parameters['exclude_disks'] = [] + for disk in exclude_disks: + parameters['exclude_disks'].append({'id': disk}) + if source_restore_point is not None: + parameters['source_restore_point'] = {'id': source_restore_point} + if consistency_mode is not None: + parameters['consistency_mode'] = consistency_mode + + storage_profile = {} + # Local restore point + if source_restore_point is None: + os_disk = {} + if source_os_resource is not None: + managed_disk = { + 'id': source_os_resource + } + os_disk['managed_disk'] = managed_disk + if os_restore_point_encryption_set is None and os_restore_point_encryption_type is None: + raise ArgumentUsageError('usage error: --os-restore-point-encryption-set or --os-restore-point-encryption-type must be used together with --source-os-resource') + + disk_restore_point = {} + if os_restore_point_encryption_set is not None or os_restore_point_encryption_type is not None: + encryption = {} + if os_restore_point_encryption_set is not None: + 
encryption['disk_encryption_set'] = { + 'id': os_restore_point_encryption_set + } + if os_restore_point_encryption_type is not None: + encryption['type'] = os_restore_point_encryption_type + + if encryption: + disk_restore_point['encryption'] = encryption + + if disk_restore_point: + os_disk['disk_restore_point'] = disk_restore_point + + if os_disk: + storage_profile['os_disk'] = os_disk + + data_disks = [] + if source_data_disk_resource is not None: + if data_disk_restore_point_encryption_set is None and data_disk_restore_point_encryption_type is None: + raise ArgumentUsageError('usage error: --data-disk-restore-point-encryption-set or --data-disk-restore-point-encryption-type must be used together with --source-data-disk-resource') + if data_disk_restore_point_encryption_set is not None and (len(source_data_disk_resource) != len(data_disk_restore_point_encryption_set)): + raise ArgumentUsageError('Length of --source-data-disk-resource, --data-disk-restore-point-encryption-set must be same.') + if data_disk_restore_point_encryption_type is not None and (len(source_data_disk_resource) != len(data_disk_restore_point_encryption_type)): + raise ArgumentUsageError('Length of --source-data-disk-resource, --data-disk-restore-point-encryption-type must be same.') + + for i, v in enumerate(source_data_disk_resource): + data_disks.append({ + 'managed_disk': { + 'id': v + }, + 'disk_restore_point': { + 'encryption': { + 'disk_encryption_set': { + 'id': data_disk_restore_point_encryption_set[i] if data_disk_restore_point_encryption_set is not None else None + }, + 'type': data_disk_restore_point_encryption_type[i] if data_disk_restore_point_encryption_type is not None else None + } + } + }) + + if data_disks: + storage_profile['data_disks'] = data_disks + + # Remote restore point + if source_restore_point is not None: + os_disk = {} + disk_restore_point = {} + if source_os_resource is not None: + source_disk_restore_point = { + 'id': source_os_resource + } + 
disk_restore_point['source_disk_restore_point'] = source_disk_restore_point + if os_restore_point_encryption_set is None and os_restore_point_encryption_type is None: + raise ArgumentUsageError('usage error: --os-restore-point-encryption-set or --os-restore-point-encryption-type must be used together with --source-os-resource') + + if os_restore_point_encryption_set is not None or os_restore_point_encryption_type is not None: + encryption = {} + if os_restore_point_encryption_set is not None: + encryption['disk_encryption_set'] = { + 'id': os_restore_point_encryption_set + } + if os_restore_point_encryption_type is not None: + encryption['type'] = os_restore_point_encryption_type + + if encryption: + disk_restore_point['encryption'] = encryption + if disk_restore_point: + os_disk['disk_restore_point'] = disk_restore_point + if os_disk: + storage_profile['os_disk'] = os_disk + + data_disks = [] + if source_data_disk_resource is not None: + if data_disk_restore_point_encryption_set is None and data_disk_restore_point_encryption_type is None: + raise ArgumentUsageError('usage error: --data-disk-restore-point-encryption-set or --data-disk-restore-point-encryption-type must be used together with --source-data-disk-resource') + if data_disk_restore_point_encryption_set is not None and (len(source_data_disk_resource) != len(data_disk_restore_point_encryption_set)): + raise ArgumentUsageError('Length of --source-data-disk-resource, --data-disk-restore-point-encryption-set must be same.') + if data_disk_restore_point_encryption_type is not None and (len(source_data_disk_resource) != len(data_disk_restore_point_encryption_type)): + raise ArgumentUsageError('Length of --source-data-disk-resource, --data-disk-restore-point-encryption-type must be same.') + + for i, v in enumerate(source_data_disk_resource): + data_disks.append({ + 'disk_restore_point': { + 'source_disk_restore_point': { + 'id': v + }, + 'encryption': { + 'disk_encryption_set': { + 'id': 
data_disk_restore_point_encryption_set[i] if data_disk_restore_point_encryption_set is not None else None + }, + 'type': data_disk_restore_point_encryption_type[i] if data_disk_restore_point_encryption_type is not None else None + } + } + }) + if data_disks: + storage_profile['data_disks'] = data_disks + + if storage_profile: + parameters['source_metadata'] = {'storage_profile': storage_profile} + + from .aaz.latest.restore_point import Create + return Create(cli_ctx=cmd.cli_ctx)(command_args=parameters) + + +def restore_point_show(cmd, + resource_group_name, + restore_point_name, + restore_point_collection_name, + expand=None, + instance_view=None): + args = { + 'resource_group': resource_group_name, + 'restore_point_collection_name': restore_point_collection_name, + 'restore_point_name': restore_point_name, + 'expand': expand + } + + if instance_view is not None: + args['expand'] = 'instanceView' + + from .aaz.latest.restore_point import Show + return Show(cli_ctx=cmd.cli_ctx)(command_args=args) + +# endRegion + + +# region Restore point collection +def restore_point_collection_show(cmd, + resource_group_name, + restore_point_collection_name, + expand=None, + restore_points=None): + from .aaz.latest.restore_point.collection import Show + args = { + "resource_group": resource_group_name, + "restore_point_collection_name": restore_point_collection_name, + } + if restore_points is not None: + args['expand'] = 'restorePoints' + return Show(cli_ctx=cmd.cli_ctx)(command_args=args) + +# endRegion + + +# region Community gallery +def sig_community_gallery_list(cmd, location=None, marker=None, show_next_marker=None): + from ._arg_client import ARGClient, QueryBody + + query_table = 'communitygalleryresources' + query_type = 'microsoft.compute/locations/communitygalleries' + + query = "{}| where type == '{}' ".format(query_table, query_type) + if location: + # Since the location data in table "communitygalleryresources" is in lowercase + # For accurate matching, we also 
need to convert the location in the query statement to lowercase + query = query + "| where location == '{}' ".format(location.lower()) + query_body = QueryBody(query) + + item_count_per_page = 30 + query_body.options = { + "$top": item_count_per_page + } + + if marker: + query_body.options['$skipToken'] = marker + + query_result = ARGClient(cmd.cli_ctx).send(query_body) + result = _transform_community_gallery_list_output(query_result) + + continuation_token = query_result.get('$skipToken') + + if show_next_marker: + next_marker = {"nextMarker": continuation_token} + result.append(next_marker) + else: + if continuation_token: + logger.warning('Next Marker:') + logger.warning(continuation_token) + + return result + + +def _transform_community_gallery_list_output(result): + + result_data = result.get('data') + if not result_data: + return [] + + output = [] + for data_item in result_data: + from collections import OrderedDict + output_item = OrderedDict() + output_item['id'] = data_item.get('id') + output_item['location'] = data_item.get('location') + output_item['name'] = data_item.get('name') + + properties = data_item.get('properties') + if properties: + output_item['communityMetadata'] = properties.get('communityMetadata', {}) + output_item['uniqueId'] = properties.get('identifier', {}).get('uniqueId') + + output.append(output_item) + + return output + + +def list_vm_sizes(cmd, location): + from .operations.vm import VMListSizes + return VMListSizes(cli_ctx=cmd.cli_ctx)(command_args={ + "location": location, + }) + + +# endRegion From 85cec40ddead5f0a475bf3b94d0c68652f1f4bfa Mon Sep 17 00:00:00 2001 From: william051200 Date: Thu, 5 Mar 2026 12:37:52 +0800 Subject: [PATCH 5/7] Update example --- .../azure/cli/command_modules/vm/aaz/latest/vm/host/_create.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/azure-cli/azure/cli/command_modules/vm/aaz/latest/vm/host/_create.py 
b/src/azure-cli/azure/cli/command_modules/vm/aaz/latest/vm/host/_create.py index cc15cea42bc..3836cc3655b 100644 --- a/src/azure-cli/azure/cli/command_modules/vm/aaz/latest/vm/host/_create.py +++ b/src/azure-cli/azure/cli/command_modules/vm/aaz/latest/vm/host/_create.py @@ -46,7 +46,7 @@ def _build_arguments_schema(cls, *args, **kwargs): ) _args_schema.host_name = AAZStrArg( options=["-n", "--name", "--host-name"], - help="The name of the dedicated host .", + help="The name of the dedicated host.", required=True, ) _args_schema.resource_group = AAZResourceGroupNameArg( From f8843a94ba38ad3d9ecab0b1cea716fdace32b28 Mon Sep 17 00:00:00 2001 From: william051200 Date: Thu, 5 Mar 2026 13:53:30 +0800 Subject: [PATCH 6/7] Update test case and recordings --- .../recordings/test_dedicated_host_e2e.yaml | 4 +- .../recordings/test_vm_host_management.yaml | 629 +++++++++++------- .../vm/tests/latest/test_vm_commands.py | 4 +- 3 files changed, 386 insertions(+), 251 deletions(-) diff --git a/src/azure-cli/azure/cli/command_modules/vm/tests/latest/recordings/test_dedicated_host_e2e.yaml b/src/azure-cli/azure/cli/command_modules/vm/tests/latest/recordings/test_dedicated_host_e2e.yaml index 0cbdc555684..dc77ac6abdd 100644 --- a/src/azure-cli/azure/cli/command_modules/vm/tests/latest/recordings/test_dedicated_host_e2e.yaml +++ b/src/azure-cli/azure/cli/command_modules/vm/tests/latest/recordings/test_dedicated_host_e2e.yaml @@ -475,7 +475,7 @@ interactions: - AZURECLI/2.47.0 (AAZ) azsdk-python-core/1.24.0 Python/3.10.11 (Linux-5.15.0-1036-azure-x86_64-with-glibc2.31) VSTS_7b238909-6802-4b65-b90d-184bca47f458_build_220_0 method: GET - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_dedicated_host_000001/providers/Microsoft.Compute/hostGroups/my-host-group/hosts/my-host?api-version=2022-11-01 + uri: 
https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_dedicated_host_000001/providers/Microsoft.Compute/hostGroups/my-host-group/hosts/my-host?api-version=2024-11-01 response: body: string: "{\r\n \"name\": \"my-host\",\r\n \"id\": \"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_dedicated_host_000001/providers/Microsoft.Compute/hostGroups/my-host-group/hosts/my-host\",\r\n @@ -3711,7 +3711,7 @@ interactions: - AZURECLI/2.47.0 (AAZ) azsdk-python-core/1.24.0 Python/3.10.11 (Linux-5.15.0-1036-azure-x86_64-with-glibc2.31) VSTS_7b238909-6802-4b65-b90d-184bca47f458_build_220_0 method: GET - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_dedicated_host_000001/providers/Microsoft.Compute/hostGroups/my-host-group/hosts/my-host?api-version=2022-11-01 + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_dedicated_host_000001/providers/Microsoft.Compute/hostGroups/my-host-group/hosts/my-host?api-version=2024-11-01 response: body: string: "{\r\n \"name\": \"my-host\",\r\n \"id\": \"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_dedicated_host_000001/providers/Microsoft.Compute/hostGroups/my-host-group/hosts/my-host\",\r\n diff --git a/src/azure-cli/azure/cli/command_modules/vm/tests/latest/recordings/test_vm_host_management.yaml b/src/azure-cli/azure/cli/command_modules/vm/tests/latest/recordings/test_vm_host_management.yaml index ce35f1bea97..44a9e5d62a4 100644 --- a/src/azure-cli/azure/cli/command_modules/vm/tests/latest/recordings/test_vm_host_management.yaml +++ b/src/azure-cli/azure/cli/command_modules/vm/tests/latest/recordings/test_vm_host_management.yaml @@ -13,32 +13,35 @@ interactions: ParameterSetName: - -n -c -g User-Agent: - - AZURECLI/2.47.0 azsdk-python-azure-mgmt-resource/22.0.0 Python/3.10.11 
(Linux-5.15.0-1036-azure-x86_64-with-glibc2.31) - VSTS_7b238909-6802-4b65-b90d-184bca47f458_build_220_0 + - AZURECLI/2.84.0 azsdk-python-core/1.38.0 Python/3.12.10 (Windows-11-10.0.26200-SP0) method: GET uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/cli_test_vm_host_management_000001?api-version=2024-11-01 response: body: - string: '{"id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vm_host_management_000001","name":"cli_test_vm_host_management_000001","type":"Microsoft.Resources/resourceGroups","location":"westus","tags":{"product":"azurecli","cause":"automation","date":"2023-05-25T23:03:42Z"},"properties":{"provisioningState":"Succeeded"}}' + string: '{"id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vm_host_management_000001","name":"cli_test_vm_host_management_000001","type":"Microsoft.Resources/resourceGroups","location":"westus","tags":{"product":"azurecli","cause":"automation","test":"test_vm_host_management","date":"2026-03-05T05:52:18Z","module":"vm"},"properties":{"provisioningState":"Succeeded"}}' headers: cache-control: - no-cache content-length: - - '346' + - '393' content-type: - application/json; charset=utf-8 date: - - Thu, 25 May 2023 23:03:43 GMT + - Thu, 05 Mar 2026 05:52:23 GMT expires: - '-1' pragma: - no-cache strict-transport-security: - max-age=31536000; includeSubDomains - vary: - - Accept-Encoding + x-cache: + - CONFIG_NOCACHE x-content-type-options: - nosniff + x-ms-ratelimit-remaining-subscription-global-reads: + - '3749' + x-msedge-ref: + - 'Ref A: 0D75A685D1E0468AB6446DBBE3A2823A Ref B: SG2AA1070303034 Ref C: 2026-03-05T05:52:23Z' status: code: 200 message: OK @@ -60,13 +63,12 @@ interactions: ParameterSetName: - -n -c -g User-Agent: - - AZURECLI/2.47.0 azsdk-python-azure-mgmt-compute/29.1.0 Python/3.10.11 (Linux-5.15.0-1036-azure-x86_64-with-glibc2.31) - VSTS_7b238909-6802-4b65-b90d-184bca47f458_build_220_0 + - 
AZURECLI/2.84.0 azsdk-python-core/1.38.0 Python/3.12.10 (Windows-11-10.0.26200-SP0) method: PUT uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vm_host_management_000001/providers/Microsoft.Compute/hostGroups/my-host-group?api-version=2024-11-01 response: body: - string: "{\r\n \"name\": \"my-host-group\",\r\n \"id\": \"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/CLI_TEST_VM_HOST_MANAGEMENT_VSK5JJTFLJXLNBF6EJOQUDHOHV3DYR2RSXARMIO3BG3CNEL/providers/Microsoft.Compute/hostGroups/my-host-group\",\r\n + string: "{\r\n \"name\": \"my-host-group\",\r\n \"id\": \"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/CLI_TEST_VM_HOST_MANAGEMENT_Y4TE2FKHMDCYRNGXNCCBDXM3RPP7R62JDEADYLYQV2N27B2/providers/Microsoft.Compute/hostGroups/my-host-group\",\r\n \ \"type\": \"Microsoft.Compute/hostGroups\",\r\n \"location\": \"westus\",\r\n \ \"properties\": {\r\n \"platformFaultDomainCount\": 3,\r\n \"supportAutomaticPlacement\": false\r\n }\r\n}" @@ -78,25 +80,32 @@ interactions: content-type: - application/json; charset=utf-8 date: - - Thu, 25 May 2023 23:03:45 GMT + - Thu, 05 Mar 2026 05:52:23 GMT expires: - '-1' pragma: - no-cache - server: - - Microsoft-HTTPAPI/2.0 - - Microsoft-HTTPAPI/2.0 strict-transport-security: - max-age=31536000; includeSubDomains + x-cache: + - CONFIG_NOCACHE x-content-type-options: - nosniff + x-ms-need-to-refresh-epl-cache: + - 'False' + x-ms-operation-identifier: + - tenantId=ed94de55-1f87-4278-9651-525e7ba467d6,objectId=eeaac044-cddc-4cec-b91d-6c1da8fcdee0/westus/8b19d799-1970-400e-bee6-274cdd04902a x-ms-ratelimit-remaining-resource: - - Microsoft.Compute/PutDeleteDedicatedHost3Min;119,Microsoft.Compute/PutDeleteDedicatedHost30Min;599 + - Microsoft.Compute/PutDeleteDedicatedHostSubscriptionMaximum;119 + x-ms-ratelimit-remaining-subscription-global-writes: + - '2999' x-ms-ratelimit-remaining-subscription-writes: - - '1199' + - '199' + x-msedge-ref: + - 'Ref 
A: 9850F421BF864890A803DE0B934D1B87 Ref B: SG2AA1070301031 Ref C: 2026-03-05T05:52:23Z' status: code: 201 - message: Created + message: '' - request: body: null headers: @@ -111,39 +120,41 @@ interactions: ParameterSetName: - -g User-Agent: - - AZURECLI/2.47.0 (AAZ) azsdk-python-core/1.24.0 Python/3.10.11 (Linux-5.15.0-1036-azure-x86_64-with-glibc2.31) - VSTS_7b238909-6802-4b65-b90d-184bca47f458_build_220_0 + - AZURECLI/2.84.0 azsdk-python-core/1.38.0 Python/3.12.10 (Windows-11-10.0.26200-SP0) method: GET uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vm_host_management_000001/providers/Microsoft.Compute/hostGroups?api-version=2019-03-01 response: body: - string: "{\r\n \"value\": [\r\n {\r\n \"name\": \"my-host-group\",\r\n - \ \"id\": \"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/CLI_TEST_VM_HOST_MANAGEMENT_VSK5JJTFLJXLNBF6EJOQUDHOHV3DYR2RSXARMIO3BG3CNEL/providers/Microsoft.Compute/hostGroups/my-host-group\",\r\n - \ \"type\": \"Microsoft.Compute/hostGroups\",\r\n \"location\": \"westus\",\r\n - \ \"properties\": {\r\n \"platformFaultDomainCount\": 3\r\n }\r\n - \ }\r\n ]\r\n}" + string: '{"value":[{"name":"my-host-group","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/CLI_TEST_VM_HOST_MANAGEMENT_Y4TE2FKHMDCYRNGXNCCBDXM3RPP7R62JDEADYLYQV2N27B2/providers/Microsoft.Compute/hostGroups/my-host-group","type":"Microsoft.Compute/hostGroups","location":"westus","properties":{"platformFaultDomainCount":3}}]}' headers: cache-control: - no-cache content-length: - - '428' + - '341' content-type: - application/json; charset=utf-8 date: - - Thu, 25 May 2023 23:03:45 GMT + - Thu, 05 Mar 2026 05:52:25 GMT expires: - '-1' pragma: - no-cache - server: - - Microsoft-HTTPAPI/2.0 - - Microsoft-HTTPAPI/2.0 strict-transport-security: - max-age=31536000; includeSubDomains + x-cache: + - CONFIG_NOCACHE x-content-type-options: - nosniff + x-ms-need-to-refresh-epl-cache: + - 
'False' + x-ms-original-request-ids: + - 751f8627-8468-418a-a40a-8da1060d9563 x-ms-ratelimit-remaining-resource: - - Microsoft.Compute/GetDedicatedHost3Min;249 + - Microsoft.Compute/GetDedicatedHostSubscriptionMaximum;539 + x-ms-ratelimit-remaining-subscription-global-reads: + - '3749' + x-msedge-ref: + - 'Ref A: 36E27A254EA8448990A13D6B02FA2109 Ref B: SG2AA1070303042 Ref C: 2026-03-05T05:52:25Z' status: code: 200 message: OK @@ -161,38 +172,41 @@ interactions: ParameterSetName: - -n --host-group -d -g --sku User-Agent: - - AZURECLI/2.47.0 azsdk-python-azure-mgmt-resource/22.0.0 Python/3.10.11 (Linux-5.15.0-1036-azure-x86_64-with-glibc2.31) - VSTS_7b238909-6802-4b65-b90d-184bca47f458_build_220_0 + - AZURECLI/2.84.0 azsdk-python-core/1.38.0 Python/3.12.10 (Windows-11-10.0.26200-SP0) method: GET uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/cli_test_vm_host_management_000001?api-version=2024-11-01 response: body: - string: '{"id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vm_host_management_000001","name":"cli_test_vm_host_management_000001","type":"Microsoft.Resources/resourceGroups","location":"westus","tags":{"product":"azurecli","cause":"automation","date":"2023-05-25T23:03:42Z"},"properties":{"provisioningState":"Succeeded"}}' + string: '{"id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vm_host_management_000001","name":"cli_test_vm_host_management_000001","type":"Microsoft.Resources/resourceGroups","location":"westus","tags":{"product":"azurecli","cause":"automation","test":"test_vm_host_management","date":"2026-03-05T05:52:18Z","module":"vm"},"properties":{"provisioningState":"Succeeded"}}' headers: cache-control: - no-cache content-length: - - '346' + - '393' content-type: - application/json; charset=utf-8 date: - - Thu, 25 May 2023 23:03:46 GMT + - Thu, 05 Mar 2026 05:52:25 GMT expires: - '-1' pragma: - no-cache strict-transport-security: 
- max-age=31536000; includeSubDomains - vary: - - Accept-Encoding + x-cache: + - CONFIG_NOCACHE x-content-type-options: - nosniff + x-ms-ratelimit-remaining-subscription-global-reads: + - '3749' + x-msedge-ref: + - 'Ref A: 2AC3A41F346A416D839D795C4E800FB6 Ref B: SG2AA1040512042 Ref C: 2026-03-05T05:52:25Z' status: code: 200 message: OK - request: - body: '{"location": "westus", "sku": {"name": "DSv3-Type1"}, "properties": {"platformFaultDomain": - 2}}' + body: '{"location": "westus", "properties": {"platformFaultDomain": 2}, "sku": + {"name": "DCSv2-Type1"}}' headers: Accept: - application/json @@ -203,56 +217,62 @@ interactions: Connection: - keep-alive Content-Length: - - '95' + - '96' Content-Type: - application/json ParameterSetName: - -n --host-group -d -g --sku User-Agent: - - AZURECLI/2.47.0 azsdk-python-azure-mgmt-compute/29.1.0 Python/3.10.11 (Linux-5.15.0-1036-azure-x86_64-with-glibc2.31) - VSTS_7b238909-6802-4b65-b90d-184bca47f458_build_220_0 + - AZURECLI/2.84.0 azsdk-python-core/1.38.0 Python/3.12.10 (Windows-11-10.0.26200-SP0) method: PUT uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vm_host_management_000001/providers/Microsoft.Compute/hostGroups/my-host-group/hosts/my-host?api-version=2024-11-01 response: body: string: "{\r\n \"name\": \"my-host\",\r\n \"id\": \"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vm_host_management_000001/providers/Microsoft.Compute/hostGroups/my-host-group/hosts/my-host\",\r\n \ \"type\": \"Microsoft.Compute/hostGroups/hosts\",\r\n \"location\": \"westus\",\r\n - \ \"sku\": {\r\n \"name\": \"DSv3-Type1\"\r\n },\r\n \"properties\": + \ \"sku\": {\r\n \"name\": \"DCSv2-Type1\"\r\n },\r\n \"properties\": {\r\n \"platformFaultDomain\": 2,\r\n \"autoReplaceOnFailure\": true,\r\n - \ \"hostId\": \"6d6cbc99-3866-4077-a0c2-e6561d64b989\",\r\n \"provisioningState\": - \"Creating\",\r\n \"timeCreated\": 
\"2023-05-25T23:03:47.8470658+00:00\"\r\n + \ \"hostId\": \"d540fa68-5e31-412d-9ec8-95e529c2df3c\",\r\n \"provisioningState\": + \"Creating\",\r\n \"timeCreated\": \"2026-03-05T05:52:26.8906734+00:00\"\r\n \ }\r\n}" headers: azure-asyncnotification: - Enabled azure-asyncoperation: - - https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/providers/Microsoft.Compute/locations/westus/operations/7bdcf430-81d6-4fdf-ad56-7adf8d956e85?p=571046f6-b640-41c1-86f7-f9f044b5adf9&api-version=2024-11-01 + - https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/providers/Microsoft.Compute/locations/westus/operations/6e9f9185-e5f5-46a0-857d-43d133c229e5?p=7b868834-a188-4556-8f0e-38ea6fa689ce&api-version=2024-11-01&t=639082867470960290&c=MIIHhzCCBm-gAwIBAgITHgf_SDN0U0Mca1SI0QAAB_9IMzANBgkqhkiG9w0BAQsFADBEMRMwEQYKCZImiZPyLGQBGRYDR0JMMRMwEQYKCZImiZPyLGQBGRYDQU1FMRgwFgYDVQQDEw9BTUUgSW5mcmEgQ0EgMDYwHhcNMjYwMjE5MDYyODQwWhcNMjYwNTI0MjI1NzAzWjBAMT4wPAYDVQQDEzVhc3luY29wZXJhdGlvbnNpZ25pbmdjZXJ0aWZpY2F0ZS5tYW5hZ2VtZW50LmF6dXJlLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL3gBluMdrlD6ardxpqPmBSYIWUZhonBiNzi4-_Vw3g3Oro9RIUs6ZgPPueDGitorH-JQTqF81P_5QS92kcH5lXb6doPV1q_erI_e20ynOSwv5jOpwnB8O1qCCtpY__wHr-QcB0Lm4K2jLBpKfwOLTZXA5H1VEKxDXmEWP-EQELhaFdVIeJT0qwvEmhDGKnNn1-T2y6yZrYJaboDSEK8AB9KXc3mWLIwh8_G9twTikTy2PuHSzGUy9QLpQMImh2VVk5Ry0Pfpaypw8fNupz4-WjXfVayKHDyMN5Sbe7kFmbdxnISJ2hOyPiNlGB5nUXpHRpMys37mof_UwKdRJcixQECAwEAAaOCBHQwggRwMCcGCSsGAQQBgjcVCgQaMBgwCgYIKwYBBQUHAwEwCgYIKwYBBQUHAwIwPQYJKwYBBAGCNxUHBDAwLgYmKwYBBAGCNxUIhpDjDYTVtHiE8Ys-hZvdFs6dEoFghfmRS4WsmTQCAWQCAQcwggHLBggrBgEFBQcBAQSCAb0wggG5MGMGCCsGAQUFBzAChldodHRwOi8vY3JsLm1pY3Jvc29mdC5jb20vcGtpaW5mcmEvQ2VydHMvQkwyUEtJSU5UQ0EwMi5BTUUuR0JMX0FNRSUyMEluZnJhJTIwQ0ElMjAwNi5jcnQwUwYIKwYBBQUHMAKGR2h0dHA6Ly9jcmwxLmFtZS5nYmwvYWlhL0JMMlBLSUlOVENBMDIuQU1FLkdCTF9BTUUlMjBJbmZyYSUyMENBJTIwMDYuY3J0MFMGCCsGAQUFBzAChkdodHRwOi8vY3JsMi5hbWUuZ2JsL2FpYS9CTDJQS0lJTlRDQTAyLkFNRS5HQkxfQU1FJTIwSW5mcmElMjBDQSUyMDA2L
mNydDBTBggrBgEFBQcwAoZHaHR0cDovL2NybDMuYW1lLmdibC9haWEvQkwyUEtJSU5UQ0EwMi5BTUUuR0JMX0FNRSUyMEluZnJhJTIwQ0ElMjAwNi5jcnQwUwYIKwYBBQUHMAKGR2h0dHA6Ly9jcmw0LmFtZS5nYmwvYWlhL0JMMlBLSUlOVENBMDIuQU1FLkdCTF9BTUUlMjBJbmZyYSUyMENBJTIwMDYuY3J0MB0GA1UdDgQWBBSpvSUr5SOrbw_81Q-45DGZEMcVRTAOBgNVHQ8BAf8EBAMCBaAwggEmBgNVHR8EggEdMIIBGTCCARWgggERoIIBDYY_aHR0cDovL2NybC5taWNyb3NvZnQuY29tL3BraWluZnJhL0NSTC9BTUUlMjBJbmZyYSUyMENBJTIwMDYuY3JshjFodHRwOi8vY3JsMS5hbWUuZ2JsL2NybC9BTUUlMjBJbmZyYSUyMENBJTIwMDYuY3JshjFodHRwOi8vY3JsMi5hbWUuZ2JsL2NybC9BTUUlMjBJbmZyYSUyMENBJTIwMDYuY3JshjFodHRwOi8vY3JsMy5hbWUuZ2JsL2NybC9BTUUlMjBJbmZyYSUyMENBJTIwMDYuY3JshjFodHRwOi8vY3JsNC5hbWUuZ2JsL2NybC9BTUUlMjBJbmZyYSUyMENBJTIwMDYuY3JsMIGdBgNVHSAEgZUwgZIwDAYKKwYBBAGCN3sBATBmBgorBgEEAYI3ewICMFgwVgYIKwYBBQUHAgIwSh5IADMAMwBlADAAMQA5ADIAMQAtADQAZAA2ADQALQA0AGYAOABjAC0AYQAwADUANQAtADUAYgBkAGEAZgBmAGQANQBlADMAMwBkMAwGCisGAQQBgjd7AwIwDAYKKwYBBAGCN3sEAjAfBgNVHSMEGDAWgBTxRmjG8cPwKy19i2rhsvm-NfzRQTAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDQYJKoZIhvcNAQELBQADggEBAKPjOlpIFqUJhjTDyaKZju7P26-JxO3YKxxpiuONQQggB8Pfq85rCh4TYMRHTRF9-SaF8UFmfyVp5MKCn4fUJyrYkEfjqrWoLand203HijTSRlrvcI7H4LdCBUE4oWD21md4XzcNZ61hmhlg0z_LvEFluaWR6FJJgORgK2V5zkvf8GxYZY8SiUSX0FogWYQc0rgwrb9F3zZBApnguvbEEAfZWGixBF7eZX5U89oac2ZpJ6yt5mIyLIWUbKqOIqoTVE7ZWq0g-rZNF6SxzSWnEzmKCIVtHC_4lvCAtexAJWshMLjYvNKYi9WTvrrtCKixbaK9Y4uLtPGLUgNVtMFnJIc&s=TSQ0PHjiG3VS40EHK3AC0-wawxMiEow_bbQRy4kfeMHw7NDkGtAwYw6DtcNtyMQmuKu-o0nRSYYO3NhyoqIv_LAJqimg_pkZ6Mua1_vtT0sgWDDAbQE_MFPerujtbzrGNG6BHyQlrLzjZzGbxWsS0Fgp9hWyBny5MsQd7hH731JZHOByMi5lbyJeVWfUqnxOmWhDothtKSG1Ig6wfDyUEMilXHL6lp5bq00WLZiYE7viV3-Io2g_bY4b94dwttdT_nqnKYj3_5HeiIZlopTscrXK6rMxR779ZgLlBNSt2Ld0_ouSJqm-DdITLT10otqbpSHWhd-XPfWzlw5JDuHjDQ&h=9syV5gkAdtOfF4HueZAyg4SIOsmj8JtGJCAPYkbv5Pk cache-control: - no-cache content-length: - - '564' + - '565' content-type: - application/json; charset=utf-8 date: - - Thu, 25 May 2023 23:03:48 GMT + - Thu, 05 Mar 2026 05:52:26 GMT expires: - '-1' pragma: - no-cache - server: - - 
Microsoft-HTTPAPI/2.0 - - Microsoft-HTTPAPI/2.0 strict-transport-security: - max-age=31536000; includeSubDomains + x-cache: + - CONFIG_NOCACHE x-content-type-options: - nosniff + x-ms-need-to-refresh-epl-cache: + - 'False' + x-ms-operation-identifier: + - tenantId=ed94de55-1f87-4278-9651-525e7ba467d6,objectId=eeaac044-cddc-4cec-b91d-6c1da8fcdee0/westus/793e7151-07e9-4622-9b4f-d213153330e8 x-ms-ratelimit-remaining-resource: - - Microsoft.Compute/PutDeleteDedicatedHost3Min;118,Microsoft.Compute/PutDeleteDedicatedHost30Min;598 + - Microsoft.Compute/PutDeleteDedicatedHostSubscriptionMaximum;118 + x-ms-ratelimit-remaining-subscription-global-writes: + - '2999' x-ms-ratelimit-remaining-subscription-writes: - - '1199' + - '199' + x-msedge-ref: + - 'Ref A: 6954D9D4A2304B97BE185FFC3A9E14EB Ref B: SG2AA1070301052 Ref C: 2026-03-05T05:52:26Z' status: code: 201 - message: Created + message: '' - request: body: null headers: @@ -267,15 +287,67 @@ interactions: ParameterSetName: - -n --host-group -d -g --sku User-Agent: - - AZURECLI/2.47.0 azsdk-python-azure-mgmt-compute/29.1.0 Python/3.10.11 (Linux-5.15.0-1036-azure-x86_64-with-glibc2.31) - VSTS_7b238909-6802-4b65-b90d-184bca47f458_build_220_0 + - AZURECLI/2.84.0 azsdk-python-core/1.38.0 Python/3.12.10 (Windows-11-10.0.26200-SP0) method: GET - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/providers/Microsoft.Compute/locations/westus/operations/7bdcf430-81d6-4fdf-ad56-7adf8d956e85?p=571046f6-b640-41c1-86f7-f9f044b5adf9&api-version=2024-11-01 + uri: 
https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/providers/Microsoft.Compute/locations/westus/operations/6e9f9185-e5f5-46a0-857d-43d133c229e5?p=7b868834-a188-4556-8f0e-38ea6fa689ce&api-version=2024-11-01&t=639082867470960290&c=MIIHhzCCBm-gAwIBAgITHgf_SDN0U0Mca1SI0QAAB_9IMzANBgkqhkiG9w0BAQsFADBEMRMwEQYKCZImiZPyLGQBGRYDR0JMMRMwEQYKCZImiZPyLGQBGRYDQU1FMRgwFgYDVQQDEw9BTUUgSW5mcmEgQ0EgMDYwHhcNMjYwMjE5MDYyODQwWhcNMjYwNTI0MjI1NzAzWjBAMT4wPAYDVQQDEzVhc3luY29wZXJhdGlvbnNpZ25pbmdjZXJ0aWZpY2F0ZS5tYW5hZ2VtZW50LmF6dXJlLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL3gBluMdrlD6ardxpqPmBSYIWUZhonBiNzi4-_Vw3g3Oro9RIUs6ZgPPueDGitorH-JQTqF81P_5QS92kcH5lXb6doPV1q_erI_e20ynOSwv5jOpwnB8O1qCCtpY__wHr-QcB0Lm4K2jLBpKfwOLTZXA5H1VEKxDXmEWP-EQELhaFdVIeJT0qwvEmhDGKnNn1-T2y6yZrYJaboDSEK8AB9KXc3mWLIwh8_G9twTikTy2PuHSzGUy9QLpQMImh2VVk5Ry0Pfpaypw8fNupz4-WjXfVayKHDyMN5Sbe7kFmbdxnISJ2hOyPiNlGB5nUXpHRpMys37mof_UwKdRJcixQECAwEAAaOCBHQwggRwMCcGCSsGAQQBgjcVCgQaMBgwCgYIKwYBBQUHAwEwCgYIKwYBBQUHAwIwPQYJKwYBBAGCNxUHBDAwLgYmKwYBBAGCNxUIhpDjDYTVtHiE8Ys-hZvdFs6dEoFghfmRS4WsmTQCAWQCAQcwggHLBggrBgEFBQcBAQSCAb0wggG5MGMGCCsGAQUFBzAChldodHRwOi8vY3JsLm1pY3Jvc29mdC5jb20vcGtpaW5mcmEvQ2VydHMvQkwyUEtJSU5UQ0EwMi5BTUUuR0JMX0FNRSUyMEluZnJhJTIwQ0ElMjAwNi5jcnQwUwYIKwYBBQUHMAKGR2h0dHA6Ly9jcmwxLmFtZS5nYmwvYWlhL0JMMlBLSUlOVENBMDIuQU1FLkdCTF9BTUUlMjBJbmZyYSUyMENBJTIwMDYuY3J0MFMGCCsGAQUFBzAChkdodHRwOi8vY3JsMi5hbWUuZ2JsL2FpYS9CTDJQS0lJTlRDQTAyLkFNRS5HQkxfQU1FJTIwSW5mcmElMjBDQSUyMDA2LmNydDBTBggrBgEFBQcwAoZHaHR0cDovL2NybDMuYW1lLmdibC9haWEvQkwyUEtJSU5UQ0EwMi5BTUUuR0JMX0FNRSUyMEluZnJhJTIwQ0ElMjAwNi5jcnQwUwYIKwYBBQUHMAKGR2h0dHA6Ly9jcmw0LmFtZS5nYmwvYWlhL0JMMlBLSUlOVENBMDIuQU1FLkdCTF9BTUUlMjBJbmZyYSUyMENBJTIwMDYuY3J0MB0GA1UdDgQWBBSpvSUr5SOrbw_81Q-45DGZEMcVRTAOBgNVHQ8BAf8EBAMCBaAwggEmBgNVHR8EggEdMIIBGTCCARWgggERoIIBDYY_aHR0cDovL2NybC5taWNyb3NvZnQuY29tL3BraWluZnJhL0NSTC9BTUUlMjBJbmZyYSUyMENBJTIwMDYuY3JshjFodHRwOi8vY3JsMS5hbWUuZ2JsL2NybC9BTUUlMjBJbmZyYSUyMENBJTIwMDYuY3JshjFodHRwOi8vY3JsMi5hbWUuZ2JsL2NybC9BTU
UlMjBJbmZyYSUyMENBJTIwMDYuY3JshjFodHRwOi8vY3JsMy5hbWUuZ2JsL2NybC9BTUUlMjBJbmZyYSUyMENBJTIwMDYuY3JshjFodHRwOi8vY3JsNC5hbWUuZ2JsL2NybC9BTUUlMjBJbmZyYSUyMENBJTIwMDYuY3JsMIGdBgNVHSAEgZUwgZIwDAYKKwYBBAGCN3sBATBmBgorBgEEAYI3ewICMFgwVgYIKwYBBQUHAgIwSh5IADMAMwBlADAAMQA5ADIAMQAtADQAZAA2ADQALQA0AGYAOABjAC0AYQAwADUANQAtADUAYgBkAGEAZgBmAGQANQBlADMAMwBkMAwGCisGAQQBgjd7AwIwDAYKKwYBBAGCN3sEAjAfBgNVHSMEGDAWgBTxRmjG8cPwKy19i2rhsvm-NfzRQTAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDQYJKoZIhvcNAQELBQADggEBAKPjOlpIFqUJhjTDyaKZju7P26-JxO3YKxxpiuONQQggB8Pfq85rCh4TYMRHTRF9-SaF8UFmfyVp5MKCn4fUJyrYkEfjqrWoLand203HijTSRlrvcI7H4LdCBUE4oWD21md4XzcNZ61hmhlg0z_LvEFluaWR6FJJgORgK2V5zkvf8GxYZY8SiUSX0FogWYQc0rgwrb9F3zZBApnguvbEEAfZWGixBF7eZX5U89oac2ZpJ6yt5mIyLIWUbKqOIqoTVE7ZWq0g-rZNF6SxzSWnEzmKCIVtHC_4lvCAtexAJWshMLjYvNKYi9WTvrrtCKixbaK9Y4uLtPGLUgNVtMFnJIc&s=TSQ0PHjiG3VS40EHK3AC0-wawxMiEow_bbQRy4kfeMHw7NDkGtAwYw6DtcNtyMQmuKu-o0nRSYYO3NhyoqIv_LAJqimg_pkZ6Mua1_vtT0sgWDDAbQE_MFPerujtbzrGNG6BHyQlrLzjZzGbxWsS0Fgp9hWyBny5MsQd7hH731JZHOByMi5lbyJeVWfUqnxOmWhDothtKSG1Ig6wfDyUEMilXHL6lp5bq00WLZiYE7viV3-Io2g_bY4b94dwttdT_nqnKYj3_5HeiIZlopTscrXK6rMxR779ZgLlBNSt2Ld0_ouSJqm-DdITLT10otqbpSHWhd-XPfWzlw5JDuHjDQ&h=9syV5gkAdtOfF4HueZAyg4SIOsmj8JtGJCAPYkbv5Pk response: body: - string: "{\r\n \"startTime\": \"2023-05-25T23:03:47.7376851+00:00\",\r\n \"endTime\": - \"2023-05-25T23:03:48.7846726+00:00\",\r\n \"status\": \"Succeeded\",\r\n - \ \"name\": \"7bdcf430-81d6-4fdf-ad56-7adf8d956e85\"\r\n}" + string: "{\r\n \"startTime\": \"2026-03-05T05:52:26.7812856+00:00\",\r\n \"status\": + \"InProgress\",\r\n \"name\": \"6e9f9185-e5f5-46a0-857d-43d133c229e5\"\r\n}" + headers: + cache-control: + - no-cache + content-length: + - '134' + content-type: + - application/json; charset=utf-8 + date: + - Thu, 05 Mar 2026 05:52:27 GMT + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + 
x-ms-need-to-refresh-epl-cache: + - 'False' + x-ms-operation-identifier: + - tenantId=ed94de55-1f87-4278-9651-525e7ba467d6,objectId=eeaac044-cddc-4cec-b91d-6c1da8fcdee0/southeastasia/f4b69d36-116c-4eb2-8b42-8370213b970e + x-ms-ratelimit-remaining-resource: + - Microsoft.Compute/GetOperationResource;44,Microsoft.Compute/GetOperationSubscriptionMaximum;14999 + x-ms-ratelimit-remaining-subscription-global-reads: + - '3749' + x-msedge-ref: + - 'Ref A: 3544DC84C04742879BBF09F020826289 Ref B: SG2AA1040515060 Ref C: 2026-03-05T05:52:27Z' + status: + code: 200 + message: '' +- request: + body: null + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + CommandName: + - vm host create + Connection: + - keep-alive + ParameterSetName: + - -n --host-group -d -g --sku + User-Agent: + - AZURECLI/2.84.0 azsdk-python-core/1.38.0 Python/3.12.10 (Windows-11-10.0.26200-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/providers/Microsoft.Compute/locations/westus/operations/6e9f9185-e5f5-46a0-857d-43d133c229e5?p=7b868834-a188-4556-8f0e-38ea6fa689ce&api-version=2024-11-01&t=639082867470960290&c=MIIHhzCCBm-gAwIBAgITHgf_SDN0U0Mca1SI0QAAB_9IMzANBgkqhkiG9w0BAQsFADBEMRMwEQYKCZImiZPyLGQBGRYDR0JMMRMwEQYKCZImiZPyLGQBGRYDQU1FMRgwFgYDVQQDEw9BTUUgSW5mcmEgQ0EgMDYwHhcNMjYwMjE5MDYyODQwWhcNMjYwNTI0MjI1NzAzWjBAMT4wPAYDVQQDEzVhc3luY29wZXJhdGlvbnNpZ25pbmdjZXJ0aWZpY2F0ZS5tYW5hZ2VtZW50LmF6dXJlLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL3gBluMdrlD6ardxpqPmBSYIWUZhonBiNzi4-_Vw3g3Oro9RIUs6ZgPPueDGitorH-JQTqF81P_5QS92kcH5lXb6doPV1q_erI_e20ynOSwv5jOpwnB8O1qCCtpY__wHr-QcB0Lm4K2jLBpKfwOLTZXA5H1VEKxDXmEWP-EQELhaFdVIeJT0qwvEmhDGKnNn1-T2y6yZrYJaboDSEK8AB9KXc3mWLIwh8_G9twTikTy2PuHSzGUy9QLpQMImh2VVk5Ry0Pfpaypw8fNupz4-WjXfVayKHDyMN5Sbe7kFmbdxnISJ2hOyPiNlGB5nUXpHRpMys37mof_UwKdRJcixQECAwEAAaOCBHQwggRwMCcGCSsGAQQBgjcVCgQaMBgwCgYIKwYBBQUHAwEwCgYIKwYBBQUHAwIwPQYJKwYBBAGCNxUHBDAwLgYmKwYBBAGCNxUIhpDjDYTVtHiE8Ys-hZvdFs6dEoFghfmRS4WsmTQCAWQCAQcwggHLBg
grBgEFBQcBAQSCAb0wggG5MGMGCCsGAQUFBzAChldodHRwOi8vY3JsLm1pY3Jvc29mdC5jb20vcGtpaW5mcmEvQ2VydHMvQkwyUEtJSU5UQ0EwMi5BTUUuR0JMX0FNRSUyMEluZnJhJTIwQ0ElMjAwNi5jcnQwUwYIKwYBBQUHMAKGR2h0dHA6Ly9jcmwxLmFtZS5nYmwvYWlhL0JMMlBLSUlOVENBMDIuQU1FLkdCTF9BTUUlMjBJbmZyYSUyMENBJTIwMDYuY3J0MFMGCCsGAQUFBzAChkdodHRwOi8vY3JsMi5hbWUuZ2JsL2FpYS9CTDJQS0lJTlRDQTAyLkFNRS5HQkxfQU1FJTIwSW5mcmElMjBDQSUyMDA2LmNydDBTBggrBgEFBQcwAoZHaHR0cDovL2NybDMuYW1lLmdibC9haWEvQkwyUEtJSU5UQ0EwMi5BTUUuR0JMX0FNRSUyMEluZnJhJTIwQ0ElMjAwNi5jcnQwUwYIKwYBBQUHMAKGR2h0dHA6Ly9jcmw0LmFtZS5nYmwvYWlhL0JMMlBLSUlOVENBMDIuQU1FLkdCTF9BTUUlMjBJbmZyYSUyMENBJTIwMDYuY3J0MB0GA1UdDgQWBBSpvSUr5SOrbw_81Q-45DGZEMcVRTAOBgNVHQ8BAf8EBAMCBaAwggEmBgNVHR8EggEdMIIBGTCCARWgggERoIIBDYY_aHR0cDovL2NybC5taWNyb3NvZnQuY29tL3BraWluZnJhL0NSTC9BTUUlMjBJbmZyYSUyMENBJTIwMDYuY3JshjFodHRwOi8vY3JsMS5hbWUuZ2JsL2NybC9BTUUlMjBJbmZyYSUyMENBJTIwMDYuY3JshjFodHRwOi8vY3JsMi5hbWUuZ2JsL2NybC9BTUUlMjBJbmZyYSUyMENBJTIwMDYuY3JshjFodHRwOi8vY3JsMy5hbWUuZ2JsL2NybC9BTUUlMjBJbmZyYSUyMENBJTIwMDYuY3JshjFodHRwOi8vY3JsNC5hbWUuZ2JsL2NybC9BTUUlMjBJbmZyYSUyMENBJTIwMDYuY3JsMIGdBgNVHSAEgZUwgZIwDAYKKwYBBAGCN3sBATBmBgorBgEEAYI3ewICMFgwVgYIKwYBBQUHAgIwSh5IADMAMwBlADAAMQA5ADIAMQAtADQAZAA2ADQALQA0AGYAOABjAC0AYQAwADUANQAtADUAYgBkAGEAZgBmAGQANQBlADMAMwBkMAwGCisGAQQBgjd7AwIwDAYKKwYBBAGCN3sEAjAfBgNVHSMEGDAWgBTxRmjG8cPwKy19i2rhsvm-NfzRQTAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDQYJKoZIhvcNAQELBQADggEBAKPjOlpIFqUJhjTDyaKZju7P26-JxO3YKxxpiuONQQggB8Pfq85rCh4TYMRHTRF9-SaF8UFmfyVp5MKCn4fUJyrYkEfjqrWoLand203HijTSRlrvcI7H4LdCBUE4oWD21md4XzcNZ61hmhlg0z_LvEFluaWR6FJJgORgK2V5zkvf8GxYZY8SiUSX0FogWYQc0rgwrb9F3zZBApnguvbEEAfZWGixBF7eZX5U89oac2ZpJ6yt5mIyLIWUbKqOIqoTVE7ZWq0g-rZNF6SxzSWnEzmKCIVtHC_4lvCAtexAJWshMLjYvNKYi9WTvrrtCKixbaK9Y4uLtPGLUgNVtMFnJIc&s=TSQ0PHjiG3VS40EHK3AC0-wawxMiEow_bbQRy4kfeMHw7NDkGtAwYw6DtcNtyMQmuKu-o0nRSYYO3NhyoqIv_LAJqimg_pkZ6Mua1_vtT0sgWDDAbQE_MFPerujtbzrGNG6BHyQlrLzjZzGbxWsS0Fgp9hWyBny5MsQd7hH731JZHOByMi5lbyJeVWfUqnxOmWhDothtKSG1Ig6wfDyUEMilXHL6lp5bq00WLZiYE7viV3-Io2g_bY4b94dwttdT_nqn
KYj3_5HeiIZlopTscrXK6rMxR779ZgLlBNSt2Ld0_ouSJqm-DdITLT10otqbpSHWhd-XPfWzlw5JDuHjDQ&h=9syV5gkAdtOfF4HueZAyg4SIOsmj8JtGJCAPYkbv5Pk + response: + body: + string: "{\r\n \"startTime\": \"2026-03-05T05:52:26.7812856+00:00\",\r\n \"endTime\": + \"2026-03-05T05:52:28.4062927+00:00\",\r\n \"status\": \"Succeeded\",\r\n + \ \"name\": \"6e9f9185-e5f5-46a0-857d-43d133c229e5\"\r\n}" headers: cache-control: - no-cache @@ -284,27 +356,30 @@ interactions: content-type: - application/json; charset=utf-8 date: - - Thu, 25 May 2023 23:04:18 GMT + - Thu, 05 Mar 2026 05:52:58 GMT expires: - '-1' pragma: - no-cache - server: - - Microsoft-HTTPAPI/2.0 - - Microsoft-HTTPAPI/2.0 strict-transport-security: - max-age=31536000; includeSubDomains - transfer-encoding: - - chunked - vary: - - Accept-Encoding + x-cache: + - CONFIG_NOCACHE x-content-type-options: - nosniff + x-ms-need-to-refresh-epl-cache: + - 'False' + x-ms-operation-identifier: + - tenantId=ed94de55-1f87-4278-9651-525e7ba467d6,objectId=eeaac044-cddc-4cec-b91d-6c1da8fcdee0/southeastasia/1c09be18-6651-4571-b860-a654583a085d x-ms-ratelimit-remaining-resource: - - Microsoft.Compute/GetOperation3Min;14999,Microsoft.Compute/GetOperation30Min;29999 + - Microsoft.Compute/GetOperationResource;43,Microsoft.Compute/GetOperationSubscriptionMaximum;14998 + x-ms-ratelimit-remaining-subscription-global-reads: + - '3749' + x-msedge-ref: + - 'Ref A: 7D475013C09E407584AF9D0DEAA03AF2 Ref B: SG2AA1070304040 Ref C: 2026-03-05T05:52:58Z' status: code: 200 - message: OK + message: '' - request: body: null headers: @@ -319,49 +394,49 @@ interactions: ParameterSetName: - -n --host-group -d -g --sku User-Agent: - - AZURECLI/2.47.0 azsdk-python-azure-mgmt-compute/29.1.0 Python/3.10.11 (Linux-5.15.0-1036-azure-x86_64-with-glibc2.31) - VSTS_7b238909-6802-4b65-b90d-184bca47f458_build_220_0 + - AZURECLI/2.84.0 azsdk-python-core/1.38.0 Python/3.12.10 (Windows-11-10.0.26200-SP0) method: GET uri: 
https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vm_host_management_000001/providers/Microsoft.Compute/hostGroups/my-host-group/hosts/my-host?api-version=2024-11-01 response: body: string: "{\r\n \"name\": \"my-host\",\r\n \"id\": \"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vm_host_management_000001/providers/Microsoft.Compute/hostGroups/my-host-group/hosts/my-host\",\r\n \ \"type\": \"Microsoft.Compute/hostGroups/hosts\",\r\n \"location\": \"westus\",\r\n - \ \"sku\": {\r\n \"name\": \"DSv3-Type1\"\r\n },\r\n \"properties\": + \ \"sku\": {\r\n \"name\": \"DCSv2-Type1\"\r\n },\r\n \"properties\": {\r\n \"platformFaultDomain\": 2,\r\n \"autoReplaceOnFailure\": true,\r\n - \ \"hostId\": \"6d6cbc99-3866-4077-a0c2-e6561d64b989\",\r\n \"virtualMachines\": - [],\r\n \"provisioningTime\": \"2023-05-25T23:03:48.3627401+00:00\",\r\n - \ \"provisioningState\": \"Succeeded\",\r\n \"timeCreated\": \"2023-05-25T23:03:47.8470658+00:00\"\r\n + \ \"hostId\": \"d540fa68-5e31-412d-9ec8-95e529c2df3c\",\r\n \"virtualMachines\": + [],\r\n \"provisioningTime\": \"2026-03-05T05:52:28.2657271+00:00\",\r\n + \ \"provisioningState\": \"Succeeded\",\r\n \"timeCreated\": \"2026-03-05T05:52:26.8906734+00:00\"\r\n \ }\r\n}" headers: cache-control: - no-cache content-length: - - '655' + - '656' content-type: - application/json; charset=utf-8 date: - - Thu, 25 May 2023 23:04:18 GMT + - Thu, 05 Mar 2026 05:52:58 GMT expires: - '-1' pragma: - no-cache - server: - - Microsoft-HTTPAPI/2.0 - - Microsoft-HTTPAPI/2.0 strict-transport-security: - max-age=31536000; includeSubDomains - transfer-encoding: - - chunked - vary: - - Accept-Encoding + x-cache: + - CONFIG_NOCACHE x-content-type-options: - nosniff + x-ms-need-to-refresh-epl-cache: + - 'False' x-ms-ratelimit-remaining-resource: - - Microsoft.Compute/GetDedicatedHost3Min;247,Microsoft.Compute/GetDedicatedHost30Min;998 + - 
Microsoft.Compute/GetDedicatedHostSubscriptionMaximum;536 + x-ms-ratelimit-remaining-subscription-global-reads: + - '3749' + x-msedge-ref: + - 'Ref A: 798AC1844BED4A628EC8A72A2D3DA1C3 Ref B: SG2AA1070301025 Ref C: 2026-03-05T05:52:59Z' status: code: 200 - message: OK + message: '' - request: body: null headers: @@ -376,47 +451,41 @@ interactions: ParameterSetName: - --host-group -g User-Agent: - - AZURECLI/2.47.0 (AAZ) azsdk-python-core/1.24.0 Python/3.10.11 (Linux-5.15.0-1036-azure-x86_64-with-glibc2.31) - VSTS_7b238909-6802-4b65-b90d-184bca47f458_build_220_0 + - AZURECLI/2.84.0 azsdk-python-core/1.38.0 Python/3.12.10 (Windows-11-10.0.26200-SP0) method: GET uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vm_host_management_000001/providers/Microsoft.Compute/hostGroups/my-host-group/hosts?api-version=2022-11-01 response: body: - string: "{\r\n \"value\": [\r\n {\r\n \"name\": \"my-host\",\r\n \"id\": - \"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/CLI_TEST_VM_HOST_MANAGEMENT_VSK5JJTFLJXLNBF6EJOQUDHOHV3DYR2RSXARMIO3BG3CNEL/providers/Microsoft.Compute/hostGroups/my-host-group/hosts/my-host\",\r\n - \ \"type\": \"Microsoft.Compute/hostGroups/hosts\",\r\n \"location\": - \"westus\",\r\n \"sku\": {\r\n \"name\": \"DSv3-Type1\"\r\n },\r\n - \ \"properties\": {\r\n \"platformFaultDomain\": 2,\r\n \"autoReplaceOnFailure\": - true,\r\n \"hostId\": \"6d6cbc99-3866-4077-a0c2-e6561d64b989\",\r\n - \ \"virtualMachines\": [],\r\n \"provisioningTime\": \"2023-05-25T23:03:48.3627401+00:00\",\r\n - \ \"provisioningState\": \"Succeeded\",\r\n \"timeCreated\": - \"2023-05-25T23:03:47.8470658+00:00\"\r\n }\r\n }\r\n ]\r\n}" + string: 
'{"value":[{"name":"my-host","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/CLI_TEST_VM_HOST_MANAGEMENT_Y4TE2FKHMDCYRNGXNCCBDXM3RPP7R62JDEADYLYQV2N27B2/providers/Microsoft.Compute/hostGroups/my-host-group/hosts/my-host","type":"Microsoft.Compute/hostGroups/hosts","location":"westus","sku":{"name":"DCSv2-Type1"},"properties":{"platformFaultDomain":2,"autoReplaceOnFailure":true,"hostId":"d540fa68-5e31-412d-9ec8-95e529c2df3c","virtualMachines":[],"provisioningTime":"2026-03-05T05:52:28.2657271+00:00","provisioningState":"Succeeded","timeCreated":"2026-03-05T05:52:26.8906734+00:00"}}]}' headers: cache-control: - no-cache content-length: - - '793' + - '613' content-type: - application/json; charset=utf-8 date: - - Thu, 25 May 2023 23:04:18 GMT + - Thu, 05 Mar 2026 05:52:59 GMT expires: - '-1' pragma: - no-cache - server: - - Microsoft-HTTPAPI/2.0 - - Microsoft-HTTPAPI/2.0 strict-transport-security: - max-age=31536000; includeSubDomains - transfer-encoding: - - chunked - vary: - - Accept-Encoding + x-cache: + - CONFIG_NOCACHE x-content-type-options: - nosniff + x-ms-need-to-refresh-epl-cache: + - 'False' + x-ms-original-request-ids: + - c4dff6e4-e37c-455f-8946-22bc41831da6 x-ms-ratelimit-remaining-resource: - - Microsoft.Compute/GetDedicatedHost3Min;246,Microsoft.Compute/GetDedicatedHost30Min;997 + - Microsoft.Compute/GetDedicatedHostSubscriptionMaximum;535 + x-ms-ratelimit-remaining-subscription-global-reads: + - '3749' + x-msedge-ref: + - 'Ref A: 6D2AE87EB1E048599A8E9BA1D3FE3768 Ref B: SG2AA1070301031 Ref C: 2026-03-05T05:53:00Z' status: code: 200 message: OK @@ -434,16 +503,15 @@ interactions: ParameterSetName: - -n -g --set User-Agent: - - AZURECLI/2.47.0 azsdk-python-azure-mgmt-compute/29.1.0 Python/3.10.11 (Linux-5.15.0-1036-azure-x86_64-with-glibc2.31) - VSTS_7b238909-6802-4b65-b90d-184bca47f458_build_220_0 + - AZURECLI/2.84.0 azsdk-python-core/1.38.0 Python/3.12.10 (Windows-11-10.0.26200-SP0) method: GET uri: 
https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vm_host_management_000001/providers/Microsoft.Compute/hostGroups/my-host-group?api-version=2024-11-01 response: body: - string: "{\r\n \"name\": \"my-host-group\",\r\n \"id\": \"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/CLI_TEST_VM_HOST_MANAGEMENT_VSK5JJTFLJXLNBF6EJOQUDHOHV3DYR2RSXARMIO3BG3CNEL/providers/Microsoft.Compute/hostGroups/my-host-group\",\r\n + string: "{\r\n \"name\": \"my-host-group\",\r\n \"id\": \"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/CLI_TEST_VM_HOST_MANAGEMENT_Y4TE2FKHMDCYRNGXNCCBDXM3RPP7R62JDEADYLYQV2N27B2/providers/Microsoft.Compute/hostGroups/my-host-group\",\r\n \ \"type\": \"Microsoft.Compute/hostGroups\",\r\n \"location\": \"westus\",\r\n \ \"properties\": {\r\n \"platformFaultDomainCount\": 3,\r\n \"hosts\": - [\r\n {\r\n \"id\": \"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/CLI_TEST_VM_HOST_MANAGEMENT_VSK5JJTFLJXLNBF6EJOQUDHOHV3DYR2RSXARMIO3BG3CNEL/providers/Microsoft.Compute/hostGroups/MY-HOST-GROUP/hosts/MY-HOST\"\r\n + [\r\n {\r\n \"id\": \"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/CLI_TEST_VM_HOST_MANAGEMENT_Y4TE2FKHMDCYRNGXNCCBDXM3RPP7R62JDEADYLYQV2N27B2/providers/Microsoft.Compute/hostGroups/MY-HOST-GROUP/hosts/MY-HOST\"\r\n \ }\r\n ],\r\n \"supportAutomaticPlacement\": false\r\n }\r\n}" headers: cache-control: @@ -453,27 +521,28 @@ interactions: content-type: - application/json; charset=utf-8 date: - - Thu, 25 May 2023 23:04:19 GMT + - Thu, 05 Mar 2026 05:53:00 GMT expires: - '-1' pragma: - no-cache - server: - - Microsoft-HTTPAPI/2.0 - - Microsoft-HTTPAPI/2.0 strict-transport-security: - max-age=31536000; includeSubDomains - transfer-encoding: - - chunked - vary: - - Accept-Encoding + x-cache: + - CONFIG_NOCACHE x-content-type-options: - nosniff + x-ms-need-to-refresh-epl-cache: + - 'False' x-ms-ratelimit-remaining-resource: - - 
Microsoft.Compute/GetDedicatedHost30Min;996 + - Microsoft.Compute/GetDedicatedHostSubscriptionMaximum;534 + x-ms-ratelimit-remaining-subscription-global-reads: + - '3749' + x-msedge-ref: + - 'Ref A: F3A06B3B662C475D98FA089D4CCBB094 Ref B: SG2AA1040518023 Ref C: 2026-03-05T05:53:00Z' status: code: 200 - message: OK + message: '' - request: body: '{"location": "westus", "tags": {"foo": "bar"}, "properties": {"platformFaultDomainCount": 3, "supportAutomaticPlacement": false}}' @@ -493,13 +562,12 @@ interactions: ParameterSetName: - -n -g --set User-Agent: - - AZURECLI/2.47.0 azsdk-python-azure-mgmt-compute/29.1.0 Python/3.10.11 (Linux-5.15.0-1036-azure-x86_64-with-glibc2.31) - VSTS_7b238909-6802-4b65-b90d-184bca47f458_build_220_0 + - AZURECLI/2.84.0 azsdk-python-core/1.38.0 Python/3.12.10 (Windows-11-10.0.26200-SP0) method: PUT uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vm_host_management_000001/providers/Microsoft.Compute/hostGroups/my-host-group?api-version=2024-11-01 response: body: - string: "{\r\n \"name\": \"my-host-group\",\r\n \"id\": \"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/CLI_TEST_VM_HOST_MANAGEMENT_VSK5JJTFLJXLNBF6EJOQUDHOHV3DYR2RSXARMIO3BG3CNEL/providers/Microsoft.Compute/hostGroups/my-host-group\",\r\n + string: "{\r\n \"name\": \"my-host-group\",\r\n \"id\": \"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/CLI_TEST_VM_HOST_MANAGEMENT_Y4TE2FKHMDCYRNGXNCCBDXM3RPP7R62JDEADYLYQV2N27B2/providers/Microsoft.Compute/hostGroups/my-host-group\",\r\n \ \"type\": \"Microsoft.Compute/hostGroups\",\r\n \"location\": \"westus\",\r\n \ \"tags\": {\r\n \"foo\": \"bar\"\r\n },\r\n \"properties\": {\r\n \"platformFaultDomainCount\": 3,\r\n \"supportAutomaticPlacement\": false\r\n }\r\n}" @@ -511,26 +579,75 @@ interactions: content-type: - application/json; charset=utf-8 date: - - Thu, 25 May 2023 23:04:20 GMT + - Thu, 05 Mar 2026 05:53:01 GMT expires: - '-1' 
pragma: - no-cache - server: - - Microsoft-HTTPAPI/2.0 - - Microsoft-HTTPAPI/2.0 strict-transport-security: - max-age=31536000; includeSubDomains - transfer-encoding: - - chunked - vary: - - Accept-Encoding + x-cache: + - CONFIG_NOCACHE x-content-type-options: - nosniff + x-ms-need-to-refresh-epl-cache: + - 'False' + x-ms-operation-identifier: + - tenantId=ed94de55-1f87-4278-9651-525e7ba467d6,objectId=eeaac044-cddc-4cec-b91d-6c1da8fcdee0/westus/3e05ca33-3a1e-41fc-b6e8-0b1d60c40d26 x-ms-ratelimit-remaining-resource: - - Microsoft.Compute/PutDeleteDedicatedHost3Min;117,Microsoft.Compute/PutDeleteDedicatedHost30Min;597 + - Microsoft.Compute/PutDeleteDedicatedHostSubscriptionMaximum;117 + x-ms-ratelimit-remaining-subscription-global-writes: + - '2999' x-ms-ratelimit-remaining-subscription-writes: - - '1199' + - '199' + x-msedge-ref: + - 'Ref A: BBC420EEF2684374BDC7392C1415D4EE Ref B: SG2AA1040512054 Ref C: 2026-03-05T05:53:01Z' + status: + code: 200 + message: '' +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + CommandName: + - vm host update + Connection: + - keep-alive + ParameterSetName: + - -n --host-group -g --set + User-Agent: + - AZURECLI/2.84.0 azsdk-python-core/1.38.0 Python/3.12.10 (Windows-11-10.0.26200-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/cli_test_vm_host_management_000001?api-version=2024-11-01 + response: + body: + string: '{"id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vm_host_management_000001","name":"cli_test_vm_host_management_000001","type":"Microsoft.Resources/resourceGroups","location":"westus","tags":{"product":"azurecli","cause":"automation","test":"test_vm_host_management","date":"2026-03-05T05:52:18Z","module":"vm"},"properties":{"provisioningState":"Succeeded"}}' + headers: + cache-control: + - no-cache + content-length: + - '393' + content-type: + - application/json; 
charset=utf-8 + date: + - Thu, 05 Mar 2026 05:53:02 GMT + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-ms-ratelimit-remaining-subscription-global-reads: + - '3749' + x-msedge-ref: + - 'Ref A: 7D5AC7AC24AE411C8C8671442116C182 Ref B: SG2AA1070304040 Ref C: 2026-03-05T05:53:02Z' status: code: 200 message: OK @@ -548,52 +665,52 @@ interactions: ParameterSetName: - -n --host-group -g --set User-Agent: - - AZURECLI/2.47.0 azsdk-python-azure-mgmt-compute/29.1.0 Python/3.10.11 (Linux-5.15.0-1036-azure-x86_64-with-glibc2.31) - VSTS_7b238909-6802-4b65-b90d-184bca47f458_build_220_0 + - AZURECLI/2.84.0 azsdk-python-core/1.38.0 Python/3.12.10 (Windows-11-10.0.26200-SP0) method: GET - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vm_host_management_000001/providers/Microsoft.Compute/hostGroups/my-host-group/hosts/my-host?api-version=2024-11-01 + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vm_host_management_000001/providers/Microsoft.Compute/hostGroups/my-host-group/hosts/my-host?api-version=2022-11-01 response: body: string: "{\r\n \"name\": \"my-host\",\r\n \"id\": \"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vm_host_management_000001/providers/Microsoft.Compute/hostGroups/my-host-group/hosts/my-host\",\r\n \ \"type\": \"Microsoft.Compute/hostGroups/hosts\",\r\n \"location\": \"westus\",\r\n - \ \"sku\": {\r\n \"name\": \"DSv3-Type1\"\r\n },\r\n \"properties\": + \ \"sku\": {\r\n \"name\": \"DCSv2-Type1\"\r\n },\r\n \"properties\": {\r\n \"platformFaultDomain\": 2,\r\n \"autoReplaceOnFailure\": true,\r\n - \ \"hostId\": \"6d6cbc99-3866-4077-a0c2-e6561d64b989\",\r\n \"virtualMachines\": - [],\r\n \"provisioningTime\": \"2023-05-25T23:03:48.3627401+00:00\",\r\n - \ 
\"provisioningState\": \"Succeeded\",\r\n \"timeCreated\": \"2023-05-25T23:03:47.8470658+00:00\"\r\n + \ \"hostId\": \"d540fa68-5e31-412d-9ec8-95e529c2df3c\",\r\n \"virtualMachines\": + [],\r\n \"provisioningTime\": \"2026-03-05T05:52:28.2657271+00:00\",\r\n + \ \"provisioningState\": \"Succeeded\",\r\n \"timeCreated\": \"2026-03-05T05:52:26.8906734+00:00\"\r\n \ }\r\n}" headers: cache-control: - no-cache content-length: - - '655' + - '656' content-type: - application/json; charset=utf-8 date: - - Thu, 25 May 2023 23:04:20 GMT + - Thu, 05 Mar 2026 05:53:03 GMT expires: - '-1' pragma: - no-cache - server: - - Microsoft-HTTPAPI/2.0 - - Microsoft-HTTPAPI/2.0 strict-transport-security: - max-age=31536000; includeSubDomains - transfer-encoding: - - chunked - vary: - - Accept-Encoding + x-cache: + - CONFIG_NOCACHE x-content-type-options: - nosniff + x-ms-need-to-refresh-epl-cache: + - 'False' x-ms-ratelimit-remaining-resource: - - Microsoft.Compute/GetDedicatedHost3Min;245,Microsoft.Compute/GetDedicatedHost30Min;995 + - Microsoft.Compute/GetDedicatedHostSubscriptionMaximum;533 + x-ms-ratelimit-remaining-subscription-global-reads: + - '3749' + x-msedge-ref: + - 'Ref A: 19722946C30743AD96E5CADD4B950344 Ref B: SG2AA1040518025 Ref C: 2026-03-05T05:53:02Z' status: code: 200 - message: OK + message: '' - request: - body: '{"location": "westus", "tags": {"foo": "bar"}, "sku": {"name": "DSv3-Type1"}, - "properties": {"platformFaultDomain": 2, "autoReplaceOnFailure": true}}' + body: '{"location": "westus", "properties": {"autoReplaceOnFailure": true, "platformFaultDomain": + 2}, "sku": {"name": "DCSv2-Type1"}, "tags": {"foo": "bar"}}' headers: Accept: - application/json @@ -604,61 +721,63 @@ interactions: Connection: - keep-alive Content-Length: - - '149' + - '150' Content-Type: - application/json ParameterSetName: - -n --host-group -g --set User-Agent: - - AZURECLI/2.47.0 azsdk-python-azure-mgmt-compute/29.1.0 Python/3.10.11 (Linux-5.15.0-1036-azure-x86_64-with-glibc2.31) - 
VSTS_7b238909-6802-4b65-b90d-184bca47f458_build_220_0 + - AZURECLI/2.84.0 azsdk-python-core/1.38.0 Python/3.12.10 (Windows-11-10.0.26200-SP0) method: PUT - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vm_host_management_000001/providers/Microsoft.Compute/hostGroups/my-host-group/hosts/my-host?api-version=2024-11-01 + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vm_host_management_000001/providers/Microsoft.Compute/hostGroups/my-host-group/hosts/my-host?api-version=2022-11-01 response: body: string: "{\r\n \"name\": \"my-host\",\r\n \"id\": \"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vm_host_management_000001/providers/Microsoft.Compute/hostGroups/my-host-group/hosts/my-host\",\r\n \ \"type\": \"Microsoft.Compute/hostGroups/hosts\",\r\n \"location\": \"westus\",\r\n \ \"tags\": {\r\n \"foo\": \"bar\"\r\n },\r\n \"sku\": {\r\n \"name\": - \"DSv3-Type1\"\r\n },\r\n \"properties\": {\r\n \"platformFaultDomain\": - 2,\r\n \"autoReplaceOnFailure\": true,\r\n \"hostId\": \"6d6cbc99-3866-4077-a0c2-e6561d64b989\",\r\n - \ \"provisioningTime\": \"2023-05-25T23:03:48.3627401+00:00\",\r\n \"provisioningState\": - \"Updating\",\r\n \"timeCreated\": \"2023-05-25T23:03:47.8470658+00:00\"\r\n + \"DCSv2-Type1\"\r\n },\r\n \"properties\": {\r\n \"platformFaultDomain\": + 2,\r\n \"autoReplaceOnFailure\": true,\r\n \"hostId\": \"d540fa68-5e31-412d-9ec8-95e529c2df3c\",\r\n + \ \"provisioningTime\": \"2026-03-05T05:52:28.2657271+00:00\",\r\n \"provisioningState\": + \"Updating\",\r\n \"timeCreated\": \"2026-03-05T05:52:26.8906734+00:00\"\r\n \ }\r\n}" headers: azure-asyncnotification: - Enabled azure-asyncoperation: - - 
https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/providers/Microsoft.Compute/locations/westus/operations/3c40b1a2-a93e-493b-8a9f-bd500aa5b73a?p=571046f6-b640-41c1-86f7-f9f044b5adf9&api-version=2024-11-01 + - https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/providers/Microsoft.Compute/locations/westus/operations/5e33f996-21f7-49c4-9666-fb650e67fa87?p=7b868834-a188-4556-8f0e-38ea6fa689ce&api-version=2022-11-01&t=639082867836297090&c=MIIHhzCCBm-gAwIBAgITHgf_SDN0U0Mca1SI0QAAB_9IMzANBgkqhkiG9w0BAQsFADBEMRMwEQYKCZImiZPyLGQBGRYDR0JMMRMwEQYKCZImiZPyLGQBGRYDQU1FMRgwFgYDVQQDEw9BTUUgSW5mcmEgQ0EgMDYwHhcNMjYwMjE5MDYyODQwWhcNMjYwNTI0MjI1NzAzWjBAMT4wPAYDVQQDEzVhc3luY29wZXJhdGlvbnNpZ25pbmdjZXJ0aWZpY2F0ZS5tYW5hZ2VtZW50LmF6dXJlLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL3gBluMdrlD6ardxpqPmBSYIWUZhonBiNzi4-_Vw3g3Oro9RIUs6ZgPPueDGitorH-JQTqF81P_5QS92kcH5lXb6doPV1q_erI_e20ynOSwv5jOpwnB8O1qCCtpY__wHr-QcB0Lm4K2jLBpKfwOLTZXA5H1VEKxDXmEWP-EQELhaFdVIeJT0qwvEmhDGKnNn1-T2y6yZrYJaboDSEK8AB9KXc3mWLIwh8_G9twTikTy2PuHSzGUy9QLpQMImh2VVk5Ry0Pfpaypw8fNupz4-WjXfVayKHDyMN5Sbe7kFmbdxnISJ2hOyPiNlGB5nUXpHRpMys37mof_UwKdRJcixQECAwEAAaOCBHQwggRwMCcGCSsGAQQBgjcVCgQaMBgwCgYIKwYBBQUHAwEwCgYIKwYBBQUHAwIwPQYJKwYBBAGCNxUHBDAwLgYmKwYBBAGCNxUIhpDjDYTVtHiE8Ys-hZvdFs6dEoFghfmRS4WsmTQCAWQCAQcwggHLBggrBgEFBQcBAQSCAb0wggG5MGMGCCsGAQUFBzAChldodHRwOi8vY3JsLm1pY3Jvc29mdC5jb20vcGtpaW5mcmEvQ2VydHMvQkwyUEtJSU5UQ0EwMi5BTUUuR0JMX0FNRSUyMEluZnJhJTIwQ0ElMjAwNi5jcnQwUwYIKwYBBQUHMAKGR2h0dHA6Ly9jcmwxLmFtZS5nYmwvYWlhL0JMMlBLSUlOVENBMDIuQU1FLkdCTF9BTUUlMjBJbmZyYSUyMENBJTIwMDYuY3J0MFMGCCsGAQUFBzAChkdodHRwOi8vY3JsMi5hbWUuZ2JsL2FpYS9CTDJQS0lJTlRDQTAyLkFNRS5HQkxfQU1FJTIwSW5mcmElMjBDQSUyMDA2LmNydDBTBggrBgEFBQcwAoZHaHR0cDovL2NybDMuYW1lLmdibC9haWEvQkwyUEtJSU5UQ0EwMi5BTUUuR0JMX0FNRSUyMEluZnJhJTIwQ0ElMjAwNi5jcnQwUwYIKwYBBQUHMAKGR2h0dHA6Ly9jcmw0LmFtZS5nYmwvYWlhL0JMMlBLSUlOVENBMDIuQU1FLkdCTF9BTUUlMjBJbmZyYSUyMENBJTIwMDYuY3J0MB0GA1UdDgQWBBSpvSUr5SOrbw_81Q-45DGZEMcVRTAOBgNVHQ8BAf8
EBAMCBaAwggEmBgNVHR8EggEdMIIBGTCCARWgggERoIIBDYY_aHR0cDovL2NybC5taWNyb3NvZnQuY29tL3BraWluZnJhL0NSTC9BTUUlMjBJbmZyYSUyMENBJTIwMDYuY3JshjFodHRwOi8vY3JsMS5hbWUuZ2JsL2NybC9BTUUlMjBJbmZyYSUyMENBJTIwMDYuY3JshjFodHRwOi8vY3JsMi5hbWUuZ2JsL2NybC9BTUUlMjBJbmZyYSUyMENBJTIwMDYuY3JshjFodHRwOi8vY3JsMy5hbWUuZ2JsL2NybC9BTUUlMjBJbmZyYSUyMENBJTIwMDYuY3JshjFodHRwOi8vY3JsNC5hbWUuZ2JsL2NybC9BTUUlMjBJbmZyYSUyMENBJTIwMDYuY3JsMIGdBgNVHSAEgZUwgZIwDAYKKwYBBAGCN3sBATBmBgorBgEEAYI3ewICMFgwVgYIKwYBBQUHAgIwSh5IADMAMwBlADAAMQA5ADIAMQAtADQAZAA2ADQALQA0AGYAOABjAC0AYQAwADUANQAtADUAYgBkAGEAZgBmAGQANQBlADMAMwBkMAwGCisGAQQBgjd7AwIwDAYKKwYBBAGCN3sEAjAfBgNVHSMEGDAWgBTxRmjG8cPwKy19i2rhsvm-NfzRQTAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDQYJKoZIhvcNAQELBQADggEBAKPjOlpIFqUJhjTDyaKZju7P26-JxO3YKxxpiuONQQggB8Pfq85rCh4TYMRHTRF9-SaF8UFmfyVp5MKCn4fUJyrYkEfjqrWoLand203HijTSRlrvcI7H4LdCBUE4oWD21md4XzcNZ61hmhlg0z_LvEFluaWR6FJJgORgK2V5zkvf8GxYZY8SiUSX0FogWYQc0rgwrb9F3zZBApnguvbEEAfZWGixBF7eZX5U89oac2ZpJ6yt5mIyLIWUbKqOIqoTVE7ZWq0g-rZNF6SxzSWnEzmKCIVtHC_4lvCAtexAJWshMLjYvNKYi9WTvrrtCKixbaK9Y4uLtPGLUgNVtMFnJIc&s=vC1VDptLg_-de3HUDS33uDA3rTBaYVPzPtopfR7uPT_1rnWIBuucwXIsf3bAe9tu3mLdva-q-BOeZweQFLC6Xwa4CO1TfkDJLcRT5f6mwpbLpTZD6uPoqHFI8O9NokUgnSnOS3y36qW-JXP8fBo-B8B4RtGiNW5mejFsqj4f8kW5rrKF_ydsdtYshoPTeyHiVhCWhmc85pcylwecXu1pl7DsLZvV02wP14CKmfdvSZb3iJm979UW79sA3GPf4WC5UUs0_i02Q-3ZBBKe-zZPGOxNfuFHxhri4jte_yAtmhaaqzoATQMLDqkVgPyi_mcTRdtaXEhWwXQYLKuxnLXohA&h=6lwuxG5OpsXO_0Bd3l-_-NVETrJsbScQYfBduf19yu0 cache-control: - no-cache content-length: - - '663' + - '664' content-type: - application/json; charset=utf-8 date: - - Thu, 25 May 2023 23:04:21 GMT + - Thu, 05 Mar 2026 05:53:02 GMT expires: - '-1' pragma: - no-cache - server: - - Microsoft-HTTPAPI/2.0 - - Microsoft-HTTPAPI/2.0 strict-transport-security: - max-age=31536000; includeSubDomains - transfer-encoding: - - chunked - vary: - - Accept-Encoding + x-cache: + - CONFIG_NOCACHE x-content-type-options: - nosniff + x-ms-need-to-refresh-epl-cache: + - 'False' + 
x-ms-operation-identifier: + - tenantId=ed94de55-1f87-4278-9651-525e7ba467d6,objectId=eeaac044-cddc-4cec-b91d-6c1da8fcdee0/westus/5b83d630-6fe7-44a2-8866-c293bb4a850a x-ms-ratelimit-remaining-resource: - - Microsoft.Compute/PutDeleteDedicatedHost3Min;116,Microsoft.Compute/PutDeleteDedicatedHost30Min;596 + - Microsoft.Compute/PutDeleteDedicatedHostSubscriptionMaximum;116 + x-ms-ratelimit-remaining-subscription-global-writes: + - '2999' x-ms-ratelimit-remaining-subscription-writes: - - '1198' + - '199' + x-msedge-ref: + - 'Ref A: E8C43D2EACB14EF589544773070FF8FA Ref B: SG2AA1040519023 Ref C: 2026-03-05T05:53:03Z' status: code: 200 - message: OK + message: '' - request: body: null headers: @@ -673,15 +792,14 @@ interactions: ParameterSetName: - -n --host-group -g --set User-Agent: - - AZURECLI/2.47.0 azsdk-python-azure-mgmt-compute/29.1.0 Python/3.10.11 (Linux-5.15.0-1036-azure-x86_64-with-glibc2.31) - VSTS_7b238909-6802-4b65-b90d-184bca47f458_build_220_0 + - AZURECLI/2.84.0 azsdk-python-core/1.38.0 Python/3.12.10 (Windows-11-10.0.26200-SP0) method: GET - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/providers/Microsoft.Compute/locations/westus/operations/3c40b1a2-a93e-493b-8a9f-bd500aa5b73a?p=571046f6-b640-41c1-86f7-f9f044b5adf9&api-version=2024-11-01 + uri: 
https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/providers/Microsoft.Compute/locations/westus/operations/5e33f996-21f7-49c4-9666-fb650e67fa87?p=7b868834-a188-4556-8f0e-38ea6fa689ce&api-version=2022-11-01&t=639082867836297090&c=MIIHhzCCBm-gAwIBAgITHgf_SDN0U0Mca1SI0QAAB_9IMzANBgkqhkiG9w0BAQsFADBEMRMwEQYKCZImiZPyLGQBGRYDR0JMMRMwEQYKCZImiZPyLGQBGRYDQU1FMRgwFgYDVQQDEw9BTUUgSW5mcmEgQ0EgMDYwHhcNMjYwMjE5MDYyODQwWhcNMjYwNTI0MjI1NzAzWjBAMT4wPAYDVQQDEzVhc3luY29wZXJhdGlvbnNpZ25pbmdjZXJ0aWZpY2F0ZS5tYW5hZ2VtZW50LmF6dXJlLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL3gBluMdrlD6ardxpqPmBSYIWUZhonBiNzi4-_Vw3g3Oro9RIUs6ZgPPueDGitorH-JQTqF81P_5QS92kcH5lXb6doPV1q_erI_e20ynOSwv5jOpwnB8O1qCCtpY__wHr-QcB0Lm4K2jLBpKfwOLTZXA5H1VEKxDXmEWP-EQELhaFdVIeJT0qwvEmhDGKnNn1-T2y6yZrYJaboDSEK8AB9KXc3mWLIwh8_G9twTikTy2PuHSzGUy9QLpQMImh2VVk5Ry0Pfpaypw8fNupz4-WjXfVayKHDyMN5Sbe7kFmbdxnISJ2hOyPiNlGB5nUXpHRpMys37mof_UwKdRJcixQECAwEAAaOCBHQwggRwMCcGCSsGAQQBgjcVCgQaMBgwCgYIKwYBBQUHAwEwCgYIKwYBBQUHAwIwPQYJKwYBBAGCNxUHBDAwLgYmKwYBBAGCNxUIhpDjDYTVtHiE8Ys-hZvdFs6dEoFghfmRS4WsmTQCAWQCAQcwggHLBggrBgEFBQcBAQSCAb0wggG5MGMGCCsGAQUFBzAChldodHRwOi8vY3JsLm1pY3Jvc29mdC5jb20vcGtpaW5mcmEvQ2VydHMvQkwyUEtJSU5UQ0EwMi5BTUUuR0JMX0FNRSUyMEluZnJhJTIwQ0ElMjAwNi5jcnQwUwYIKwYBBQUHMAKGR2h0dHA6Ly9jcmwxLmFtZS5nYmwvYWlhL0JMMlBLSUlOVENBMDIuQU1FLkdCTF9BTUUlMjBJbmZyYSUyMENBJTIwMDYuY3J0MFMGCCsGAQUFBzAChkdodHRwOi8vY3JsMi5hbWUuZ2JsL2FpYS9CTDJQS0lJTlRDQTAyLkFNRS5HQkxfQU1FJTIwSW5mcmElMjBDQSUyMDA2LmNydDBTBggrBgEFBQcwAoZHaHR0cDovL2NybDMuYW1lLmdibC9haWEvQkwyUEtJSU5UQ0EwMi5BTUUuR0JMX0FNRSUyMEluZnJhJTIwQ0ElMjAwNi5jcnQwUwYIKwYBBQUHMAKGR2h0dHA6Ly9jcmw0LmFtZS5nYmwvYWlhL0JMMlBLSUlOVENBMDIuQU1FLkdCTF9BTUUlMjBJbmZyYSUyMENBJTIwMDYuY3J0MB0GA1UdDgQWBBSpvSUr5SOrbw_81Q-45DGZEMcVRTAOBgNVHQ8BAf8EBAMCBaAwggEmBgNVHR8EggEdMIIBGTCCARWgggERoIIBDYY_aHR0cDovL2NybC5taWNyb3NvZnQuY29tL3BraWluZnJhL0NSTC9BTUUlMjBJbmZyYSUyMENBJTIwMDYuY3JshjFodHRwOi8vY3JsMS5hbWUuZ2JsL2NybC9BTUUlMjBJbmZyYSUyMENBJTIwMDYuY3JshjFodHRwOi8vY3JsMi5hbWUuZ2JsL2NybC9BTU
UlMjBJbmZyYSUyMENBJTIwMDYuY3JshjFodHRwOi8vY3JsMy5hbWUuZ2JsL2NybC9BTUUlMjBJbmZyYSUyMENBJTIwMDYuY3JshjFodHRwOi8vY3JsNC5hbWUuZ2JsL2NybC9BTUUlMjBJbmZyYSUyMENBJTIwMDYuY3JsMIGdBgNVHSAEgZUwgZIwDAYKKwYBBAGCN3sBATBmBgorBgEEAYI3ewICMFgwVgYIKwYBBQUHAgIwSh5IADMAMwBlADAAMQA5ADIAMQAtADQAZAA2ADQALQA0AGYAOABjAC0AYQAwADUANQAtADUAYgBkAGEAZgBmAGQANQBlADMAMwBkMAwGCisGAQQBgjd7AwIwDAYKKwYBBAGCN3sEAjAfBgNVHSMEGDAWgBTxRmjG8cPwKy19i2rhsvm-NfzRQTAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDQYJKoZIhvcNAQELBQADggEBAKPjOlpIFqUJhjTDyaKZju7P26-JxO3YKxxpiuONQQggB8Pfq85rCh4TYMRHTRF9-SaF8UFmfyVp5MKCn4fUJyrYkEfjqrWoLand203HijTSRlrvcI7H4LdCBUE4oWD21md4XzcNZ61hmhlg0z_LvEFluaWR6FJJgORgK2V5zkvf8GxYZY8SiUSX0FogWYQc0rgwrb9F3zZBApnguvbEEAfZWGixBF7eZX5U89oac2ZpJ6yt5mIyLIWUbKqOIqoTVE7ZWq0g-rZNF6SxzSWnEzmKCIVtHC_4lvCAtexAJWshMLjYvNKYi9WTvrrtCKixbaK9Y4uLtPGLUgNVtMFnJIc&s=vC1VDptLg_-de3HUDS33uDA3rTBaYVPzPtopfR7uPT_1rnWIBuucwXIsf3bAe9tu3mLdva-q-BOeZweQFLC6Xwa4CO1TfkDJLcRT5f6mwpbLpTZD6uPoqHFI8O9NokUgnSnOS3y36qW-JXP8fBo-B8B4RtGiNW5mejFsqj4f8kW5rrKF_ydsdtYshoPTeyHiVhCWhmc85pcylwecXu1pl7DsLZvV02wP14CKmfdvSZb3iJm979UW79sA3GPf4WC5UUs0_i02Q-3ZBBKe-zZPGOxNfuFHxhri4jte_yAtmhaaqzoATQMLDqkVgPyi_mcTRdtaXEhWwXQYLKuxnLXohA&h=6lwuxG5OpsXO_0Bd3l-_-NVETrJsbScQYfBduf19yu0 response: body: - string: "{\r\n \"startTime\": \"2023-05-25T23:04:21.4567648+00:00\",\r\n \"endTime\": - \"2023-05-25T23:04:21.5036264+00:00\",\r\n \"status\": \"Succeeded\",\r\n - \ \"name\": \"3c40b1a2-a93e-493b-8a9f-bd500aa5b73a\"\r\n}" + string: "{\r\n \"startTime\": \"2026-03-05T05:53:03.5004525+00:00\",\r\n \"endTime\": + \"2026-03-05T05:53:03.5472864+00:00\",\r\n \"status\": \"Succeeded\",\r\n + \ \"name\": \"5e33f996-21f7-49c4-9666-fb650e67fa87\"\r\n}" headers: cache-control: - no-cache @@ -690,27 +808,30 @@ interactions: content-type: - application/json; charset=utf-8 date: - - Thu, 25 May 2023 23:04:51 GMT + - Thu, 05 Mar 2026 05:53:03 GMT expires: - '-1' pragma: - no-cache - server: - - Microsoft-HTTPAPI/2.0 - - Microsoft-HTTPAPI/2.0 
strict-transport-security: - max-age=31536000; includeSubDomains - transfer-encoding: - - chunked - vary: - - Accept-Encoding + x-cache: + - CONFIG_NOCACHE x-content-type-options: - nosniff + x-ms-need-to-refresh-epl-cache: + - 'False' + x-ms-operation-identifier: + - tenantId=ed94de55-1f87-4278-9651-525e7ba467d6,objectId=eeaac044-cddc-4cec-b91d-6c1da8fcdee0/southeastasia/cd0086ed-45f8-446f-98ee-ed3260e8368e x-ms-ratelimit-remaining-resource: - - Microsoft.Compute/GetOperation3Min;14998,Microsoft.Compute/GetOperation30Min;29998 + - Microsoft.Compute/GetOperationResource;44,Microsoft.Compute/GetOperationSubscriptionMaximum;14997 + x-ms-ratelimit-remaining-subscription-global-reads: + - '3749' + x-msedge-ref: + - 'Ref A: 692D6E43F0ED4C5CB57CDC5EC138B6A2 Ref B: SG2AA1040517034 Ref C: 2026-03-05T05:53:04Z' status: code: 200 - message: OK + message: '' - request: body: null headers: @@ -725,49 +846,49 @@ interactions: ParameterSetName: - -n --host-group -g --set User-Agent: - - AZURECLI/2.47.0 azsdk-python-azure-mgmt-compute/29.1.0 Python/3.10.11 (Linux-5.15.0-1036-azure-x86_64-with-glibc2.31) - VSTS_7b238909-6802-4b65-b90d-184bca47f458_build_220_0 + - AZURECLI/2.84.0 azsdk-python-core/1.38.0 Python/3.12.10 (Windows-11-10.0.26200-SP0) method: GET - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vm_host_management_000001/providers/Microsoft.Compute/hostGroups/my-host-group/hosts/my-host?api-version=2024-11-01 + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vm_host_management_000001/providers/Microsoft.Compute/hostGroups/my-host-group/hosts/my-host?api-version=2022-11-01 response: body: string: "{\r\n \"name\": \"my-host\",\r\n \"id\": \"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vm_host_management_000001/providers/Microsoft.Compute/hostGroups/my-host-group/hosts/my-host\",\r\n \ \"type\": 
\"Microsoft.Compute/hostGroups/hosts\",\r\n \"location\": \"westus\",\r\n \ \"tags\": {\r\n \"foo\": \"bar\"\r\n },\r\n \"sku\": {\r\n \"name\": - \"DSv3-Type1\"\r\n },\r\n \"properties\": {\r\n \"platformFaultDomain\": - 2,\r\n \"autoReplaceOnFailure\": true,\r\n \"hostId\": \"6d6cbc99-3866-4077-a0c2-e6561d64b989\",\r\n - \ \"virtualMachines\": [],\r\n \"provisioningTime\": \"2023-05-25T23:03:48.3627401+00:00\",\r\n - \ \"provisioningState\": \"Succeeded\",\r\n \"timeCreated\": \"2023-05-25T23:03:47.8470658+00:00\"\r\n + \"DCSv2-Type1\"\r\n },\r\n \"properties\": {\r\n \"platformFaultDomain\": + 2,\r\n \"autoReplaceOnFailure\": true,\r\n \"hostId\": \"d540fa68-5e31-412d-9ec8-95e529c2df3c\",\r\n + \ \"virtualMachines\": [],\r\n \"provisioningTime\": \"2026-03-05T05:52:28.2657271+00:00\",\r\n + \ \"provisioningState\": \"Succeeded\",\r\n \"timeCreated\": \"2026-03-05T05:52:26.8906734+00:00\"\r\n \ }\r\n}" headers: cache-control: - no-cache content-length: - - '692' + - '693' content-type: - application/json; charset=utf-8 date: - - Thu, 25 May 2023 23:04:51 GMT + - Thu, 05 Mar 2026 05:53:04 GMT expires: - '-1' pragma: - no-cache - server: - - Microsoft-HTTPAPI/2.0 - - Microsoft-HTTPAPI/2.0 strict-transport-security: - max-age=31536000; includeSubDomains - transfer-encoding: - - chunked - vary: - - Accept-Encoding + x-cache: + - CONFIG_NOCACHE x-content-type-options: - nosniff + x-ms-need-to-refresh-epl-cache: + - 'False' x-ms-ratelimit-remaining-resource: - - Microsoft.Compute/GetDedicatedHost3Min;243,Microsoft.Compute/GetDedicatedHost30Min;993 + - Microsoft.Compute/GetDedicatedHostSubscriptionMaximum;530 + x-ms-ratelimit-remaining-subscription-global-reads: + - '3749' + x-msedge-ref: + - 'Ref A: 783D60BFDDA74709B9411ED5FA39884C Ref B: SG2AA1040515025 Ref C: 2026-03-05T05:53:04Z' status: code: 200 - message: OK + message: '' - request: body: null headers: @@ -784,8 +905,7 @@ interactions: ParameterSetName: - -n --host-group -g --yes User-Agent: - - AZURECLI/2.47.0 
(AAZ) azsdk-python-core/1.24.0 Python/3.10.11 (Linux-5.15.0-1036-azure-x86_64-with-glibc2.31) - VSTS_7b238909-6802-4b65-b90d-184bca47f458_build_220_0 + - AZURECLI/2.84.0 azsdk-python-core/1.38.0 Python/3.12.10 (Windows-11-10.0.26200-SP0) method: DELETE uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vm_host_management_000001/providers/Microsoft.Compute/hostGroups/my-host-group/hosts/my-host?api-version=2022-11-01 response: @@ -795,33 +915,40 @@ interactions: azure-asyncnotification: - Enabled azure-asyncoperation: - - https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/providers/Microsoft.Compute/locations/westus/operations/2b7662d0-8698-4b7f-9563-72dddd680efb?p=571046f6-b640-41c1-86f7-f9f044b5adf9&api-version=2024-11-01 + - https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/providers/Microsoft.Compute/locations/westus/operations/c83ad088-146f-4f3a-970c-de7f1403bfee?p=7b868834-a188-4556-8f0e-38ea6fa689ce&api-version=2022-11-01&t=639082867865902301&c=MIIHhzCCBm-gAwIBAgITHgf_SDN0U0Mca1SI0QAAB_9IMzANBgkqhkiG9w0BAQsFADBEMRMwEQYKCZImiZPyLGQBGRYDR0JMMRMwEQYKCZImiZPyLGQBGRYDQU1FMRgwFgYDVQQDEw9BTUUgSW5mcmEgQ0EgMDYwHhcNMjYwMjE5MDYyODQwWhcNMjYwNTI0MjI1NzAzWjBAMT4wPAYDVQQDEzVhc3luY29wZXJhdGlvbnNpZ25pbmdjZXJ0aWZpY2F0ZS5tYW5hZ2VtZW50LmF6dXJlLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL3gBluMdrlD6ardxpqPmBSYIWUZhonBiNzi4-_Vw3g3Oro9RIUs6ZgPPueDGitorH-JQTqF81P_5QS92kcH5lXb6doPV1q_erI_e20ynOSwv5jOpwnB8O1qCCtpY__wHr-QcB0Lm4K2jLBpKfwOLTZXA5H1VEKxDXmEWP-EQELhaFdVIeJT0qwvEmhDGKnNn1-T2y6yZrYJaboDSEK8AB9KXc3mWLIwh8_G9twTikTy2PuHSzGUy9QLpQMImh2VVk5Ry0Pfpaypw8fNupz4-WjXfVayKHDyMN5Sbe7kFmbdxnISJ2hOyPiNlGB5nUXpHRpMys37mof_UwKdRJcixQECAwEAAaOCBHQwggRwMCcGCSsGAQQBgjcVCgQaMBgwCgYIKwYBBQUHAwEwCgYIKwYBBQUHAwIwPQYJKwYBBAGCNxUHBDAwLgYmKwYBBAGCNxUIhpDjDYTVtHiE8Ys-hZvdFs6dEoFghfmRS4WsmTQCAWQCAQcwggHLBggrBgEFBQcBAQSCAb0wggG5MGMGCCsGAQUFBzAChldodHRwOi8vY3JsLm1pY3Jvc29mdC5jb20vcGtpaW
5mcmEvQ2VydHMvQkwyUEtJSU5UQ0EwMi5BTUUuR0JMX0FNRSUyMEluZnJhJTIwQ0ElMjAwNi5jcnQwUwYIKwYBBQUHMAKGR2h0dHA6Ly9jcmwxLmFtZS5nYmwvYWlhL0JMMlBLSUlOVENBMDIuQU1FLkdCTF9BTUUlMjBJbmZyYSUyMENBJTIwMDYuY3J0MFMGCCsGAQUFBzAChkdodHRwOi8vY3JsMi5hbWUuZ2JsL2FpYS9CTDJQS0lJTlRDQTAyLkFNRS5HQkxfQU1FJTIwSW5mcmElMjBDQSUyMDA2LmNydDBTBggrBgEFBQcwAoZHaHR0cDovL2NybDMuYW1lLmdibC9haWEvQkwyUEtJSU5UQ0EwMi5BTUUuR0JMX0FNRSUyMEluZnJhJTIwQ0ElMjAwNi5jcnQwUwYIKwYBBQUHMAKGR2h0dHA6Ly9jcmw0LmFtZS5nYmwvYWlhL0JMMlBLSUlOVENBMDIuQU1FLkdCTF9BTUUlMjBJbmZyYSUyMENBJTIwMDYuY3J0MB0GA1UdDgQWBBSpvSUr5SOrbw_81Q-45DGZEMcVRTAOBgNVHQ8BAf8EBAMCBaAwggEmBgNVHR8EggEdMIIBGTCCARWgggERoIIBDYY_aHR0cDovL2NybC5taWNyb3NvZnQuY29tL3BraWluZnJhL0NSTC9BTUUlMjBJbmZyYSUyMENBJTIwMDYuY3JshjFodHRwOi8vY3JsMS5hbWUuZ2JsL2NybC9BTUUlMjBJbmZyYSUyMENBJTIwMDYuY3JshjFodHRwOi8vY3JsMi5hbWUuZ2JsL2NybC9BTUUlMjBJbmZyYSUyMENBJTIwMDYuY3JshjFodHRwOi8vY3JsMy5hbWUuZ2JsL2NybC9BTUUlMjBJbmZyYSUyMENBJTIwMDYuY3JshjFodHRwOi8vY3JsNC5hbWUuZ2JsL2NybC9BTUUlMjBJbmZyYSUyMENBJTIwMDYuY3JsMIGdBgNVHSAEgZUwgZIwDAYKKwYBBAGCN3sBATBmBgorBgEEAYI3ewICMFgwVgYIKwYBBQUHAgIwSh5IADMAMwBlADAAMQA5ADIAMQAtADQAZAA2ADQALQA0AGYAOABjAC0AYQAwADUANQAtADUAYgBkAGEAZgBmAGQANQBlADMAMwBkMAwGCisGAQQBgjd7AwIwDAYKKwYBBAGCN3sEAjAfBgNVHSMEGDAWgBTxRmjG8cPwKy19i2rhsvm-NfzRQTAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDQYJKoZIhvcNAQELBQADggEBAKPjOlpIFqUJhjTDyaKZju7P26-JxO3YKxxpiuONQQggB8Pfq85rCh4TYMRHTRF9-SaF8UFmfyVp5MKCn4fUJyrYkEfjqrWoLand203HijTSRlrvcI7H4LdCBUE4oWD21md4XzcNZ61hmhlg0z_LvEFluaWR6FJJgORgK2V5zkvf8GxYZY8SiUSX0FogWYQc0rgwrb9F3zZBApnguvbEEAfZWGixBF7eZX5U89oac2ZpJ6yt5mIyLIWUbKqOIqoTVE7ZWq0g-rZNF6SxzSWnEzmKCIVtHC_4lvCAtexAJWshMLjYvNKYi9WTvrrtCKixbaK9Y4uLtPGLUgNVtMFnJIc&s=ePKC6T9Fn0leOo0uUWXu_Ns0TdjHpZodeBCiIdgr-jFB6wM146WyzK3IxUf-Bw_cto1C7wGjpoCqKS8_BruzpAuqjkJWjrDadjzQV0jS-vZlcHhJRgTQBBEIkdYDI_LsCS6N3rTKjzVnwyGOJtgorPdCo_yoVqx1W5YlAGffrkgfPg-es5njRkHA7cSYc06pyou7iiInkIaJ9Xz3CFoZQ_pc9iA3q_pdSQcvn_nTf0cixe_ubZol1JTU5tKyASR-Bbb5ATiHvbUuxio4NbLPtfjzePVnR6CNDefPksVAJ6m65XKSwNdMdXOYRnkaBwCQi4mRBlBFTQjuz3McSQNd
LQ&h=uCZ-Wb0oFA-A2Gn0YU9ayom85WZFxyzc3umEQX6rs5I cache-control: - no-cache content-length: - '0' date: - - Thu, 25 May 2023 23:04:52 GMT + - Thu, 05 Mar 2026 05:53:06 GMT expires: - '-1' location: - - https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/providers/Microsoft.Compute/locations/westus/operations/2b7662d0-8698-4b7f-9563-72dddd680efb?p=571046f6-b640-41c1-86f7-f9f044b5adf9&monitor=true&api-version=2024-11-01 + - https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/providers/Microsoft.Compute/locations/westus/operations/c83ad088-146f-4f3a-970c-de7f1403bfee?p=7b868834-a188-4556-8f0e-38ea6fa689ce&monitor=true&api-version=2022-11-01&t=639082867865902301&c=MIIHhzCCBm-gAwIBAgITHgf_SDN0U0Mca1SI0QAAB_9IMzANBgkqhkiG9w0BAQsFADBEMRMwEQYKCZImiZPyLGQBGRYDR0JMMRMwEQYKCZImiZPyLGQBGRYDQU1FMRgwFgYDVQQDEw9BTUUgSW5mcmEgQ0EgMDYwHhcNMjYwMjE5MDYyODQwWhcNMjYwNTI0MjI1NzAzWjBAMT4wPAYDVQQDEzVhc3luY29wZXJhdGlvbnNpZ25pbmdjZXJ0aWZpY2F0ZS5tYW5hZ2VtZW50LmF6dXJlLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL3gBluMdrlD6ardxpqPmBSYIWUZhonBiNzi4-_Vw3g3Oro9RIUs6ZgPPueDGitorH-JQTqF81P_5QS92kcH5lXb6doPV1q_erI_e20ynOSwv5jOpwnB8O1qCCtpY__wHr-QcB0Lm4K2jLBpKfwOLTZXA5H1VEKxDXmEWP-EQELhaFdVIeJT0qwvEmhDGKnNn1-T2y6yZrYJaboDSEK8AB9KXc3mWLIwh8_G9twTikTy2PuHSzGUy9QLpQMImh2VVk5Ry0Pfpaypw8fNupz4-WjXfVayKHDyMN5Sbe7kFmbdxnISJ2hOyPiNlGB5nUXpHRpMys37mof_UwKdRJcixQECAwEAAaOCBHQwggRwMCcGCSsGAQQBgjcVCgQaMBgwCgYIKwYBBQUHAwEwCgYIKwYBBQUHAwIwPQYJKwYBBAGCNxUHBDAwLgYmKwYBBAGCNxUIhpDjDYTVtHiE8Ys-hZvdFs6dEoFghfmRS4WsmTQCAWQCAQcwggHLBggrBgEFBQcBAQSCAb0wggG5MGMGCCsGAQUFBzAChldodHRwOi8vY3JsLm1pY3Jvc29mdC5jb20vcGtpaW5mcmEvQ2VydHMvQkwyUEtJSU5UQ0EwMi5BTUUuR0JMX0FNRSUyMEluZnJhJTIwQ0ElMjAwNi5jcnQwUwYIKwYBBQUHMAKGR2h0dHA6Ly9jcmwxLmFtZS5nYmwvYWlhL0JMMlBLSUlOVENBMDIuQU1FLkdCTF9BTUUlMjBJbmZyYSUyMENBJTIwMDYuY3J0MFMGCCsGAQUFBzAChkdodHRwOi8vY3JsMi5hbWUuZ2JsL2FpYS9CTDJQS0lJTlRDQTAyLkFNRS5HQkxfQU1FJTIwSW5mcmElMjBDQSUyMDA2LmNydDBTBggrBgEFBQcwAoZHaHR0cDovL2NybDMuYW1lLmdibC9haWEvQkwy
UEtJSU5UQ0EwMi5BTUUuR0JMX0FNRSUyMEluZnJhJTIwQ0ElMjAwNi5jcnQwUwYIKwYBBQUHMAKGR2h0dHA6Ly9jcmw0LmFtZS5nYmwvYWlhL0JMMlBLSUlOVENBMDIuQU1FLkdCTF9BTUUlMjBJbmZyYSUyMENBJTIwMDYuY3J0MB0GA1UdDgQWBBSpvSUr5SOrbw_81Q-45DGZEMcVRTAOBgNVHQ8BAf8EBAMCBaAwggEmBgNVHR8EggEdMIIBGTCCARWgggERoIIBDYY_aHR0cDovL2NybC5taWNyb3NvZnQuY29tL3BraWluZnJhL0NSTC9BTUUlMjBJbmZyYSUyMENBJTIwMDYuY3JshjFodHRwOi8vY3JsMS5hbWUuZ2JsL2NybC9BTUUlMjBJbmZyYSUyMENBJTIwMDYuY3JshjFodHRwOi8vY3JsMi5hbWUuZ2JsL2NybC9BTUUlMjBJbmZyYSUyMENBJTIwMDYuY3JshjFodHRwOi8vY3JsMy5hbWUuZ2JsL2NybC9BTUUlMjBJbmZyYSUyMENBJTIwMDYuY3JshjFodHRwOi8vY3JsNC5hbWUuZ2JsL2NybC9BTUUlMjBJbmZyYSUyMENBJTIwMDYuY3JsMIGdBgNVHSAEgZUwgZIwDAYKKwYBBAGCN3sBATBmBgorBgEEAYI3ewICMFgwVgYIKwYBBQUHAgIwSh5IADMAMwBlADAAMQA5ADIAMQAtADQAZAA2ADQALQA0AGYAOABjAC0AYQAwADUANQAtADUAYgBkAGEAZgBmAGQANQBlADMAMwBkMAwGCisGAQQBgjd7AwIwDAYKKwYBBAGCN3sEAjAfBgNVHSMEGDAWgBTxRmjG8cPwKy19i2rhsvm-NfzRQTAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDQYJKoZIhvcNAQELBQADggEBAKPjOlpIFqUJhjTDyaKZju7P26-JxO3YKxxpiuONQQggB8Pfq85rCh4TYMRHTRF9-SaF8UFmfyVp5MKCn4fUJyrYkEfjqrWoLand203HijTSRlrvcI7H4LdCBUE4oWD21md4XzcNZ61hmhlg0z_LvEFluaWR6FJJgORgK2V5zkvf8GxYZY8SiUSX0FogWYQc0rgwrb9F3zZBApnguvbEEAfZWGixBF7eZX5U89oac2ZpJ6yt5mIyLIWUbKqOIqoTVE7ZWq0g-rZNF6SxzSWnEzmKCIVtHC_4lvCAtexAJWshMLjYvNKYi9WTvrrtCKixbaK9Y4uLtPGLUgNVtMFnJIc&s=ePKC6T9Fn0leOo0uUWXu_Ns0TdjHpZodeBCiIdgr-jFB6wM146WyzK3IxUf-Bw_cto1C7wGjpoCqKS8_BruzpAuqjkJWjrDadjzQV0jS-vZlcHhJRgTQBBEIkdYDI_LsCS6N3rTKjzVnwyGOJtgorPdCo_yoVqx1W5YlAGffrkgfPg-es5njRkHA7cSYc06pyou7iiInkIaJ9Xz3CFoZQ_pc9iA3q_pdSQcvn_nTf0cixe_ubZol1JTU5tKyASR-Bbb5ATiHvbUuxio4NbLPtfjzePVnR6CNDefPksVAJ6m65XKSwNdMdXOYRnkaBwCQi4mRBlBFTQjuz3McSQNdLQ&h=uCZ-Wb0oFA-A2Gn0YU9ayom85WZFxyzc3umEQX6rs5I pragma: - no-cache - server: - - Microsoft-HTTPAPI/2.0 - - Microsoft-HTTPAPI/2.0 strict-transport-security: - max-age=31536000; includeSubDomains + x-cache: + - CONFIG_NOCACHE x-content-type-options: - nosniff + x-ms-need-to-refresh-epl-cache: + - 'False' + x-ms-operation-identifier: + - 
tenantId=ed94de55-1f87-4278-9651-525e7ba467d6,objectId=eeaac044-cddc-4cec-b91d-6c1da8fcdee0/westus/ba61ce66-5820-4909-a196-3cbb4863634a x-ms-ratelimit-remaining-resource: - - Microsoft.Compute/PutDeleteDedicatedHost3Min;115,Microsoft.Compute/PutDeleteDedicatedHost30Min;595 + - Microsoft.Compute/PutDeleteDedicatedHostSubscriptionMaximum;115 x-ms-ratelimit-remaining-subscription-deletes: - - '14998' + - '199' + x-ms-ratelimit-remaining-subscription-global-deletes: + - '2999' + x-msedge-ref: + - 'Ref A: 75467FF85A864E5B80CC9CF6146072D2 Ref B: SG2AA1070303062 Ref C: 2026-03-05T05:53:05Z' status: code: 202 - message: Accepted + message: '' - request: body: null headers: @@ -836,44 +963,46 @@ interactions: ParameterSetName: - -n --host-group -g --yes User-Agent: - - AZURECLI/2.47.0 (AAZ) azsdk-python-core/1.24.0 Python/3.10.11 (Linux-5.15.0-1036-azure-x86_64-with-glibc2.31) - VSTS_7b238909-6802-4b65-b90d-184bca47f458_build_220_0 + - AZURECLI/2.84.0 azsdk-python-core/1.38.0 Python/3.12.10 (Windows-11-10.0.26200-SP0) method: GET - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/providers/Microsoft.Compute/locations/westus/operations/2b7662d0-8698-4b7f-9563-72dddd680efb?p=571046f6-b640-41c1-86f7-f9f044b5adf9&api-version=2024-11-01 + uri: 
https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/providers/Microsoft.Compute/locations/westus/operations/c83ad088-146f-4f3a-970c-de7f1403bfee?p=7b868834-a188-4556-8f0e-38ea6fa689ce&api-version=2022-11-01&t=639082867865902301&c=MIIHhzCCBm-gAwIBAgITHgf_SDN0U0Mca1SI0QAAB_9IMzANBgkqhkiG9w0BAQsFADBEMRMwEQYKCZImiZPyLGQBGRYDR0JMMRMwEQYKCZImiZPyLGQBGRYDQU1FMRgwFgYDVQQDEw9BTUUgSW5mcmEgQ0EgMDYwHhcNMjYwMjE5MDYyODQwWhcNMjYwNTI0MjI1NzAzWjBAMT4wPAYDVQQDEzVhc3luY29wZXJhdGlvbnNpZ25pbmdjZXJ0aWZpY2F0ZS5tYW5hZ2VtZW50LmF6dXJlLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL3gBluMdrlD6ardxpqPmBSYIWUZhonBiNzi4-_Vw3g3Oro9RIUs6ZgPPueDGitorH-JQTqF81P_5QS92kcH5lXb6doPV1q_erI_e20ynOSwv5jOpwnB8O1qCCtpY__wHr-QcB0Lm4K2jLBpKfwOLTZXA5H1VEKxDXmEWP-EQELhaFdVIeJT0qwvEmhDGKnNn1-T2y6yZrYJaboDSEK8AB9KXc3mWLIwh8_G9twTikTy2PuHSzGUy9QLpQMImh2VVk5Ry0Pfpaypw8fNupz4-WjXfVayKHDyMN5Sbe7kFmbdxnISJ2hOyPiNlGB5nUXpHRpMys37mof_UwKdRJcixQECAwEAAaOCBHQwggRwMCcGCSsGAQQBgjcVCgQaMBgwCgYIKwYBBQUHAwEwCgYIKwYBBQUHAwIwPQYJKwYBBAGCNxUHBDAwLgYmKwYBBAGCNxUIhpDjDYTVtHiE8Ys-hZvdFs6dEoFghfmRS4WsmTQCAWQCAQcwggHLBggrBgEFBQcBAQSCAb0wggG5MGMGCCsGAQUFBzAChldodHRwOi8vY3JsLm1pY3Jvc29mdC5jb20vcGtpaW5mcmEvQ2VydHMvQkwyUEtJSU5UQ0EwMi5BTUUuR0JMX0FNRSUyMEluZnJhJTIwQ0ElMjAwNi5jcnQwUwYIKwYBBQUHMAKGR2h0dHA6Ly9jcmwxLmFtZS5nYmwvYWlhL0JMMlBLSUlOVENBMDIuQU1FLkdCTF9BTUUlMjBJbmZyYSUyMENBJTIwMDYuY3J0MFMGCCsGAQUFBzAChkdodHRwOi8vY3JsMi5hbWUuZ2JsL2FpYS9CTDJQS0lJTlRDQTAyLkFNRS5HQkxfQU1FJTIwSW5mcmElMjBDQSUyMDA2LmNydDBTBggrBgEFBQcwAoZHaHR0cDovL2NybDMuYW1lLmdibC9haWEvQkwyUEtJSU5UQ0EwMi5BTUUuR0JMX0FNRSUyMEluZnJhJTIwQ0ElMjAwNi5jcnQwUwYIKwYBBQUHMAKGR2h0dHA6Ly9jcmw0LmFtZS5nYmwvYWlhL0JMMlBLSUlOVENBMDIuQU1FLkdCTF9BTUUlMjBJbmZyYSUyMENBJTIwMDYuY3J0MB0GA1UdDgQWBBSpvSUr5SOrbw_81Q-45DGZEMcVRTAOBgNVHQ8BAf8EBAMCBaAwggEmBgNVHR8EggEdMIIBGTCCARWgggERoIIBDYY_aHR0cDovL2NybC5taWNyb3NvZnQuY29tL3BraWluZnJhL0NSTC9BTUUlMjBJbmZyYSUyMENBJTIwMDYuY3JshjFodHRwOi8vY3JsMS5hbWUuZ2JsL2NybC9BTUUlMjBJbmZyYSUyMENBJTIwMDYuY3JshjFodHRwOi8vY3JsMi5hbWUuZ2JsL2NybC9BTU
UlMjBJbmZyYSUyMENBJTIwMDYuY3JshjFodHRwOi8vY3JsMy5hbWUuZ2JsL2NybC9BTUUlMjBJbmZyYSUyMENBJTIwMDYuY3JshjFodHRwOi8vY3JsNC5hbWUuZ2JsL2NybC9BTUUlMjBJbmZyYSUyMENBJTIwMDYuY3JsMIGdBgNVHSAEgZUwgZIwDAYKKwYBBAGCN3sBATBmBgorBgEEAYI3ewICMFgwVgYIKwYBBQUHAgIwSh5IADMAMwBlADAAMQA5ADIAMQAtADQAZAA2ADQALQA0AGYAOABjAC0AYQAwADUANQAtADUAYgBkAGEAZgBmAGQANQBlADMAMwBkMAwGCisGAQQBgjd7AwIwDAYKKwYBBAGCN3sEAjAfBgNVHSMEGDAWgBTxRmjG8cPwKy19i2rhsvm-NfzRQTAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDQYJKoZIhvcNAQELBQADggEBAKPjOlpIFqUJhjTDyaKZju7P26-JxO3YKxxpiuONQQggB8Pfq85rCh4TYMRHTRF9-SaF8UFmfyVp5MKCn4fUJyrYkEfjqrWoLand203HijTSRlrvcI7H4LdCBUE4oWD21md4XzcNZ61hmhlg0z_LvEFluaWR6FJJgORgK2V5zkvf8GxYZY8SiUSX0FogWYQc0rgwrb9F3zZBApnguvbEEAfZWGixBF7eZX5U89oac2ZpJ6yt5mIyLIWUbKqOIqoTVE7ZWq0g-rZNF6SxzSWnEzmKCIVtHC_4lvCAtexAJWshMLjYvNKYi9WTvrrtCKixbaK9Y4uLtPGLUgNVtMFnJIc&s=ePKC6T9Fn0leOo0uUWXu_Ns0TdjHpZodeBCiIdgr-jFB6wM146WyzK3IxUf-Bw_cto1C7wGjpoCqKS8_BruzpAuqjkJWjrDadjzQV0jS-vZlcHhJRgTQBBEIkdYDI_LsCS6N3rTKjzVnwyGOJtgorPdCo_yoVqx1W5YlAGffrkgfPg-es5njRkHA7cSYc06pyou7iiInkIaJ9Xz3CFoZQ_pc9iA3q_pdSQcvn_nTf0cixe_ubZol1JTU5tKyASR-Bbb5ATiHvbUuxio4NbLPtfjzePVnR6CNDefPksVAJ6m65XKSwNdMdXOYRnkaBwCQi4mRBlBFTQjuz3McSQNdLQ&h=uCZ-Wb0oFA-A2Gn0YU9ayom85WZFxyzc3umEQX6rs5I response: body: - string: "{\r\n \"startTime\": \"2023-05-25T23:04:52.9883002+00:00\",\r\n \"endTime\": - \"2023-05-25T23:04:53.1289708+00:00\",\r\n \"status\": \"Succeeded\",\r\n - \ \"name\": \"2b7662d0-8698-4b7f-9563-72dddd680efb\"\r\n}" + string: "{\r\n \"startTime\": \"2026-03-05T05:53:06.5317685+00:00\",\r\n \"endTime\": + \"2026-03-05T05:53:06.719196+00:00\",\r\n \"status\": \"Succeeded\",\r\n + \ \"name\": \"c83ad088-146f-4f3a-970c-de7f1403bfee\"\r\n}" headers: cache-control: - no-cache content-length: - - '184' + - '183' content-type: - application/json; charset=utf-8 date: - - Thu, 25 May 2023 23:05:22 GMT + - Thu, 05 Mar 2026 05:53:06 GMT expires: - '-1' pragma: - no-cache - server: - - Microsoft-HTTPAPI/2.0 - - Microsoft-HTTPAPI/2.0 
strict-transport-security: - max-age=31536000; includeSubDomains - transfer-encoding: - - chunked - vary: - - Accept-Encoding + x-cache: + - CONFIG_NOCACHE x-content-type-options: - nosniff + x-ms-need-to-refresh-epl-cache: + - 'False' + x-ms-operation-identifier: + - tenantId=ed94de55-1f87-4278-9651-525e7ba467d6,objectId=eeaac044-cddc-4cec-b91d-6c1da8fcdee0/southeastasia/a534e78e-69e9-4c73-8e8d-763634d192df x-ms-ratelimit-remaining-resource: - - Microsoft.Compute/GetOperation3Min;14997,Microsoft.Compute/GetOperation30Min;29997 + - Microsoft.Compute/GetOperationResource;44,Microsoft.Compute/GetOperationSubscriptionMaximum;14996 + x-ms-ratelimit-remaining-subscription-global-reads: + - '3749' + x-msedge-ref: + - 'Ref A: 3C36180BFC10484196445073F0ED7E56 Ref B: SG2AA1070306031 Ref C: 2026-03-05T05:53:07Z' status: code: 200 - message: OK + message: '' - request: body: null headers: @@ -890,8 +1019,7 @@ interactions: ParameterSetName: - -n -g --yes User-Agent: - - AZURECLI/2.47.0 (AAZ) azsdk-python-core/1.24.0 Python/3.10.11 (Linux-5.15.0-1036-azure-x86_64-with-glibc2.31) - VSTS_7b238909-6802-4b65-b90d-184bca47f458_build_220_0 + - AZURECLI/2.84.0 azsdk-python-core/1.38.0 Python/3.12.10 (Windows-11-10.0.26200-SP0) method: DELETE uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vm_host_management_000001/providers/Microsoft.Compute/hostGroups/my-host-group?api-version=2019-03-01 response: @@ -903,23 +1031,30 @@ interactions: content-length: - '0' date: - - Thu, 25 May 2023 23:05:24 GMT + - Thu, 05 Mar 2026 05:53:08 GMT expires: - '-1' pragma: - no-cache - server: - - Microsoft-HTTPAPI/2.0 - - Microsoft-HTTPAPI/2.0 strict-transport-security: - max-age=31536000; includeSubDomains + x-cache: + - CONFIG_NOCACHE x-content-type-options: - nosniff + x-ms-need-to-refresh-epl-cache: + - 'False' + x-ms-operation-identifier: + - 
tenantId=ed94de55-1f87-4278-9651-525e7ba467d6,objectId=eeaac044-cddc-4cec-b91d-6c1da8fcdee0/westus/ef82e22d-1a35-438d-bd2b-befcfa242111 x-ms-ratelimit-remaining-resource: - - Microsoft.Compute/PutDeleteDedicatedHost3Min;114,Microsoft.Compute/PutDeleteDedicatedHost30Min;594 + - Microsoft.Compute/PutDeleteDedicatedHostSubscriptionMaximum;114 x-ms-ratelimit-remaining-subscription-deletes: - - '14998' + - '199' + x-ms-ratelimit-remaining-subscription-global-deletes: + - '2999' + x-msedge-ref: + - 'Ref A: F9CD9BB42932406B9FF7400081A93C2C Ref B: SG2AA1040517036 Ref C: 2026-03-05T05:53:08Z' status: code: 200 - message: OK + message: '' version: 1 diff --git a/src/azure-cli/azure/cli/command_modules/vm/tests/latest/test_vm_commands.py b/src/azure-cli/azure/cli/command_modules/vm/tests/latest/test_vm_commands.py index 45b1af81cf1..7e5f3b706bc 100644 --- a/src/azure-cli/azure/cli/command_modules/vm/tests/latest/test_vm_commands.py +++ b/src/azure-cli/azure/cli/command_modules/vm/tests/latest/test_vm_commands.py @@ -9429,7 +9429,7 @@ def _assert_ids_equal(self, id_1, id_2, rg_prefix=None): # region dedicated host tests class DedicatedHostScenarioTest(ScenarioTest): - @ResourceGroupPreparer(name_prefix='cli_test_vm_host_management_', location='eastus') + @ResourceGroupPreparer(name_prefix='cli_test_vm_host_management_', location='westus') def test_vm_host_management(self, resource_group): self.kwargs.update({ 'host-group': 'my-host-group', @@ -9441,7 +9441,7 @@ def test_vm_host_management(self, resource_group): self.check('length(@)', 1), self.check('[0].name', '{host-group}') ]) - self.cmd('vm host create -n {host} --host-group {host-group} -d 2 -g {rg} --sku DSv3-Type1') + self.cmd('vm host create -n {host} --host-group {host-group} -d 2 -g {rg} --sku DCSv2-Type1') self.cmd('vm host list --host-group {host-group} -g {rg}', checks=[ self.check('length(@)', 1), self.check('[0].name', '{host}') From 44c596e3ddececa7199ec81ea110542845295938 Mon Sep 17 00:00:00 2001 From: 
william051200 Date: Fri, 6 Mar 2026 07:57:56 +0800 Subject: [PATCH 7/7] Update code --- .../azure/cli/command_modules/vm/custom.py | 4 +- .../azure/cli/command_modules/vm/custom.py~ | 6685 ----------------- 2 files changed, 2 insertions(+), 6687 deletions(-) delete mode 100644 src/azure-cli/azure/cli/command_modules/vm/custom.py~ diff --git a/src/azure-cli/azure/cli/command_modules/vm/custom.py b/src/azure-cli/azure/cli/command_modules/vm/custom.py index fa95315628a..245512e4041 100644 --- a/src/azure-cli/azure/cli/command_modules/vm/custom.py +++ b/src/azure-cli/azure/cli/command_modules/vm/custom.py @@ -5938,8 +5938,8 @@ def create_dedicated_host(cmd, host_group_name, host_name, resource_group_name, } } - if tags: - command_args['tags'] = tags + if tags is not None: + command_args['tags'] = tags or {} if auto_replace_on_failure is not None: command_args['auto_replace_on_failure'] = auto_replace_on_failure diff --git a/src/azure-cli/azure/cli/command_modules/vm/custom.py~ b/src/azure-cli/azure/cli/command_modules/vm/custom.py~ deleted file mode 100644 index fa95315628a..00000000000 --- a/src/azure-cli/azure/cli/command_modules/vm/custom.py~ +++ /dev/null @@ -1,6685 +0,0 @@ -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. 
-# -# Generation mode: Incremental -# -------------------------------------------------------------------------- - -# pylint: disable=no-self-use, too-many-lines, no-else-return -# pylint: disable=protected-access -import json -import os - -import requests - -# the urlopen is imported for automation purpose -from urllib.request import urlopen # noqa, pylint: disable=import-error,unused-import,ungrouped-imports - -from knack.log import get_logger -from knack.util import CLIError -from azure.cli.core.azclierror import ( - ResourceNotFoundError, - ValidationError, - RequiredArgumentMissingError, - ArgumentUsageError -) - -from azure.cli.command_modules.vm._validators import _get_resource_group_from_vault_name -from azure.cli.core.commands.validators import validate_file_or_dict - -from azure.cli.core.commands import LongRunningOperation, DeploymentOutputLongRunningOperation -from azure.cli.core.commands.client_factory import get_mgmt_service_client -from azure.cli.core.profiles import ResourceType -from azure.cli.core.util import sdk_no_wait - -from ._vm_utils import read_content_if_is_file, import_aaz_by_profile, IdentityType -from ._vm_diagnostics_templates import get_default_diag_config - -from ._actions import (load_images_from_aliases_doc, load_extension_images_thru_services, - load_images_thru_services, _get_latest_image_version, _get_latest_image_version_by_aaz) -from ._client_factory import (_compute_client_factory, cf_vm_image_term) - -from .aaz.latest.vm.disk import AttachDetachDataDisk -from .aaz.latest.vm import Update as UpdateVM - -from .generated.custom import * # noqa: F403, pylint: disable=unused-wildcard-import,wildcard-import - -try: - from .manual.custom import * # noqa: F403, pylint: disable=unused-wildcard-import,wildcard-import -except ImportError: - pass - -logger = get_logger(__name__) - - -# Use the same name by portal, so people can update from both cli and portal -# (VM doesn't allow multiple handlers for the same extension) 
-_ACCESS_EXT_HANDLER_NAME = 'enablevmaccess' - -_LINUX_ACCESS_EXT = 'VMAccessForLinux' -_WINDOWS_ACCESS_EXT = 'VMAccessAgent' -_LINUX_DIAG_EXT = 'LinuxDiagnostic' -_WINDOWS_DIAG_EXT = 'IaaSDiagnostics' -_LINUX_OMS_AGENT_EXT = 'OmsAgentForLinux' -_WINDOWS_OMS_AGENT_EXT = 'MicrosoftMonitoringAgent' -extension_mappings = { - _LINUX_ACCESS_EXT: { - 'version': '1.5', - 'publisher': 'Microsoft.OSTCExtensions' - }, - _WINDOWS_ACCESS_EXT: { - 'version': '2.4', - 'publisher': 'Microsoft.Compute' - }, - _LINUX_DIAG_EXT: { - 'version': '3.0', - 'publisher': 'Microsoft.Azure.Diagnostics' - }, - _WINDOWS_DIAG_EXT: { - 'version': '1.5', - 'publisher': 'Microsoft.Azure.Diagnostics' - }, - _LINUX_OMS_AGENT_EXT: { - 'version': '1.0', - 'publisher': 'Microsoft.EnterpriseCloud.Monitoring' - }, - _WINDOWS_OMS_AGENT_EXT: { - 'version': '1.0', - 'publisher': 'Microsoft.EnterpriseCloud.Monitoring' - } -} - -remove_basic_option_msg = "It's recommended to create with `%s`. " \ - "Please be aware that Basic option will be removed in the future." 
- - -def _construct_identity_info(identity_scope, identity_role, implicit_identity, external_identities): - info = {} - if identity_scope: - info['scope'] = identity_scope - info['role'] = str(identity_role) # could be DefaultStr, so convert to string - info['userAssignedIdentities'] = external_identities or {} - info['systemAssignedIdentity'] = implicit_identity or '' - return info - - -# for injecting test seams to produce predicatable role assignment id for playback -def _gen_guid(): - import uuid - return uuid.uuid4() - - -def _get_access_extension_upgrade_info(extensions, name): - version = extension_mappings[name]['version'] - publisher = extension_mappings[name]['publisher'] - - auto_upgrade = None - - if extensions: - extension = next((e for e in extensions if e.name == name), None) - from packaging.version import parse # pylint: disable=no-name-in-module,import-error - if extension and parse(extension.type_handler_version) < parse(version): - auto_upgrade = True - elif extension and parse(extension.type_handler_version) > parse(version): - version = extension.type_handler_version - - return publisher, version, auto_upgrade - - -# separated for aaz based implementation -def _get_access_extension_upgrade_info_aaz(extensions, name): - version = extension_mappings[name]['version'] - publisher = extension_mappings[name]['publisher'] - - auto_upgrade = None - - if extensions: - extension = next((e for e in extensions if e.get('name', '') == name), None) - from packaging.version import parse # pylint: disable=no-name-in-module,import-error - if extension and parse(extension['typeHandlerVersion']) < parse(version): - auto_upgrade = True - elif extension and parse(extension['typeHandlerVersion']) > parse(version): - version = extension['typeHandlerVersion'] - - return publisher, version, auto_upgrade - - -def _get_extension_instance_name(instance_view, publisher, extension_type_name, - suggested_name=None): - extension_instance_name = suggested_name or 
extension_type_name - full_type_name = '.'.join([publisher, extension_type_name]) - if instance_view.extensions: - ext = next((x for x in instance_view.extensions - if x.type and (x.type.lower() == full_type_name.lower())), None) - if ext: - extension_instance_name = ext.name - return extension_instance_name - - -# separated for aaz based implementation -def _get_extension_instance_name_aaz(instance_view, publisher, extension_type_name, - suggested_name=None): - extension_instance_name = suggested_name or extension_type_name - full_type_name = '.'.join([publisher, extension_type_name]) - if extensions := instance_view.get('extensions', []): - ext = next((x for x in extensions if x.get('type', '').lower() == full_type_name.lower()), None) - if ext: - extension_instance_name = ext['name'] - return extension_instance_name - - -def _get_storage_management_client(cli_ctx): - return get_mgmt_service_client(cli_ctx, ResourceType.MGMT_STORAGE) - - -def _get_disk_lun(data_disks): - # start from 0, search for unused int for lun - if not data_disks: - return 0 - - existing_luns = sorted([d.lun for d in data_disks]) - for i, current in enumerate(existing_luns): - if current != i: - return i - return len(existing_luns) - - -def _get_disk_lun_by_aaz(data_disks): - # start from 0, search for unused int for lun - if not data_disks: - return 0 - - existing_luns = sorted([d['lun'] for d in data_disks]) - for i, current in enumerate(existing_luns): - if current != i: - return i - return len(existing_luns) - - -def _get_private_config(cli_ctx, resource_group_name, storage_account): - storage_mgmt_client = _get_storage_management_client(cli_ctx) - # pylint: disable=no-member - keys = storage_mgmt_client.storage_accounts.list_keys(resource_group_name, storage_account).keys - - private_config = { - 'storageAccountName': storage_account, - 'storageAccountKey': keys[0].value - } - return private_config - - -def _get_resource_group_location(cli_ctx, resource_group_name): - client = 
get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES) - # pylint: disable=no-member - return client.resource_groups.get(resource_group_name).location - - -def _get_sku_object(cmd, sku): - if cmd.supported_api_version(min_api='2017-03-30'): - DiskSku = cmd.get_models('DiskSku') - return DiskSku(name=sku) - return sku - - -def get_hyper_v_generation_from_vmss(cli_ctx, image_ref, location): # pylint: disable=too-many-return-statements - from ._vm_utils import (is_valid_image_version_id, parse_gallery_image_id, is_valid_vm_image_id, parse_vm_image_id, - parse_shared_gallery_image_id, parse_community_gallery_image_id) - if image_ref is None: - return None - if image_ref.id: - from ._client_factory import _compute_client_factory - if is_valid_image_version_id(image_ref.id): - image_info = parse_gallery_image_id(image_ref.id) - client = _compute_client_factory(cli_ctx, subscription_id=image_info[0]).gallery_images - gallery_image_info = client.get( - resource_group_name=image_info[1], gallery_name=image_info[2], gallery_image_name=image_info[3]) - return gallery_image_info.hyper_v_generation if hasattr(gallery_image_info, 'hyper_v_generation') else None - if is_valid_vm_image_id(image_ref.id): - sub, rg, image_name = parse_vm_image_id(image_ref.id) - client = _compute_client_factory(cli_ctx, subscription_id=sub).images - image_info = client.get(rg, image_name) - return image_info.hyper_v_generation if hasattr(image_info, 'hyper_v_generation') else None - - if image_ref.shared_gallery_image_id is not None: - from ._client_factory import cf_shared_gallery_image - image_info = parse_shared_gallery_image_id(image_ref.shared_gallery_image_id) - gallery_image_info = cf_shared_gallery_image(cli_ctx).get( - location=location, gallery_unique_name=image_info[0], gallery_image_name=image_info[1]) - return gallery_image_info.hyper_v_generation if hasattr(gallery_image_info, 'hyper_v_generation') else None - - if image_ref.community_gallery_image_id is not None: - 
from ._client_factory import cf_community_gallery_image - image_info = parse_community_gallery_image_id(image_ref.community_gallery_image_id) - gallery_image_info = cf_community_gallery_image(cli_ctx).get( - location=location, public_gallery_name=image_info[0], gallery_image_name=image_info[1]) - return gallery_image_info.hyper_v_generation if hasattr(gallery_image_info, 'hyper_v_generation') else None - - if image_ref.offer and image_ref.publisher and image_ref.sku and image_ref.version: - from ._client_factory import cf_vm_image - version = image_ref.version - if version.lower() == 'latest': - from ._actions import _get_latest_image_version - version = _get_latest_image_version(cli_ctx, location, image_ref.publisher, image_ref.offer, - image_ref.sku) - vm_image_info = cf_vm_image(cli_ctx, '').get( - location, image_ref.publisher, image_ref.offer, image_ref.sku, version) - return vm_image_info.hyper_v_generation if hasattr(vm_image_info, 'hyper_v_generation') else None - - return None - - -def get_hyper_v_generation_from_vmss_by_aaz(cli_ctx, image_ref, location): # pylint: disable=too-many-return-statements - from ._vm_utils import (is_valid_image_version_id, parse_gallery_image_id, is_valid_vm_image_id, parse_vm_image_id, - parse_shared_gallery_image_id, parse_community_gallery_image_id) - if image_ref is None: - return None - if image_ref.get("id", None) is not None: - from ._client_factory import _compute_client_factory - if is_valid_image_version_id(image_ref["id"]): - image_info = parse_gallery_image_id(image_ref["id"]) - client = _compute_client_factory(cli_ctx, subscription_id=image_info[0]).gallery_images - gallery_image_info = client.get( - resource_group_name=image_info[1], gallery_name=image_info[2], gallery_image_name=image_info[3]) - return gallery_image_info.hyper_v_generation if hasattr(gallery_image_info, 'hyper_v_generation') else None - if is_valid_vm_image_id(image_ref["id"]): - sub, rg, image_name = parse_vm_image_id(image_ref["id"]) - client = 
_compute_client_factory(cli_ctx, subscription_id=sub).images - image_info = client.get(rg, image_name) - return image_info.hyper_v_generation if hasattr(image_info, 'hyper_v_generation') else None - - if image_ref.get("sharedGalleryImageId", None) is not None: - from ._client_factory import cf_shared_gallery_image - image_info = parse_shared_gallery_image_id(image_ref["sharedGalleryImageId"]) - gallery_image_info = cf_shared_gallery_image(cli_ctx).get( - location=location, gallery_unique_name=image_info[0], gallery_image_name=image_info[1]) - return gallery_image_info.hyper_v_generation if hasattr(gallery_image_info, 'hyper_v_generation') else None - - if image_ref.get("communityGalleryImageId", None) is not None: - from ._client_factory import cf_community_gallery_image - image_info = parse_community_gallery_image_id(image_ref["communityGalleryImageId"]) - gallery_image_info = cf_community_gallery_image(cli_ctx).get( - location=location, public_gallery_name=image_info[0], gallery_image_name=image_info[1]) - return gallery_image_info.hyper_v_generation if hasattr(gallery_image_info, 'hyper_v_generation') else None - - if image_ref.get("offer", None) is not None and image_ref.get("publisher", None) is not None \ - and image_ref.get("sku", None) is not None and image_ref.get("version", None) is not None: - from ._client_factory import cf_vm_image - version = image_ref["version"] - if version.lower() == 'latest': - from ._actions import _get_latest_image_version - version = _get_latest_image_version(cli_ctx, location, image_ref["publisher"], image_ref["offer"], - image_ref["sku"]) - vm_image_info = cf_vm_image(cli_ctx, '').get( - location, image_ref["publisher"], image_ref["offer"], image_ref["sku"], version) - return vm_image_info.hyper_v_generation if hasattr(vm_image_info, 'hyper_v_generation') else None - - return None - - -def _is_linux_os(vm): - os_type = None - if vm and vm.storage_profile and vm.storage_profile.os_disk and vm.storage_profile.os_disk.os_type: - 
os_type = vm.storage_profile.os_disk.os_type - if os_type: - return os_type.lower() == 'linux' - # the os_type could be None for VM scaleset, let us check out os configurations - if vm.os_profile.linux_configuration: - return bool(vm.os_profile.linux_configuration) - return False - - -def _is_linux_os_by_aaz(vm): - os_type = None - if vm.get("storage_profile", {}).get("os_disk", {}).get("os_type", None) is not None: - os_type = vm["storage_profile"]["os_disk"]["os_type"] - if os_type: - return os_type.lower() == 'linux' - # the os_type could be None for VM scaleset, let us check out os configurations - if vm.get("os_profile", {}).get("linux_configuration", None) is not None: - return bool(vm["os_profile"]["linux_configuration"]) - return False - - -# separated for aaz implementation -def _is_linux_os_aaz(vm): - if os_type := vm.get('storageProfile', {}).get('osDisk', {}).get('osType', None): - return os_type.lower() == 'linux' - # the os_type could be None for VM scaleset, let us check out os configurations - if linux_config := vm.get('osProfile', {}).get('linuxConfiguration', ''): - return bool(linux_config) - return False - - -def _merge_secrets(secrets): - """ - Merge a list of secrets. Each secret should be a dict fitting the following JSON structure: - [{ "sourceVault": { "id": "value" }, - "vaultCertificates": [{ "certificateUrl": "value", - "certificateStore": "cert store name (only on windows)"}] }] - The array of secrets is merged on sourceVault.id. 
- :param secrets: - :return: - """ - merged = {} - vc_name = 'vaultCertificates' - for outer in secrets: - for secret in outer: - if secret['sourceVault']['id'] not in merged: - merged[secret['sourceVault']['id']] = [] - merged[secret['sourceVault']['id']] = \ - secret[vc_name] + merged[secret['sourceVault']['id']] - - # transform the reduced map to vm format - formatted = [{'sourceVault': {'id': source_id}, - 'vaultCertificates': value} - for source_id, value in list(merged.items())] - return formatted - - -def _normalize_extension_version(cli_ctx, publisher, vm_extension_name, version, location): - - def _trim_away_build_number(version): - # workaround a known issue: the version must only contain "major.minor", even though - # "extension image list" gives more detail - return '.'.join(version.split('.')[0:2]) - - if not version: - result = load_extension_images_thru_services(cli_ctx, publisher, vm_extension_name, None, location, - show_latest=True, partial_match=False) - if not result: - raise CLIError('Failed to find the latest version for the extension "{}"'.format(vm_extension_name)) - # with 'show_latest' enabled, we will only get one result. - version = result[0]['version'] - - version = _trim_away_build_number(version) - return version - - -def _parse_rg_name(strid): - '''From an ID, extract the contained (resource group, name) tuple.''' - from azure.mgmt.core.tools import parse_resource_id - parts = parse_resource_id(strid) - return (parts['resource_group'], parts['name']) - - -def _set_sku(cmd, instance, sku): - if cmd.supported_api_version(min_api='2017-03-30'): - instance.sku = cmd.get_models('DiskSku')(name=sku) - else: - instance.account_type = sku - - -def _show_missing_access_warning(resource_group, name, command): - warn = ("No access was given yet to the '{1}', because '--scope' was not provided. " - "You should setup by creating a role assignment, e.g. 
" - "'az role assignment create --assignee --role contributor -g {0}' " - "would let it access the current resource group. To get the pricipal id, run " - "'az {2} show -g {0} -n {1} --query \"identity.principalId\" -otsv'".format(resource_group, name, command)) - logger.warning(warn) - - -def _parse_aux_subscriptions(resource_id): - from azure.mgmt.core.tools import is_valid_resource_id, parse_resource_id - if is_valid_resource_id(resource_id): - res = parse_resource_id(resource_id) - return [res['subscription']] - return None - - -# Hide extension information from output as the info is not correct and unhelpful; also -# commands using it mean to hide the extension concept from users. -class ExtensionUpdateLongRunningOperation(LongRunningOperation): # pylint: disable=too-few-public-methods - pass - - -# region Disks (Managed) -def create_managed_disk(cmd, resource_group_name, disk_name, location=None, # pylint: disable=too-many-locals, too-many-branches, too-many-statements, line-too-long - size_gb=None, sku='Premium_LRS', os_type=None, - source=None, for_upload=None, upload_size_bytes=None, # pylint: disable=unused-argument - # below are generated internally from 'source' - source_blob_uri=None, source_disk=None, source_snapshot=None, source_restore_point=None, - source_storage_account_id=None, no_wait=False, tags=None, zone=None, - disk_iops_read_write=None, disk_mbps_read_write=None, hyper_v_generation=None, - encryption_type=None, disk_encryption_set=None, max_shares=None, - disk_iops_read_only=None, disk_mbps_read_only=None, - image_reference=None, image_reference_lun=None, - gallery_image_reference=None, gallery_image_reference_lun=None, - network_access_policy=None, disk_access=None, logical_sector_size=None, - tier=None, enable_bursting=None, edge_zone=None, security_type=None, support_hibernation=None, - public_network_access=None, accelerated_network=None, architecture=None, - data_access_auth_mode=None, gallery_image_reference_type=None, 
security_data_uri=None, - upload_type=None, secure_vm_disk_encryption_set=None, performance_plus=None, - optimized_for_frequent_attach=None, security_metadata_uri=None, action_on_disk_delay=None, - supported_security_option=None): - - from azure.mgmt.core.tools import resource_id, is_valid_resource_id - from azure.cli.core.commands.client_factory import get_subscription_id - - location = location or _get_resource_group_location(cmd.cli_ctx, resource_group_name) - if security_data_uri: - option = 'ImportSecure' - elif source_blob_uri: - option = 'Import' - elif source_disk or source_snapshot: - option = 'Copy' - elif source_restore_point: - option = 'Restore' - elif upload_type == 'Upload': - option = 'Upload' - elif upload_type == 'UploadWithSecurityData': - option = 'UploadPreparedSecure' - elif image_reference or gallery_image_reference: - option = 'FromImage' - else: - option = 'Empty' - - if source_storage_account_id is None and source_blob_uri is not None: - subscription_id = get_subscription_id(cmd.cli_ctx) - storage_account_name = source_blob_uri.split('.')[0].split('/')[-1] - source_storage_account_id = resource_id( - subscription=subscription_id, resource_group=resource_group_name, - namespace='Microsoft.Storage', type='storageAccounts', name=storage_account_name) - - if upload_size_bytes is not None and not upload_type: - raise RequiredArgumentMissingError( - 'usage error: --upload-size-bytes should be used together with --upload-type') - - from ._constants import COMPATIBLE_SECURITY_TYPE_VALUE, UPGRADE_SECURITY_HINT - if image_reference is not None: - if not is_valid_resource_id(image_reference): - # URN or name - terms = image_reference.split(':') - if len(terms) == 4: # URN - disk_publisher, disk_offer, disk_sku, disk_version = terms[0], terms[1], terms[2], terms[3] - if disk_version.lower() == 'latest': - disk_version = _get_latest_image_version_by_aaz(cmd.cli_ctx, location, disk_publisher, disk_offer, - disk_sku) - else: # error - raise 
CLIError('usage error: --image-reference should be ID or URN (publisher:offer:sku:version).') - else: - from azure.mgmt.core.tools import parse_resource_id - terms = parse_resource_id(image_reference) - disk_publisher, disk_offer, disk_sku, disk_version = \ - terms['child_name_1'], terms['child_name_3'], terms['child_name_4'], terms['child_name_5'] - - from .aaz.latest.vm.image import Show as VmImageShow - command_args = { - 'location': location, - 'offer': disk_offer, - 'publisher': disk_publisher, - 'sku': disk_sku, - 'version': disk_version, - } - response = VmImageShow(cli_ctx=cmd.cli_ctx)(command_args=command_args) - - if response.get('hyper_v_generation'): - if response.get('hyper_v_generation') == 'V1': - logger.warning(UPGRADE_SECURITY_HINT) - elif response.get('hyper_v_generation') == 'V2': - # set default value of hyper_v_generation - if hyper_v_generation == 'V1': - hyper_v_generation = 'V2' - # set default value of security_type - if not security_type: - security_type = 'TrustedLaunch' - if security_type != 'TrustedLaunch': - logger.warning(UPGRADE_SECURITY_HINT) - - # image_reference is an ID now - image_reference = {'id': response.get('id')} - if image_reference_lun is not None: - image_reference['lun'] = image_reference_lun - - if gallery_image_reference is not None: - if not security_type: - security_type = 'Standard' - if security_type != 'TrustedLaunch': - logger.warning(UPGRADE_SECURITY_HINT) - - key = gallery_image_reference_type if gallery_image_reference_type else 'id' - gallery_image_reference = {key: gallery_image_reference} - if gallery_image_reference_lun is not None: - gallery_image_reference['lun'] = gallery_image_reference_lun - - creation_data = { - "create_option": option, - "source_uri": source_blob_uri, - "image_reference": image_reference, - "gallery_image_reference": gallery_image_reference, - "source_resource_id": source_disk or source_snapshot or source_restore_point, - "storage_account_id": source_storage_account_id, - 
"upload_size_bytes": upload_size_bytes, - "logical_sector_size": logical_sector_size, - "security_data_uri": security_data_uri, - "performance_plus": performance_plus, - "security_metadata_uri": security_metadata_uri, - } - - if size_gb is None and option == "Empty": - raise RequiredArgumentMissingError( - 'usage error: --size-gb is required to create an empty disk') - if upload_size_bytes is None and upload_type: - raise RequiredArgumentMissingError( - 'usage error: --upload-size-bytes is required to create a disk for upload') - - if disk_encryption_set is not None and not is_valid_resource_id(disk_encryption_set): - disk_encryption_set = resource_id( - subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name, - namespace='Microsoft.Compute', type='diskEncryptionSets', name=disk_encryption_set) - - if disk_access is not None and not is_valid_resource_id(disk_access): - disk_access = resource_id( - subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name, - namespace='Microsoft.Compute', type='diskAccesses', name=disk_access) - - if secure_vm_disk_encryption_set is not None and not is_valid_resource_id(secure_vm_disk_encryption_set): - secure_vm_disk_encryption_set = resource_id( - subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name, - namespace='Microsoft.Compute', type='diskEncryptionSets', name=secure_vm_disk_encryption_set) - - encryption = None - if disk_encryption_set or encryption_type: - encryption = { - "type": encryption_type, - "disk_encryption_set_id": disk_encryption_set - } - - sku = {"name": sku} - - args = { - "location": location, - "creation_data": creation_data, - "tags": tags or {}, - "sku": sku, - "disk_size_gb": size_gb, - "os_type": os_type, - "encryption": encryption - } - - if hyper_v_generation: - args["hyper_v_generation"] = hyper_v_generation - - if zone: - args["zones"] = zone - if disk_iops_read_write is not None: - args["disk_iops_read_write"] = 
disk_iops_read_write - if disk_mbps_read_write is not None: - args["disk_m_bps_read_write"] = disk_mbps_read_write - if max_shares is not None: - args["max_shares"] = max_shares - if disk_iops_read_only is not None: - args["disk_iops_read_only"] = disk_iops_read_only - if disk_mbps_read_only is not None: - args["disk_m_bps_read_only"] = disk_mbps_read_only - if network_access_policy is not None: - args["network_access_policy"] = network_access_policy - if disk_access is not None: - args["disk_access_id"] = disk_access - if tier is not None: - args["tier"] = tier - if enable_bursting is not None: - args["bursting_enabled"] = enable_bursting - if edge_zone is not None: - args["extended_location"] = edge_zone - # The `Standard` is used for backward compatibility to allow customers to keep their current behavior - # after changing the default values to Trusted Launch VMs in the future. - if security_type and security_type != COMPATIBLE_SECURITY_TYPE_VALUE: - args["security_profile"] = {'securityType': security_type} - if secure_vm_disk_encryption_set: - args["security_profile"]["secure_vm_disk_encryption_set_id"] = secure_vm_disk_encryption_set - if support_hibernation is not None: - args["supports_hibernation"] = support_hibernation - if public_network_access is not None: - args["public_network_access"] = public_network_access - if accelerated_network is not None or architecture is not None or supported_security_option is not None: - if args.get("supported_capabilities", None) is None: - supported_capabilities = { - "accelerated_network": accelerated_network, - "architecture": architecture, - "supported_security_option": supported_security_option - } - args["supported_capabilities"] = supported_capabilities - else: - args["supported_capabilities"]["accelerated_network"] = accelerated_network - args["supported_capabilities"]["architecture"] = architecture - args["supported_capabilities"]["supported_security_option"] = supported_security_option - if 
data_access_auth_mode is not None: - args["data_access_auth_mode"] = data_access_auth_mode - if optimized_for_frequent_attach is not None: - args["optimized_for_frequent_attach"] = optimized_for_frequent_attach - if action_on_disk_delay is not None: - args["availability_policy"] = {'action_on_disk_delay': action_on_disk_delay} - - args["no_wait"] = no_wait - args["disk_name"] = disk_name - args["resource_group"] = resource_group_name - - from .aaz.latest.disk import Create - return Create(cli_ctx=cmd.cli_ctx)(command_args=args) - - -# region Images (Managed) -def create_image(cmd, resource_group_name, name, source, os_type=None, data_disk_sources=None, location=None, # pylint: disable=too-many-locals,unused-argument - # below are generated internally from 'source' and 'data_disk_sources' - source_virtual_machine=None, storage_sku=None, hyper_v_generation=None, - os_blob_uri=None, data_blob_uris=None, - os_snapshot=None, data_snapshots=None, - os_disk=None, os_disk_caching=None, data_disks=None, data_disk_caching=None, - tags=None, zone_resilient=None, edge_zone=None): - if source_virtual_machine: - location = location or _get_resource_group_location(cmd.cli_ctx, resource_group_name) - image_storage_profile = None if zone_resilient is None else {"zone_resilient": zone_resilient} - args = { - "location": location, - "source_virtual_machine": {"id": source_virtual_machine}, - "storage_profile": image_storage_profile, - "tags": tags or {} - } - else: - os_disk = { - "os_type": os_type, - "os_state": "Generalized", - "caching": os_disk_caching, - "snapshot": {"id": os_snapshot} if os_snapshot else None, - "managed_disk": {"id": os_disk} if os_disk else None, - "blob_uri": os_blob_uri, - "storage_account_type": storage_sku - } - all_data_disks = [] - lun = 0 - if data_blob_uris: - for d in data_blob_uris: - all_data_disks.append({ - "lun": lun, - "blob_uri": d, - "caching": data_disk_caching - }) - lun += 1 - if data_snapshots: - for d in data_snapshots: - 
all_data_disks.append({ - "lun": lun, - "snapshot": {"id": d}, - "caching": data_disk_caching - }) - lun += 1 - if data_disks: - for d in data_disks: - all_data_disks.append({ - "lun": lun, - "managed_disk": {"id": d}, - "caching": data_disk_caching - }) - lun += 1 - - image_storage_profile = { - "os_disk": os_disk, - "data_disks": all_data_disks - } - if zone_resilient is not None: - image_storage_profile["zone_resilient"] = zone_resilient - location = location or _get_resource_group_location(cmd.cli_ctx, resource_group_name) - # pylint disable=no-member - args = { - "location": location, - "storage_profile": image_storage_profile, - "tags": tags or {} - } - - if hyper_v_generation: - args["hyper_v_generation"] = hyper_v_generation - - if edge_zone: - args["extended_location"] = edge_zone - - args["image_name"] = name - args["resource_group"] = resource_group_name - - from .aaz.latest.image import Create - return Create(cli_ctx=cmd.cli_ctx)(command_args=args) - - -# region Snapshots -# pylint: disable=unused-argument,too-many-locals -def create_snapshot(cmd, resource_group_name, snapshot_name, location=None, size_gb=None, sku='Standard_LRS', - source=None, for_upload=None, copy_start=None, incremental=None, - # below are generated internally from 'source' - source_blob_uri=None, source_disk=None, source_snapshot=None, source_storage_account_id=None, - hyper_v_generation=None, tags=None, no_wait=False, disk_encryption_set=None, - encryption_type=None, network_access_policy=None, disk_access=None, edge_zone=None, - public_network_access=None, accelerated_network=None, architecture=None, - elastic_san_resource_id=None, bandwidth_copy_speed=None, instant_access_duration_minutes=None): - from azure.mgmt.core.tools import resource_id, is_valid_resource_id - from azure.cli.core.commands.client_factory import get_subscription_id - - location = location or _get_resource_group_location(cmd.cli_ctx, resource_group_name) - if source_blob_uri: - option = 'Import' - elif 
source_disk or source_snapshot: - option = 'CopyStart' if copy_start else 'Copy' - elif for_upload: - option = 'Upload' - elif elastic_san_resource_id: - option = 'CopyFromSanSnapshot' - else: - option = 'Empty' - - creation_data = { - 'create_option': option, - 'source_uri': source_blob_uri, - 'image_reference': None, - 'source_resource_id': source_disk or source_snapshot, - 'storage_account_id': source_storage_account_id, - 'elastic_san_resource_id': elastic_san_resource_id, - 'provisioned_bandwidth_copy_speed': bandwidth_copy_speed, - 'instant_access_duration_minutes': instant_access_duration_minutes - } - - if size_gb is None and option == 'Empty': - raise CLIError('Please supply size for the snapshots') - - if disk_encryption_set is not None and not is_valid_resource_id(disk_encryption_set): - disk_encryption_set = resource_id( - subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name, - namespace='Microsoft.Compute', type='diskEncryptionSets', name=disk_encryption_set) - - if disk_access is not None and not is_valid_resource_id(disk_access): - disk_access = resource_id( - subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name, - namespace='Microsoft.Compute', type='diskAccesses', name=disk_access) - - if disk_encryption_set is not None and encryption_type is None: - raise CLIError('usage error: Please specify --encryption-type.') - if encryption_type is not None: - encryption = { - 'type': encryption_type, - 'disk_encryption_set_id': disk_encryption_set - } - else: - encryption = None - - args = { - 'location': location, - 'creation_data': creation_data, - 'tags': tags or {}, - 'sku': {'name': sku}, - 'disk_size_gb': size_gb, - 'incremental': incremental, - 'encryption': encryption, - } - - if hyper_v_generation: - args['hyper_v_generation'] = hyper_v_generation - if network_access_policy is not None: - args['network_access_policy'] = network_access_policy - if disk_access is not None: - 
args['disk_access_id'] = disk_access - if edge_zone: - args['extended_location'] = edge_zone - if public_network_access is not None: - args['public_network_access'] = public_network_access - if accelerated_network is not None or architecture is not None: - if args.get('supported_capabilities', None) is None: - supported_capabilities = { - 'accelerated_network': accelerated_network, - 'architecture': architecture - } - args['supported_capabilities'] = supported_capabilities - else: - args['supported_capabilities']['accelerated_network'] = accelerated_network - args['supported_capabilities']['architecture'] = architecture - - args['snapshot_name'] = snapshot_name - args['resource_group'] = resource_group_name - args['no_wait'] = no_wait - - from .aaz.latest.snapshot import Create - return Create(cli_ctx=cmd.cli_ctx)(command_args=args) - - -# region VirtualMachines Identity -def show_vm_identity(cmd, resource_group_name, vm_name): - vm = get_vm_by_aaz(cmd, resource_group_name, vm_name) - - identity = vm.get("identity", {}) if vm else None - - if identity and not identity.get('userAssignedIdentities'): - identity['userAssignedIdentities'] = None - - return identity or None - - -def show_vmss_identity(cmd, resource_group_name, vm_name): - vm = get_vmss_by_aaz(cmd, resource_group_name, vm_name) - return vm.get("identity", {}) if vm else None - - -def assign_vm_identity(cmd, resource_group_name, vm_name, assign_identity=None, identity_role=None, - identity_role_id=None, identity_scope=None): - identity, _, external_identities, enable_local_identity = _build_identities_info(assign_identity) - - command_args = {'resource_group': resource_group_name, 'vm_name': vm_name} - - def getter(): - return get_vm_by_aaz(cmd, resource_group_name, vm_name) - - def setter(vm, external_identities=external_identities): - if vm.get('identity', {}).get('type', None) == IdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED.value: - identity_types = IdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED.value - elif 
vm.get('identity', {}).get('type', None) == IdentityType.SYSTEM_ASSIGNED.value and external_identities: - identity_types = IdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED.value - elif vm.get('identity', {}).get('type', None) == IdentityType.USER_ASSIGNED.value and enable_local_identity: - identity_types = IdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED.value - elif external_identities and enable_local_identity: - identity_types = IdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED.value - elif external_identities: - identity_types = IdentityType.USER_ASSIGNED.value - else: - identity_types = IdentityType.SYSTEM_ASSIGNED.value - - if identity_types == IdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED.value: - command_args['mi_system_assigned'] = "True" - command_args['mi_user_assigned'] = [] - elif identity_types == IdentityType.USER_ASSIGNED.value: - command_args['mi_user_assigned'] = [] - else: - command_args['mi_system_assigned'] = "True" - command_args['mi_user_assigned'] = [] - - if vm.get('identity', {}).get('userAssignedIdentities', None): - for key in vm.get('identity').get('userAssignedIdentities').keys(): - command_args['mi_user_assigned'].append(key) - - if identity.get('userAssignedIdentities'): - for key in identity.get('userAssignedIdentities', {}).keys(): - if key not in command_args['mi_user_assigned']: - command_args['mi_user_assigned'].append(key) - - from .operations.vm import VMPatch - update_vm_identity = VMPatch(cli_ctx=cmd.cli_ctx)(command_args=command_args) - LongRunningOperation(cmd.cli_ctx)(update_vm_identity) - result = update_vm_identity.result() - return result - - from ._vm_utils import assign_identity as assign_identity_helper - assign_identity_helper(cmd.cli_ctx, getter, setter, identity_role=identity_role_id, identity_scope=identity_scope) - - vm = getter() - return _construct_identity_info( - identity_scope, - identity_role, - vm.get('identity').get('principalId') if vm.get('identity') else None, - vm.get('identity').get('userAssignedIdentities') if 
vm.get('identity') else None) -# endregion - - -# region VirtualMachines -def capture_vm(cmd, resource_group_name, vm_name, vhd_name_prefix, - storage_container='vhds', overwrite=True): - VirtualMachineCaptureParameters = cmd.get_models('VirtualMachineCaptureParameters') - client = _compute_client_factory(cmd.cli_ctx) - parameter = VirtualMachineCaptureParameters(vhd_prefix=vhd_name_prefix, - destination_container_name=storage_container, - overwrite_vhds=overwrite) - poller = client.virtual_machines.begin_capture(resource_group_name, vm_name, parameter) - result = LongRunningOperation(cmd.cli_ctx)(poller) - output = getattr(result, 'output', None) or result.resources[0] - print(json.dumps(output, indent=2)) # pylint: disable=no-member - - -# pylint: disable=too-many-locals, unused-argument, too-many-statements, too-many-branches, broad-except -def create_vm(cmd, vm_name, resource_group_name, image=None, size='Standard_DS1_v2', location=None, tags=None, - no_wait=False, authentication_type=None, admin_password=None, computer_name=None, - admin_username=None, ssh_dest_key_path=None, ssh_key_value=None, generate_ssh_keys=False, - availability_set=None, nics=None, nsg=None, nsg_rule=None, accelerated_networking=None, - private_ip_address=None, public_ip_address=None, public_ip_address_allocation='dynamic', - public_ip_address_dns_name=None, public_ip_sku=None, os_disk_name=None, os_type=None, - storage_account=None, os_caching=None, data_caching=None, storage_container_name=None, storage_sku=None, - use_unmanaged_disk=False, attach_os_disk=None, os_disk_size_gb=None, attach_data_disks=None, - data_disk_sizes_gb=None, disk_info=None, - vnet_name=None, vnet_address_prefix='10.0.0.0/16', subnet=None, subnet_address_prefix='10.0.0.0/24', - storage_profile=None, os_publisher=None, os_offer=None, os_sku=None, os_version=None, - storage_account_type=None, vnet_type=None, nsg_type=None, public_ip_address_type=None, nic_type=None, - validate=False, custom_data=None, 
secrets=None, plan_name=None, plan_product=None, plan_publisher=None, - plan_promotion_code=None, license_type=None, assign_identity=None, identity_scope=None, - identity_role=None, identity_role_id=None, encryption_identity=None, - application_security_groups=None, zone=None, boot_diagnostics_storage=None, ultra_ssd_enabled=None, - ephemeral_os_disk=None, ephemeral_os_disk_placement=None, - proximity_placement_group=None, dedicated_host=None, dedicated_host_group=None, aux_subscriptions=None, - priority=None, max_price=None, eviction_policy=None, enable_agent=None, workspace=None, vmss=None, - os_disk_encryption_set=None, data_disk_encryption_sets=None, specialized=None, - encryption_at_host=None, enable_auto_update=None, patch_mode=None, ssh_key_name=None, - enable_hotpatching=None, platform_fault_domain=None, security_type=None, enable_secure_boot=None, - enable_vtpm=None, count=None, edge_zone=None, nic_delete_option=None, os_disk_delete_option=None, - data_disk_delete_option=None, user_data=None, capacity_reservation_group=None, enable_hibernation=None, - v_cpus_available=None, v_cpus_per_core=None, accept_term=None, - disable_integrity_monitoring=None, # Unused - enable_integrity_monitoring=False, - os_disk_security_encryption_type=None, os_disk_secure_vm_disk_encryption_set=None, - disk_controller_type=None, disable_integrity_monitoring_autoupgrade=False, enable_proxy_agent=None, - proxy_agent_mode=None, source_snapshots_or_disks=None, source_snapshots_or_disks_size_gb=None, - source_disk_restore_point=None, source_disk_restore_point_size_gb=None, ssh_key_type=None, - additional_scheduled_events=None, enable_user_reboot_scheduled_events=None, - enable_user_redeploy_scheduled_events=None, zone_placement_policy=None, include_zones=None, - exclude_zones=None, align_regional_disks_to_vm_zone=None, wire_server_mode=None, imds_mode=None, - wire_server_access_control_profile_reference_id=None, imds_access_control_profile_reference_id=None, - 
key_incarnation_id=None, add_proxy_agent_extension=None, disk_iops_read_write=None, - disk_mbps_read_write=None): - - from azure.cli.core.commands.client_factory import get_subscription_id - from azure.cli.core.util import random_string, hash_string - from azure.cli.core.commands.arm import ArmTemplateBuilder - from azure.cli.command_modules.vm._template_builder import (build_vm_resource, - build_storage_account_resource, build_nic_resource, - build_vnet_resource, build_nsg_resource, - build_public_ip_resource, StorageProfile, - build_msi_role_assignment, - build_vm_linux_log_analytics_workspace_agent, - build_vm_windows_log_analytics_workspace_agent) - from azure.cli.command_modules.vm._vm_utils import ArmTemplateBuilder20190401 - from azure.mgmt.core.tools import resource_id, is_valid_resource_id, parse_resource_id - - # In the latest profile, the default public IP will be expected to be changed from Basic to Standard, - # and Basic option will be removed. - # In order to avoid breaking change which has a big impact to users, - # we use the hint to guide users to use Standard public IP to create VM in the first stage. 
- if cmd.cli_ctx.cloud.profile == 'latest': - if public_ip_sku == "Basic": - logger.warning(remove_basic_option_msg, "--public-ip-sku Standard") - - subscription_id = get_subscription_id(cmd.cli_ctx) - if os_disk_encryption_set is not None and not is_valid_resource_id(os_disk_encryption_set): - os_disk_encryption_set = resource_id( - subscription=subscription_id, resource_group=resource_group_name, - namespace='Microsoft.Compute', type='diskEncryptionSets', name=os_disk_encryption_set) - if os_disk_secure_vm_disk_encryption_set is not None and\ - not is_valid_resource_id(os_disk_secure_vm_disk_encryption_set): - os_disk_secure_vm_disk_encryption_set = resource_id( - subscription=subscription_id, resource_group=resource_group_name, - namespace='Microsoft.Compute', type='diskEncryptionSets', name=os_disk_secure_vm_disk_encryption_set) - - if data_disk_encryption_sets is None: - data_disk_encryption_sets = [] - for i, des in enumerate(data_disk_encryption_sets): - if des is not None and not is_valid_resource_id(des): - data_disk_encryption_sets[i] = resource_id( - subscription=subscription_id, resource_group=resource_group_name, - namespace='Microsoft.Compute', type='diskEncryptionSets', name=des) - - storage_sku = disk_info['os'].get('storageAccountType') - - network_id_template = resource_id( - subscription=subscription_id, resource_group=resource_group_name, - namespace='Microsoft.Network') - - vm_id = resource_id( - subscription=subscription_id, resource_group=resource_group_name, - namespace='Microsoft.Compute', type='virtualMachines', name=vm_name) - - # determine final defaults and calculated values - tags = tags or {} - os_disk_name = os_disk_name or ('osdisk_{}'.format(hash_string(vm_id, length=10)) if use_unmanaged_disk else None) - storage_container_name = storage_container_name or 'vhds' - - # Build up the ARM template - if count is None: - master_template = ArmTemplateBuilder() - else: - master_template = ArmTemplateBuilder20190401() - - vm_dependencies = 
[] - if storage_account_type == 'new': - storage_account = storage_account or 'vhdstorage{}'.format( - hash_string(vm_id, length=14, force_lower=True)) - vm_dependencies.append('Microsoft.Storage/storageAccounts/{}'.format(storage_account)) - master_template.add_resource(build_storage_account_resource(cmd, storage_account, location, - tags, storage_sku, edge_zone)) - - nic_name = None - if nic_type == 'new': - nic_name = '{}VMNic'.format(vm_name) - nic_full_name = 'Microsoft.Network/networkInterfaces/{}'.format(nic_name) - if count: - vm_dependencies.extend([nic_full_name + str(i) for i in range(count)]) - else: - vm_dependencies.append(nic_full_name) - - nic_dependencies = [] - if vnet_type == 'new': - subnet = subnet or '{}Subnet'.format(vm_name) - vnet_exists = False - if vnet_name: - from azure.cli.command_modules.vm._vm_utils import check_existence - vnet_exists = \ - check_existence(cmd.cli_ctx, vnet_name, resource_group_name, 'Microsoft.Network', 'virtualNetworks') - if vnet_exists: - SubnetCreate = import_aaz_by_profile(cmd.cli_ctx.cloud.profile, "network.vnet.subnet").Create - try: - poller = SubnetCreate(cli_ctx=cmd.cli_ctx)(command_args={ - 'name': subnet, - 'vnet_name': vnet_name, - 'resource_group': resource_group_name, - 'address_prefixes': [subnet_address_prefix], - 'address_prefix': subnet_address_prefix - }) - LongRunningOperation(cmd.cli_ctx)(poller) - except Exception: - raise CLIError('Subnet({}) does not exist, but failed to create a new subnet with address ' - 'prefix {}. It may be caused by name or address prefix conflict. 
Please specify ' - 'an appropriate subnet name with --subnet or a valid address prefix value with ' - '--subnet-address-prefix.'.format(subnet, subnet_address_prefix)) - if not vnet_exists: - vnet_name = vnet_name or '{}VNET'.format(vm_name) - nic_dependencies.append('Microsoft.Network/virtualNetworks/{}'.format(vnet_name)) - master_template.add_resource(build_vnet_resource(cmd, vnet_name, location, tags, vnet_address_prefix, - subnet, subnet_address_prefix, edge_zone=edge_zone)) - - if nsg_type == 'new': - if nsg_rule is None: - nsg_rule = 'RDP' if os_type.lower() == 'windows' else 'SSH' - nsg = nsg or '{}NSG'.format(vm_name) - nic_dependencies.append('Microsoft.Network/networkSecurityGroups/{}'.format(nsg)) - master_template.add_resource(build_nsg_resource(cmd, nsg, location, tags, nsg_rule)) - - if public_ip_address_type == 'new': - public_ip_address = public_ip_address or '{}PublicIP'.format(vm_name) - public_ip_address_full_name = 'Microsoft.Network/publicIpAddresses/{}'.format(public_ip_address) - if count: - nic_dependencies.extend([public_ip_address_full_name + str(i) for i in range(count)]) - else: - nic_dependencies.append(public_ip_address_full_name) - master_template.add_resource(build_public_ip_resource(cmd, public_ip_address, location, tags, - public_ip_address_allocation, - public_ip_address_dns_name, - public_ip_sku, zone, count, edge_zone)) - - subnet_id = subnet if is_valid_resource_id(subnet) else \ - '{}/virtualNetworks/{}/subnets/{}'.format(network_id_template, vnet_name, subnet) - - nsg_id = None - if nsg: - nsg_id = nsg if is_valid_resource_id(nsg) else \ - '{}/networkSecurityGroups/{}'.format(network_id_template, nsg) - - public_ip_address_id = None - if public_ip_address: - public_ip_address_id = public_ip_address if is_valid_resource_id(public_ip_address) \ - else '{}/publicIPAddresses/{}'.format(network_id_template, public_ip_address) - - nics_id = '{}/networkInterfaces/{}'.format(network_id_template, nic_name) - - if count: - nics = [ - 
{ - 'id': "[concat('{}', copyIndex())]".format(nics_id), - 'properties': { - 'deleteOption': nic_delete_option - } - } - ] - else: - nics = [ - { - 'id': nics_id, - 'properties': { - 'deleteOption': nic_delete_option - } - } - ] - - nic_resource = build_nic_resource( - cmd, nic_name, location, tags, vm_name, subnet_id, private_ip_address, nsg_id, - public_ip_address_id, application_security_groups, accelerated_networking=accelerated_networking, - count=count, edge_zone=edge_zone) - nic_resource['dependsOn'] = nic_dependencies - master_template.add_resource(nic_resource) - else: - # Using an existing NIC - invalid_parameters = [nsg, public_ip_address, subnet, vnet_name, application_security_groups] - if any(invalid_parameters): - raise CLIError('When specifying an existing NIC, do not specify NSG, ' - 'public IP, ASGs, VNet or subnet.') - if accelerated_networking is not None: - logger.warning('When specifying an existing NIC, do not specify accelerated networking. ' - 'Ignore --accelerated-networking now. 
' - 'This will trigger an error instead of a warning in future releases.') - - os_vhd_uri = None - if storage_profile in [StorageProfile.SACustomImage, StorageProfile.SAPirImage]: - storage_account_name = storage_account.rsplit('/', 1) - storage_account_name = storage_account_name[1] if \ - len(storage_account_name) > 1 else storage_account_name[0] - os_vhd_uri = 'https://{}.blob.{}/{}/{}.vhd'.format( - storage_account_name, cmd.cli_ctx.cloud.suffixes.storage_endpoint, storage_container_name, os_disk_name) - elif storage_profile == StorageProfile.SASpecializedOSDisk: - os_vhd_uri = attach_os_disk - os_disk_name = attach_os_disk.rsplit('/', 1)[1][:-4] - - if custom_data: - custom_data = read_content_if_is_file(custom_data) - - if user_data: - user_data = read_content_if_is_file(user_data) - - if secrets: - secrets = _merge_secrets([validate_file_or_dict(secret) for secret in secrets]) - - vm_resource = build_vm_resource( - name=vm_name, location=location, tags=tags, size=size, storage_profile=storage_profile, nics=nics, - admin_username=admin_username, availability_set_id=availability_set, admin_password=admin_password, - ssh_key_values=ssh_key_value, ssh_key_path=ssh_dest_key_path, image_reference=image, - os_disk_name=os_disk_name, custom_image_os_type=os_type, authentication_type=authentication_type, - os_publisher=os_publisher, os_offer=os_offer, os_sku=os_sku, os_version=os_version, os_vhd_uri=os_vhd_uri, - attach_os_disk=attach_os_disk, os_disk_size_gb=os_disk_size_gb, custom_data=custom_data, secrets=secrets, - license_type=license_type, zone=zone, disk_info=disk_info, - boot_diagnostics_storage_uri=boot_diagnostics_storage, ultra_ssd_enabled=ultra_ssd_enabled, - proximity_placement_group=proximity_placement_group, computer_name=computer_name, - dedicated_host=dedicated_host, priority=priority, max_price=max_price, eviction_policy=eviction_policy, - enable_agent=enable_agent, vmss=vmss, os_disk_encryption_set=os_disk_encryption_set, - 
data_disk_encryption_sets=data_disk_encryption_sets, specialized=specialized, - encryption_at_host=encryption_at_host, dedicated_host_group=dedicated_host_group, - enable_auto_update=enable_auto_update, patch_mode=patch_mode, enable_hotpatching=enable_hotpatching, - platform_fault_domain=platform_fault_domain, security_type=security_type, enable_secure_boot=enable_secure_boot, - enable_vtpm=enable_vtpm, count=count, edge_zone=edge_zone, os_disk_delete_option=os_disk_delete_option, - user_data=user_data, capacity_reservation_group=capacity_reservation_group, - enable_hibernation=enable_hibernation, v_cpus_available=v_cpus_available, v_cpus_per_core=v_cpus_per_core, - os_disk_security_encryption_type=os_disk_security_encryption_type, - os_disk_secure_vm_disk_encryption_set=os_disk_secure_vm_disk_encryption_set, - disk_controller_type=disk_controller_type, enable_proxy_agent=enable_proxy_agent, - proxy_agent_mode=proxy_agent_mode, additional_scheduled_events=additional_scheduled_events, - enable_user_reboot_scheduled_events=enable_user_reboot_scheduled_events, - enable_user_redeploy_scheduled_events=enable_user_redeploy_scheduled_events, - zone_placement_policy=zone_placement_policy, include_zones=include_zones, exclude_zones=exclude_zones, - align_regional_disks_to_vm_zone=align_regional_disks_to_vm_zone, wire_server_mode=wire_server_mode, - imds_mode=imds_mode, - wire_server_access_control_profile_reference_id=wire_server_access_control_profile_reference_id, - imds_access_control_profile_reference_id=imds_access_control_profile_reference_id, - key_incarnation_id=key_incarnation_id, add_proxy_agent_extension=add_proxy_agent_extension, - disk_iops_read_write=disk_iops_read_write, disk_mbps_read_write=disk_mbps_read_write) - - vm_resource['dependsOn'] = vm_dependencies - - if plan_name: - vm_resource['plan'] = { - 'name': plan_name, - 'publisher': plan_publisher, - 'product': plan_product, - 'promotionCode': plan_promotion_code - } - - enable_local_identity = None - if 
assign_identity is not None: - vm_resource['identity'], _, _, enable_local_identity = _build_identities_info(assign_identity) - if identity_scope: - role_assignment_guid = str(_gen_guid()) - master_template.add_resource(build_msi_role_assignment(vm_name, vm_id, identity_role_id, - role_assignment_guid, identity_scope)) - - if encryption_identity: - if 'identity' in vm_resource and 'userAssignedIdentities' in vm_resource['identity'] \ - and encryption_identity.lower() in \ - (k.lower() for k in vm_resource['identity']['userAssignedIdentities'].keys()): - if 'securityProfile' not in vm_resource['properties']: - vm_resource['properties']['securityProfile'] = {} - if 'encryptionIdentity' not in vm_resource['properties']['securityProfile']: - vm_resource['properties']['securityProfile']['encryptionIdentity'] = {} - - vm_securityProfile_EncryptionIdentity = vm_resource['properties']['securityProfile']['encryptionIdentity'] - - if 'userAssignedIdentityResourceId' not in vm_securityProfile_EncryptionIdentity or \ - vm_securityProfile_EncryptionIdentity['userAssignedIdentityResourceId'] != encryption_identity: - vm_resource['properties']['securityProfile']['encryptionIdentity']['userAssignedIdentityResourceId'] \ - = encryption_identity - else: - raise CLIError("Encryption Identity should be an ARM Resource ID of one of the " - "user assigned identities associated to the resource") - - if workspace is not None: - workspace_id = _prepare_workspace(cmd, resource_group_name, workspace) - master_template.add_secure_parameter('workspaceId', workspace_id) - if os_type.lower() == 'linux': - vm_mmaExtension_resource = build_vm_linux_log_analytics_workspace_agent(cmd, vm_name, location) - master_template.add_resource(vm_mmaExtension_resource) - elif os_type.lower() == 'windows': - vm_mmaExtension_resource = build_vm_windows_log_analytics_workspace_agent(cmd, vm_name, location) - master_template.add_resource(vm_mmaExtension_resource) - else: - logger.warning("Unsupported OS type. 
Skip the connection step for log analytics workspace.") - - master_template.add_resource(vm_resource) - - if admin_password: - master_template.add_secure_parameter('adminPassword', admin_password) - - template = master_template.build() - parameters = master_template.build_parameters() - - # deploy ARM template - deployment_name = 'vm_deploy_' + random_string(32) - client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES, - aux_subscriptions=aux_subscriptions).deployments - DeploymentProperties = cmd.get_models('DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES) - properties = DeploymentProperties(template=template, parameters=parameters, mode='incremental') - Deployment = cmd.get_models('Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES) - deployment = Deployment(properties=properties) - - if validate: - from azure.cli.command_modules.vm._vm_utils import log_pprint_template - log_pprint_template(template) - log_pprint_template(parameters) - - if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES): - validation_poller = client.begin_validate(resource_group_name, deployment_name, deployment) - return LongRunningOperation(cmd.cli_ctx)(validation_poller) - - return client.validate(resource_group_name, deployment_name, deployment) - - # creates the VM deployment - if no_wait: - return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, deployment_name, deployment) - LongRunningOperation(cmd.cli_ctx)(client.begin_create_or_update(resource_group_name, deployment_name, deployment)) - - # Guest Attestation Extension and enable System Assigned MSI by default - is_trusted_launch = security_type and security_type.lower() == 'trustedlaunch' and\ - enable_vtpm and enable_secure_boot - is_confidential_vm = security_type and security_type.lower() == 'confidentialvm' - if (is_trusted_launch or is_confidential_vm) and enable_integrity_monitoring: - vm 
= get_vm_by_aaz(cmd, resource_group_name, vm_name, 'instanceView') - - publisher = '' - if vm.get('storageProfile', {}).get('osDisk', {}).get('osType', '') == 'Linux': - publisher = 'Microsoft.Azure.Security.LinuxAttestation' - elif vm.get('storageProfile', {}).get('osDisk', {}).get('osType', '') == 'Windows': - publisher = 'Microsoft.Azure.Security.WindowsAttestation' - - version = _normalize_extension_version(cmd.cli_ctx, publisher, 'GuestAttestation', None, vm['location']) - - vm_extension_args = { - 'resource_group': resource_group_name, - 'vm_extension_name': 'GuestAttestation', - 'vm_name': vm_name, - 'location': vm['location'], - 'auto_upgrade_minor_version': True, - 'enable_automatic_upgrade': not disable_integrity_monitoring_autoupgrade, - 'protected_settings': None, - 'publisher': publisher, - 'settings': None, - 'type': 'GuestAttestation', - 'type_handler_version': version - } - - try: - from .operations.vm_extension import VMExtensionCreate - create_vm_extension = VMExtensionCreate(cli_ctx=cmd.cli_ctx)(command_args=vm_extension_args) - LongRunningOperation(cmd.cli_ctx)(create_vm_extension) - logger.info('Guest Attestation Extension has been successfully installed by default ' - 'when Trusted Launch configuration is met') - except Exception as e: - error_type = "Trusted Launch" if is_trusted_launch else "Confidential VM" - logger.error('Failed to install Guest Attestation Extension for %s. 
%s', error_type, e) - if count: - vm_names = [vm_name + str(i) for i in range(count)] - else: - vm_names = [vm_name] - vms = [] - # Use vm_name2 to avoid R1704: Redefining argument with the local name 'vm_name' (redefined-argument-from-local) - for vm_name2 in vm_names: - vm = get_vm_details(cmd, resource_group_name, vm_name2) - if assign_identity is not None: - if enable_local_identity and not identity_scope: - _show_missing_access_warning(resource_group_name, vm_name2, 'vm') - vm['identity'] = _construct_identity_info(identity_scope, identity_role, - vm.get('identity', {}).get('principalId', None), - vm.get('identity', {}).get('userAssignedIdentities', None)) - vms.append(vm) - - if workspace is not None: - workspace_name = parse_resource_id(workspace_id)['name'] - _set_data_source_for_workspace(cmd, os_type, resource_group_name, workspace_name) - - if len(vms) == 1: - return vms[0] - return vms - - -def auto_shutdown_vm(cmd, resource_group_name, vm_name, off=None, email=None, webhook=None, time=None, - location=None): - from ..lab.aaz.latest.lab.global_schedule import Delete as DeleteSchedule, Create as CreateSchedule - from azure.mgmt.core.tools import resource_id - from azure.cli.core.commands.client_factory import get_subscription_id - subscription_id = get_subscription_id(cmd.cli_ctx) - name = 'shutdown-computevm-' + vm_name - vm_id = resource_id(subscription=subscription_id, resource_group=resource_group_name, - namespace='Microsoft.Compute', type='virtualMachines', name=vm_name) - - schedule = { - 'name': name, - 'resource_group': resource_group_name - } - if off: - if email is not None or webhook is not None or time is not None: - # I don't want to disrupt users. So I warn instead of raising an error. 
- logger.warning('If --off, other parameters will be ignored.') - return DeleteSchedule(cli_ctx=cmd.cli_ctx)(command_args=schedule) - - if time is None: - raise CLIError('usage error: --time is a required parameter') - daily_recurrence = {'time': time} - notification_settings = None - if email or webhook: - notification_settings = { - 'timeInMinutes': 30, - 'status': 'Enabled' - } - if email: - notification_settings['emailRecipient'] = email - if webhook: - notification_settings['webhookUrl'] = webhook - - schedule.update({ - 'status': 'Enabled', - 'target_resource_id': vm_id, - 'daily_recurrence': daily_recurrence, - 'notification_settings': notification_settings, - 'time_zone_id': 'UTC', - 'task_type': 'ComputeVmShutdownTask', - 'location': location - }) - return CreateSchedule(cli_ctx=cmd.cli_ctx)(command_args=schedule) - - -def get_instance_view(cmd, resource_group_name, vm_name, include_user_data=False): - from .operations.vm import VMShow - expand = 'instanceView' - if include_user_data: - expand = expand + ',userData' - - result = VMShow(cli_ctx=cmd.cli_ctx)(command_args={ - "resource_group": resource_group_name, - "vm_name": vm_name, - "expand": expand, - }) - return result - - -def get_vm_by_aaz(cmd, resource_group_name, vm_name, expand=None): - from .operations.vm import VMShow - command_args = { - 'resource_group': resource_group_name, - 'vm_name': vm_name, - } - - if expand: - command_args['expand'] = expand - - return VMShow(cli_ctx=cmd.cli_ctx)(command_args=command_args) - - -def get_vm(cmd, resource_group_name, vm_name, expand=None): - client = _compute_client_factory(cmd.cli_ctx) - return client.virtual_machines.get(resource_group_name, vm_name, expand=expand) - - -def get_vm_to_update(cmd, resource_group_name, vm_name): - client = _compute_client_factory(cmd.cli_ctx) - vm = client.virtual_machines.get(resource_group_name, vm_name) - # To avoid unnecessary permission check of image - vm.storage_profile.image_reference = None - return vm - - -def 
get_vm_to_update_by_aaz(cmd, resource_group_name, vm_name): - from .operations.vm import VMShow - - vm = VMShow(cli_ctx=cmd.cli_ctx)(command_args={ - "resource_group": resource_group_name, - "vm_name": vm_name - }) - - # To avoid unnecessary permission check of image - storage_profile = vm.get("storageProfile", {}) - storage_profile["imageReference"] = None - - return vm - - -def get_vm_details(cmd, resource_group_name, vm_name, include_user_data=False): - from azure.mgmt.core.tools import parse_resource_id - - NicShow = import_aaz_by_profile(cmd.cli_ctx.cloud.profile, "network.nic").Show - PublicIPShow = import_aaz_by_profile(cmd.cli_ctx.cloud.profile, "network.public_ip").Show - - result = get_instance_view(cmd, resource_group_name, vm_name, include_user_data) - public_ips = [] - fqdns = [] - private_ips = [] - mac_addresses = [] - # pylint: disable=line-too-long,no-member - for nic_ref in result.get('networkProfile', {}).get('networkInterfaces', []): - nic_parts = parse_resource_id(nic_ref['id']) - nic = NicShow(cli_ctx=cmd.cli_ctx)(command_args={ - "name": nic_parts['name'], - 'resource_group': nic_parts['resource_group'] - }) - if 'macAddress' in nic: - mac_addresses.append(nic['macAddress']) - for ip_configuration in nic['ipConfigurations']: - if 'privateIPAddress' in ip_configuration: - private_ips.append(ip_configuration['privateIPAddress']) - if 'publicIPAddress' in ip_configuration: - res = parse_resource_id(ip_configuration['publicIPAddress']['id']) - public_ip_info = PublicIPShow(cli_ctx=cmd.cli_ctx)(command_args={ - 'name': res['name'], - 'resource_group': res['resource_group'] - }) - if 'ipAddress' in public_ip_info: - public_ips.append(public_ip_info['ipAddress']) - if 'dnsSettings' in public_ip_info: - fqdns.append(public_ip_info['dnsSettings']['fqdn']) - - result['powerState'] = ','.join([s['displayStatus'] for s in result.get('instanceView', {}).get('statuses', []) - if s['code'].startswith('PowerState/')]) - result['publicIps'] = 
','.join(public_ips) - result['fqdns'] = ','.join(fqdns) - result['privateIps'] = ','.join(private_ips) - result['macAddresses'] = ','.join(mac_addresses) - - del result['instanceView'] # we don't need other instanceView info as people won't care - return result - - -def list_skus(cmd, location=None, size=None, zone=None, show_all=None, resource_type=None): - from ._vm_utils import list_sku_info, is_sku_available - result = list_sku_info(cmd.cli_ctx, location) - # pylint: disable=too-many-nested-blocks - if not show_all: - available_skus = [] - for sku_info in result: - if is_sku_available(sku_info, zone): - available_skus.append(sku_info) - result = available_skus - if resource_type: - result = [x for x in result if x['resourceType'].lower() == resource_type.lower()] - if size: - result = [x for x in result if x['resourceType'] == 'virtualMachines' and size.lower() in x['name'].lower()] - if zone: - result = [x for x in result if x['locationInfo'] and x['locationInfo'][0]['zones']] - return result - - -# pylint: disable=redefined-builtin -def list_vm(cmd, resource_group_name=None, show_details=False, vmss=None): - from azure.mgmt.core.tools import resource_id, is_valid_resource_id, parse_resource_id - from azure.cli.core.commands.client_factory import get_subscription_id - from .aaz.latest.vm import List as VMList - if vmss is not None: - if is_valid_resource_id(vmss): - filter = "'virtualMachineScaleSet/id' eq '{}'".format(vmss) - if resource_group_name is None: - resource_group_name = parse_resource_id(vmss)['resource_group'] - else: - if resource_group_name is None: - raise RequiredArgumentMissingError( - 'usage error: please specify the --resource-group when listing VM instances with VMSS name') - vmss_id = resource_id(subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name, - namespace='Microsoft.Compute', type='virtualMachineScaleSets', name=vmss) - filter = "'virtualMachineScaleSet/id' eq '{}'".format(vmss_id) - - vm_list = 
VMList(cli_ctx=cmd.cli_ctx)(command_args={ - 'resource_group': resource_group_name, - "filter": filter - }) - else: - from .aaz.latest.vm import ListAll as VMListAll - vm_list = VMList(cli_ctx=cmd.cli_ctx)(command_args={ - 'resource_group': resource_group_name - }) if resource_group_name else VMListAll(cli_ctx=cmd.cli_ctx)(command_args={}) - - if show_details: - return [get_vm_details(cmd, _parse_rg_name(v['id'])[0], v['name']) for v in vm_list] - - return list(vm_list) - - -def list_vm_ip_addresses(cmd, resource_group_name=None, vm_name=None): - # We start by getting NICs as they are the smack in the middle of all data that we - # want to collect for a VM (as long as we don't need any info on the VM than what - # is available in the Id, we don't need to make any calls to the compute RP) - # - # Since there is no guarantee that a NIC is in the same resource group as a given - # Virtual Machine, we can't constrain the lookup to only a single group... - NicList = import_aaz_by_profile(cmd.cli_ctx.cloud.profile, "network.nic").List - PublicIPList = import_aaz_by_profile(cmd.cli_ctx.cloud.profile, "network.public_ip").List - - nics = NicList(cli_ctx=cmd.cli_ctx)(command_args={}) - public_ip_addresses = PublicIPList(cli_ctx=cmd.cli_ctx)(command_args={}) - - ip_address_lookup = {pip['id']: pip for pip in list(public_ip_addresses)} - - result = [] - for nic in [n for n in list(nics) if 'virtualMachine' in n and n['virtualMachine']]: - nic_resource_group, nic_vm_name = _parse_rg_name(nic['virtualMachine']['id']) - - # If provided, make sure that resource group name and vm name match the NIC we are - # looking at before adding it to the result... 
- same_resource_group_name = (resource_group_name is None or - resource_group_name.lower() == nic_resource_group.lower()) - same_vm_name = (vm_name is None or - vm_name.lower() == nic_vm_name.lower()) - if same_resource_group_name and same_vm_name: - network_info = { - 'privateIpAddresses': [], - 'publicIpAddresses': [] - } - for ip_configuration in nic['ipConfigurations']: - network_info['privateIpAddresses'].append(ip_configuration['privateIPAddress']) - if 'publicIPAddress' in ip_configuration and ip_configuration['publicIPAddress'] and \ - ip_configuration['publicIPAddress']['id'] in ip_address_lookup: - public_ip_address = ip_address_lookup[ip_configuration['publicIPAddress']['id']] - - public_ip_addr_info = { - 'id': public_ip_address['id'], - 'name': public_ip_address['name'], - 'ipAddress': public_ip_address.get('ipAddress', None), - 'ipAllocationMethod': public_ip_address.get('publicIPAllocationMethod', None) - } - - try: - public_ip_addr_info['zone'] = public_ip_address['zones'][0] \ - if 'zones' in public_ip_address else None - except (KeyError, IndexError, TypeError): - pass - - network_info['publicIpAddresses'].append(public_ip_addr_info) - - result.append({ - 'virtualMachine': { - 'resourceGroup': nic_resource_group, - 'name': nic_vm_name, - 'network': network_info - } - }) - - return result - - -def open_vm_port(cmd, resource_group_name, vm_name, port, priority=900, network_security_group_name=None, - apply_to_subnet=False): - from azure.mgmt.core.tools import parse_resource_id - _nic = import_aaz_by_profile(cmd.cli_ctx.cloud.profile, "network.nic") - NicShow, NicUpdate = _nic.Show, _nic.Update - _subnet = import_aaz_by_profile(cmd.cli_ctx.cloud.profile, "network.vnet.subnet") - SubnetShow, SubnetUpdate = _subnet.Show, _subnet.Update - _nsg = import_aaz_by_profile(cmd.cli_ctx.cloud.profile, "network.nsg") - NSGShow, NSGCreate = _nsg.Show, _nsg.Create - NSGRuleCreate = import_aaz_by_profile(cmd.cli_ctx.cloud.profile, "network.nsg.rule").Create - - vm 
= get_vm_by_aaz(cmd, resource_group_name, vm_name) - location = vm.get('location', '') - if not vm.get('networkProfile'): - raise CLIError("Network profile not found for VM '{}'".format(vm_name)) - - nic_ids = vm.get('networkProfile', {}).get('networkInterfaces', []) - if len(nic_ids) > 1: - raise CLIError('Multiple NICs is not supported for this command. Create rules on the NSG ' - 'directly.') - if not nic_ids: - raise CLIError("No NIC associated with VM '{}'".format(vm_name)) - - # get existing NSG or create a new one - created_nsg = False - nic = NicShow(cli_ctx=cmd.cli_ctx)(command_args={ - 'name': os.path.split(nic_ids[0].get('id'))[1], - 'resource_group': resource_group_name - }) - if not apply_to_subnet: - nsg = nic['networkSecurityGroup'] - else: - subnet_id = parse_resource_id(nic['ipConfigurations'][0]['subnet']['id']) - subnet = SubnetShow(cli_ctx=cmd.cli_ctx)(command_args={ - 'name': subnet_id['child_name_1'], - 'vnet_name': subnet_id['name'], - 'resource_group': resource_group_name - }) - nsg = subnet['networkSecurityGroup'] if 'networkSecurityGroup' in subnet else None - - if not nsg: - nsg = LongRunningOperation(cmd.cli_ctx, 'Creating network security group')( - NSGCreate(cli_ctx=cmd.cli_ctx)(command_args={ - 'name': network_security_group_name, - 'resource_group': resource_group_name, - 'location': location - })) - created_nsg = True - - # update the NSG with the new rule to allow inbound traffic - - rule_name = 'open-port-all' if port == '*' else 'open-port-{}'.format(port.replace(',', '_')) - - # use portranges if multiple ports are entered - if "," not in port: - port_arg = { - 'destination_port_range': port - } - else: - port_arg = { - 'destination_port_ranges': port.split(',') - } - - nsg_name = nsg['name'] if 'name' in nsg else os.path.split(nsg['id'])[1] - LongRunningOperation(cmd.cli_ctx, 'Adding security rule')( - NSGRuleCreate(cli_ctx=cmd.cli_ctx)(command_args={ - 'name': rule_name, - 'nsg_name': nsg_name, - 'resource_group': 
resource_group_name, - 'protocol': '*', - 'access': 'allow', - 'direction': 'inbound', - 'source_port_range': '*', - **port_arg, - 'priority': priority, - 'source_address_prefix': '*', - 'destination_address_prefix': '*' - }) - ) - - # update the NIC or subnet if a new NSG was created - if created_nsg and not apply_to_subnet: - nic['networkSecurityGroup'] = nsg - LongRunningOperation(cmd.cli_ctx, 'Updating NIC')( - NicUpdate(cli_ctx=cmd.cli_ctx)(command_args={ - 'name': nic['name'], - 'resource_group': resource_group_name, - 'security_rules': nic - })) - elif created_nsg and apply_to_subnet: - subnet['networkSecurityGroup'] = nsg - LongRunningOperation(cmd.cli_ctx, 'Updating subnet')( - SubnetUpdate(cli_ctx=cmd.cli_ctx)(command_args={ - 'name': subnet_id['child_name_1'], - 'resource_group': resource_group_name, - 'vnet_name': subnet_id['name'], - 'subnet': subnet - }) - ) - - return NSGShow(cli_ctx=cmd.cli_ctx)(command_args={ - 'name': nsg_name, - 'resource_group': resource_group_name - }) - - -def resize_vm(cmd, resource_group_name, vm_name, size, no_wait=False): - from .operations.vm import VMCreate, convert_show_result_to_snake_case as to_snake_case - - vm = to_snake_case(get_vm_to_update_by_aaz(cmd, resource_group_name, vm_name) or {}) or {} - current_size = (vm.get("hardware_profile") or {}).get("vm_size") - if current_size == size: - logger.warning("VM is already %s", size) - return None - - vm.pop("resources", None) - - if vm.get("hardware_profile") is None: - vm["hardware_profile"] = {} - vm["hardware_profile"]["vm_size"] = size - - vm["resource_group"] = resource_group_name - vm["vm_name"] = vm_name - vm["no_wait"] = no_wait - - return VMCreate(cli_ctx=cmd.cli_ctx)(command_args=vm) - - -def restart_vm(cmd, resource_group_name, vm_name, no_wait=False, force=False): - from .aaz.latest.vm import Redeploy as _VMRedeploy, Restart as _VMRestart - - command_args = { - "resource_group": resource_group_name, - "vm_name": vm_name, - "no_wait": no_wait, - } - - if 
force: - return _VMRedeploy(cli_ctx=cmd.cli_ctx)(command_args=command_args) - - return _VMRestart(cli_ctx=cmd.cli_ctx)(command_args=command_args) - - -def set_vm(cmd, instance, lro_operation=None, no_wait=False): - instance.resources = None # Issue: https://github.com/Azure/autorest/issues/934 - client = _compute_client_factory(cmd.cli_ctx) - parsed_id = _parse_rg_name(instance.id) - poller = sdk_no_wait(no_wait, client.virtual_machines.begin_create_or_update, - resource_group_name=parsed_id[0], - vm_name=parsed_id[1], - parameters=instance) - if lro_operation: - return lro_operation(poller) - - return LongRunningOperation(cmd.cli_ctx)(poller) - - -# Notes: vm format is in snake_case -def set_vm_by_aaz(cmd, vm, no_wait=False): - from .aaz.latest.vm import Create as _VMCreate - - parsed_id = _parse_rg_name(vm["id"]) - vm["resource_group"] = parsed_id[0] - vm["vm_name"] = parsed_id[1] - vm["no_wait"] = no_wait - - class SetVM(_VMCreate): - def _output(self, *args, **kwargs): - from azure.cli.core.aaz import AAZUndefined, has_value - - # Resolve flatten conflict - # When the type field conflicts, the type in inner layer is ignored and the outer layer is applied - if has_value(self.ctx.vars.instance.resources): - for resource in self.ctx.vars.instance.resources: - if has_value(resource.type): - resource.type = AAZUndefined - - result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True) - if result.get('osProfile', {}).get('secrets', []): - for secret in result['osProfile']['secrets']: - for cert in secret.get('vaultCertificates', []): - if not cert.get('certificateStore'): - cert['certificateStore'] = None - return result - - vm = LongRunningOperation(cmd.cli_ctx)( - SetVM(cli_ctx=cmd.cli_ctx)(command_args=vm)) - - return vm - - -def patch_vm(cmd, resource_group_name, vm_name, vm): - client = _compute_client_factory(cmd.cli_ctx) - poller = client.virtual_machines.begin_update(resource_group_name, vm_name, vm) - return 
LongRunningOperation(cmd.cli_ctx)(poller) - - -def patch_disk_encryption_set(cmd, resource_group_name, disk_encryption_set_name, disk_encryption_set_update): - client = _compute_client_factory(cmd.cli_ctx) - poller = client.disk_encryption_sets.begin_update(resource_group_name, disk_encryption_set_name, - disk_encryption_set_update) - return LongRunningOperation(cmd.cli_ctx)(poller) - - -def show_vm(cmd, resource_group_name, vm_name, show_details=False, include_user_data=False): - if show_details: - return get_vm_details(cmd, resource_group_name, vm_name, include_user_data) - - expand = None - if include_user_data: - expand = "userData" - return get_vm_by_aaz(cmd, resource_group_name, vm_name, expand) - - -def update_vm(cmd, resource_group_name, vm_name, os_disk=None, disk_caching=None, - write_accelerator=None, license_type=None, no_wait=False, ultra_ssd_enabled=None, - priority=None, max_price=None, proximity_placement_group=None, workspace=None, enable_secure_boot=None, - enable_vtpm=None, user_data=None, capacity_reservation_group=None, - dedicated_host=None, dedicated_host_group=None, size=None, ephemeral_os_disk_placement=None, - enable_hibernation=None, v_cpus_available=None, v_cpus_per_core=None, disk_controller_type=None, - security_type=None, enable_proxy_agent=None, proxy_agent_mode=None, additional_scheduled_events=None, - enable_user_reboot_scheduled_events=None, enable_user_redeploy_scheduled_events=None, - align_regional_disks_to_vm_zone=None, wire_server_mode=None, imds_mode=None, - add_proxy_agent_extension=None, - wire_server_access_control_profile_reference_id=None, imds_access_control_profile_reference_id=None, - key_incarnation_id=None, **kwargs): - from azure.mgmt.core.tools import parse_resource_id, resource_id, is_valid_resource_id - from ._vm_utils import update_write_accelerator_settings, update_disk_caching_by_aaz - from .operations.vm import convert_show_result_to_snake_case as vm_convert_show_result_to_snake_case - vm = 
kwargs['parameters'] - vm = vm_convert_show_result_to_snake_case(vm) - - if wire_server_access_control_profile_reference_id is not None or \ - imds_access_control_profile_reference_id is not None: - from .aaz.latest.vm import Patch as VMPatchUpdate - - class VMUpdateReferenceId(VMPatchUpdate): - def _output(self, *args, **kwargs): - result = self.deserialize_output(self.ctx.vars.instance, client_flatten=False) - return result - - security_profile = {'proxy_agent_settings': {}} - if wire_server_access_control_profile_reference_id: - security_profile['proxy_agent_settings']['wire_server'] = { - 'in_vm_access_control_profile_reference_id': wire_server_access_control_profile_reference_id} - if imds_access_control_profile_reference_id: - security_profile['proxy_agent_settings']['imds'] = { - 'in_vm_access_control_profile_reference_id': imds_access_control_profile_reference_id} - - LongRunningOperation(cmd.cli_ctx)(VMUpdateReferenceId(cli_ctx=cmd.cli_ctx)(command_args={ - 'vm_name': vm_name, - 'resource_group': resource_group_name, - 'security_profile': security_profile - })) - vm = get_vm_to_update_by_aaz(cmd, resource_group_name, vm_name) - vm = vm_convert_show_result_to_snake_case(vm) - - if add_proxy_agent_extension is not None: - if vm.get("security_profile", None) is None: - vm["security_profile"] = {} - if vm["security_profile"].get("proxy_agent_settings", None) is None: - vm["security_profile"]["proxy_agent_settings"] = {} - - vm["security_profile"]["proxy_agent_settings"]["add_proxy_agent_extension"] = add_proxy_agent_extension - - disk_name = None - if os_disk is not None: - if is_valid_resource_id(os_disk): - disk_id = os_disk - os_disk_id_parsed = parse_resource_id(os_disk) - disk_name = os_disk_id_parsed['name'] - else: - vm_id_parsed = parse_resource_id(vm["id"]) - disk_id = resource_id(subscription=vm_id_parsed['subscription'], - resource_group=vm_id_parsed['resource_group'], - namespace='Microsoft.Compute', type='disks', name=os_disk) - disk_name = 
os_disk - - if vm.get("storage_profile", None) is None: - vm["storage_profile"] = {} - if vm["storage_profile"].get("os_disk", None) is None: - vm["storage_profile"]["os_disk"] = {} - if vm["storage_profile"]["os_disk"].get("managed_disk", None) is None: - vm["storage_profile"]["os_disk"]["managed_disk"] = {} - - vm["storage_profile"]["os_disk"]["managed_disk"]["id"] = disk_id - vm["storage_profile"]["os_disk"]["name"] = disk_name - - if align_regional_disks_to_vm_zone is not None: - if vm.get("storage_profile", None) is None: - vm["storage_profile"] = {} - vm["storage_profile"]["align_regional_disks_to_vm_zone"] = align_regional_disks_to_vm_zone - - from ._constants import COMPATIBLE_SECURITY_TYPE_VALUE - if security_type == "TrustedLaunch": - from azure.cli.core.azclierror import InvalidArgumentValueError - if vm.get("security_profile", {}).get("security_type", None) == "ConfidentialVM": - raise InvalidArgumentValueError("{} is already configured with ConfidentialVM. Security Configuration " - "cannot be updated from ConfidentialVM to TrustedLaunch.".format(vm["name"])) # pylint: disable=line-too-long - - if disk_name is None and vm.get("storage_profile", {}).get("os_disk", {}).get("managed_disk", None) is not None: - os_disk_id_parsed = parse_resource_id(vm["storage_profile"]["os_disk"]["managed_disk"]["id"]) - disk_name = os_disk_id_parsed["name"] - - if disk_name is not None: - # Set --enable-secure-boot True and --enable-vtpm True if not specified by end user. 
- enable_secure_boot = enable_secure_boot if enable_secure_boot is not None else True - enable_vtpm = enable_vtpm if enable_vtpm is not None else True - - if vm.get("security_profile", None) is None: - vm["security_profile"] = {} - vm["security_profile"]["security_type"] = security_type - - elif security_type == COMPATIBLE_SECURITY_TYPE_VALUE: - if vm.get("security_profile", None) is None: - vm["security_profile"] = {} - vm["security_profile"]["security_type"] = security_type - vm["security_profile"]["uefi_settings"] = None - - if write_accelerator is not None: - if vm.get("storage_profile", None) is None: - vm["storage_profile"] = {} - update_write_accelerator_settings(vm["storage_profile"], write_accelerator) - - if disk_caching is not None: - if vm.get("storage_profile", None) is None: - vm["storage_profile"] = {} - update_disk_caching_by_aaz(vm["storage_profile"], disk_caching) - - if license_type is not None: - vm["license_type"] = license_type - - if user_data is not None: - from azure.cli.core.util import b64encode - vm["user_data"] = b64encode(user_data) - - if capacity_reservation_group is not None: - if capacity_reservation_group == 'None': - capacity_reservation_group = None - - sub_resource = {"id": capacity_reservation_group} - capacity_reservation = {"capacity_reservation_group": sub_resource} - vm["capacity_reservation"] = capacity_reservation - - if dedicated_host is not None: - if vm.get("host", None) is None: - vm["host"] = {"id": dedicated_host} - else: - vm["host"]["id"] = dedicated_host - if vm.get("host_group", None) is not None: - vm["host_group"] = None - - if dedicated_host_group is not None: - if vm.get("host_group", None) is None: - vm["host_group"] = {"id": dedicated_host_group} - else: - vm["host_group"]["id"] = dedicated_host_group - if vm.get("host", None) is not None: - vm["host"] = None - - if ultra_ssd_enabled is not None: - if vm.get("additional_capabilities", None) is None: - vm["additional_capabilities"] = {"ultra_ssd_enabled": 
ultra_ssd_enabled} - else: - vm["additional_capabilities"]["ultra_ssd_enabled"] = ultra_ssd_enabled - - if enable_hibernation is not None: - if vm.get("additional_capabilities", None) is None: - vm["additional_capabilities"] = {"hibernation_enabled": enable_hibernation} - else: - vm["additional_capabilities"]["hibernation_enabled"] = enable_hibernation - - if priority is not None: - vm["priority"] = priority - - if max_price is not None: - if vm.get("billing_profile", None) is None: - vm["billing_profile"] = {"max_price": max_price} - else: - vm["billing_profile"]["max_price"] = max_price - - if proximity_placement_group is not None: - vm["proximity_placement_group"] = {"id": proximity_placement_group} - - if security_type != COMPATIBLE_SECURITY_TYPE_VALUE and (enable_secure_boot is not None or enable_vtpm is not None): - if vm.get("security_profile", None) is None: - vm["security_profile"] = {} - - vm["security_profile"]["uefi_settings"] = {"secure_boot_enabled": enable_secure_boot, - "v_tpm_enabled": enable_vtpm} - - proxy_agent_parameters = [enable_proxy_agent, wire_server_mode, imds_mode, key_incarnation_id] - if any(parameter is not None for parameter in proxy_agent_parameters): - wire_server = {} - imds = {} - if vm.get("security_profile", None) is None: - vm["security_profile"] = {} - vm["security_profile"]["proxy_agent_settings"] = {"wire_server": wire_server, "imds": imds} - elif vm["security_profile"].get("proxy_agent_settings", None) is None: - vm["security_profile"]["proxy_agent_settings"] = {"wire_server": wire_server, "imds": imds} - else: - if vm["security_profile"]["proxy_agent_settings"].get("wire_server", None) is None: - vm["security_profile"]["proxy_agent_settings"]["wire_server"] = wire_server - if vm["security_profile"]["proxy_agent_settings"].get("imds", None) is None: - vm["security_profile"]["proxy_agent_settings"]["imds"] = imds - - if enable_proxy_agent is not None: - vm["security_profile"]["proxy_agent_settings"]["enabled"] = 
enable_proxy_agent - if key_incarnation_id is not None: - vm["security_profile"]["proxy_agent_settings"]["key_incarnation_id"] = key_incarnation_id - if wire_server_mode is not None: - vm["security_profile"]["proxy_agent_settings"]["wire_server"]["mode"] = wire_server_mode - if imds_mode is not None: - vm["security_profile"]["proxy_agent_settings"]["imds"]["mode"] = imds_mode - - if workspace is not None: - workspace_id = _prepare_workspace(cmd, resource_group_name, workspace) - workspace_name = parse_resource_id(workspace_id)['name'] - _set_log_analytics_workspace_extension(cmd=cmd, - resource_group_name=resource_group_name, - vm=vm, - vm_name=vm_name, - workspace_name=workspace_name) - os_type = vm["storage_profile"]["os_disk"]["os_type"] \ - if vm.get("storage_profile", {}).get("os_disk", {}).get("os_type", None) is not None else None - _set_data_source_for_workspace(cmd, os_type, resource_group_name, workspace_name) - - if size is not None: - if vm.get("hardware_profile", {}).get("vm_size", None) == size: - logger.warning("VM size is already %s", size) - else: - if vm.get("hardware_profile", None) is None: - vm["hardware_profile"] = {} - vm["hardware_profile"]["vm_size"] = size - - if v_cpus_available is not None: - if vm.get("hardware_profile", None) is None: - vm["hardware_profile"] = {} - if vm["hardware_profile"].get("vm_size_properties", None) is None: - vm["hardware_profile"]["vm_size_properties"] = {} - vm["hardware_profile"]["vm_size_properties"]["v_cp_us_available"] = v_cpus_available - - if v_cpus_per_core is not None: - if vm.get("hardware_profile", None) is None: - vm["hardware_profile"] = {} - if vm["hardware_profile"].get("vm_size_properties", None) is None: - vm["hardware_profile"]["vm_size_properties"] = {} - vm["hardware_profile"]["vm_size_properties"]["v_cp_us_per_core"] = v_cpus_per_core - - if ephemeral_os_disk_placement is not None: - if vm.get("storage_profile", {}).get("os_disk", {}).get("diff_disk_settings", None) is not None: - 
vm["storage_profile"]["os_disk"]["diff_disk_settings"]["placement"] = ephemeral_os_disk_placement - else: - raise ValidationError("Please update the argument '--ephemeral-os-disk-placement' when " - "creating VM with the option '--ephemeral-os-disk true'") - - if disk_controller_type is not None: - if vm.get("storage_profile", None) is None: - vm["storage_profile"] = {} - vm["storage_profile"]["disk_controller_type"] = disk_controller_type - - if additional_scheduled_events is not None or \ - enable_user_reboot_scheduled_events is not None or enable_user_redeploy_scheduled_events is not None: - if vm.get("scheduled_events_policy", None) is None: - vm["scheduled_events_policy"] = { - "scheduled_events_additional_publishing_targets": { - "event_grid_and_resource_graph": { - "enable": additional_scheduled_events if additional_scheduled_events is not None else False - }, - }, - "user_initiated_reboot": { - "automatically_approve": - enable_user_reboot_scheduled_events if enable_user_reboot_scheduled_events is not None else False # pylint: disable=line-too-long - }, - "user_initiated_redeploy": { - "automatically_approve": - enable_user_redeploy_scheduled_events if enable_user_redeploy_scheduled_events is not None else False # pylint: disable=line-too-long - } - } - else: - if additional_scheduled_events is not None: - vm["scheduled_events_policy"]["scheduled_events_additional_publishing_targets"] = { - "event_grid_and_resource_graph": { - "enable": additional_scheduled_events - } - } - if enable_user_redeploy_scheduled_events is not None: - vm["scheduled_events_policy"]["user_initiated_redeploy"] = { - "automatically_approve": enable_user_redeploy_scheduled_events - } - if enable_user_reboot_scheduled_events is not None: - vm["scheduled_events_policy"]["user_initiated_reboot"] = { - "automatically_approve": enable_user_reboot_scheduled_events - } - if wire_server_access_control_profile_reference_id is not None or \ - imds_access_control_profile_reference_id is not None 
or \ - add_proxy_agent_extension is not None: - kwargs['parameters'] = vm - - vm["resource_group"] = resource_group_name - vm["vm_name"] = vm_name - vm["no_wait"] = no_wait - - from .operations.vm import VMCreate - return VMCreate(cli_ctx=cmd.cli_ctx)(command_args=vm) -# endregion - - -# region VirtualMachines AvailabilitySets -def create_av_set(cmd, availability_set_name, resource_group_name, platform_fault_domain_count=2, - platform_update_domain_count=None, location=None, proximity_placement_group=None, unmanaged=False, - no_wait=False, tags=None, validate=False, additional_scheduled_events=None, - enable_user_reboot_scheduled_events=None, enable_user_redeploy_scheduled_events=None): - from azure.cli.core.util import random_string - from azure.cli.core.commands.arm import ArmTemplateBuilder - from azure.cli.command_modules.vm._template_builder import build_av_set_resource - - tags = tags or {} - - # Build up the ARM template - master_template = ArmTemplateBuilder() - - av_set_resource = build_av_set_resource(cmd, availability_set_name, location, tags, - platform_update_domain_count, - platform_fault_domain_count, unmanaged, - proximity_placement_group=proximity_placement_group, - additional_scheduled_events=additional_scheduled_events, - enable_user_reboot_scheduled_events=enable_user_reboot_scheduled_events, - enable_user_redeploy_scheduled_events=enable_user_redeploy_scheduled_events) - master_template.add_resource(av_set_resource) - - template = master_template.build() - - # deploy ARM template - deployment_name = 'av_set_deploy_' + random_string(32) - client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES).deployments - DeploymentProperties = cmd.get_models('DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES) - properties = DeploymentProperties(template=template, parameters={}, mode='incremental') - Deployment = cmd.get_models('Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES) - deployment = 
Deployment(properties=properties) - - if validate: - if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES): - validation_poller = client.begin_validate(resource_group_name, deployment_name, deployment) - return LongRunningOperation(cmd.cli_ctx)(validation_poller) - - return client.validate(resource_group_name, deployment_name, deployment) - - if no_wait: - return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, deployment_name, deployment) - LongRunningOperation(cmd.cli_ctx)(sdk_no_wait(no_wait, client.begin_create_or_update, - resource_group_name, deployment_name, deployment)) - - from .aaz.latest.vm.availability_set import Show as _Show - return _Show(cli_ctx=cmd.cli_ctx)(command_args={'resource_group': resource_group_name, - 'availability_set_name': availability_set_name}) - - -# endregion - - -# region VirtualMachines BootDiagnostics -class DisableBootDiagnostics(UpdateVM): - def pre_instance_update(self, instance): - from azure.cli.core.aaz import has_value - diag_profile = False if not has_value(instance.properties.diagnostics_profile) else ( - instance.properties.diagnostics_profile) - if not (diag_profile and has_value(diag_profile.boot_diagnostics) and - diag_profile.boot_diagnostics.enabled.to_serialized_data()): - return - boot_diag = {'enabled': False, 'storage_uri': None} - instance.properties.diagnostics_profile = {'boot_diagnostics': boot_diag} - - -def disable_boot_diagnostics(cmd, resource_group_name, vm_name): - ExtensionUpdateLongRunningOperation(cmd.cli_ctx, 'enabling boot diagnostics', 'done')( - DisableBootDiagnostics(cli_ctx=cmd.cli_ctx)(command_args={ - 'resource_group': resource_group_name, - 'vm_name': vm_name - }) - ) - - -class EnableBootDiagnostics(UpdateVM): - @classmethod - def _build_arguments_schema(cls, *args, **kwargs): - from azure.cli.core.aaz import AAZStrArg - args_schema = super()._build_arguments_schema(*args, **kwargs) - args_schema.storage = 
AAZStrArg( - options=["--storage"], - help="Storage account" - ) - return args_schema - - def pre_instance_update(self, instance): - from azure.cli.core.aaz import has_value - from azure.cli.command_modules.vm._vm_utils import get_storage_blob_uri - args = self.ctx.args - storage_uri = None - if has_value(args.storage): - storage_uri = get_storage_blob_uri(self.cli_ctx, args.storage.to_serialized_data()) - boot_diag = {'enabled': True, 'storage_uri': storage_uri} - instance.properties.diagnostics_profile = {'boot_diagnostics': boot_diag} - - -def enable_boot_diagnostics(cmd, resource_group_name, vm_name, storage=None): - ExtensionUpdateLongRunningOperation(cmd.cli_ctx, 'enabling boot diagnostics', 'done')( - EnableBootDiagnostics(cli_ctx=cmd.cli_ctx)(command_args={ - 'resource_group': resource_group_name, - 'vm_name': vm_name, - 'storage': storage - }) - ) - - -class BootLogStreamWriter: # pylint: disable=too-few-public-methods - - def __init__(self, out): - self.out = out - - def write(self, str_or_bytes): - content = str_or_bytes - if isinstance(str_or_bytes, bytes): - try: - content = str_or_bytes.decode('utf8') - except UnicodeDecodeError: - logger.warning("A few characters have been ignored because they were not valid unicode.") - content = str_or_bytes.decode('ascii', 'ignore') - try: - self.out.write(content) - except UnicodeEncodeError: - # e.g. 'charmap' codec can't encode characters in position 258829-258830: character maps to - import unicodedata - ascii_content = unicodedata.normalize('NFKD', content).encode('ascii', 'ignore') - self.out.write(ascii_content.decode()) - logger.warning("A few unicode characters have been ignored because the shell is not able to display. 
" - "To see the full log, use a shell with unicode capacity") - - -def get_boot_log(cmd, resource_group_name, vm_name): - import re - import sys - from azure.cli.core.profiles import get_sdk - from azure.core.exceptions import HttpResponseError - from .aaz.latest.vm.boot_diagnostics import GetBootLogUris as VmGetBootLogUris - BlobClient = get_sdk(cmd.cli_ctx, ResourceType.DATA_STORAGE_BLOB, '_blob_client#BlobClient') - - virtual_machine = get_instance_view(cmd, resource_group_name, vm_name) - - blob_uri = None - if virtual_machine.get('instanceView', {}).get('bootDiagnostics'): - blob_uri = virtual_machine['instanceView']['bootDiagnostics'].get('serialConsoleLogBlobUri') - - # Managed storage - if blob_uri is None: - try: - command_args = { - 'resource_group': resource_group_name, - 'name': vm_name - } - boot_diagnostics_data = VmGetBootLogUris(cli_ctx=cmd.cli_ctx)(command_args=command_args) - blob_uri = boot_diagnostics_data.get('serialConsoleLogBlobUri') - except HttpResponseError: - pass - if blob_uri is None: - raise CLIError('Please enable boot diagnostics.') - return requests.get(blob_uri).content - - # Find storage account for diagnostics - storage_mgmt_client = _get_storage_management_client(cmd.cli_ctx) - if not blob_uri: - raise CLIError('No console log available') - try: - storage_accounts = storage_mgmt_client.storage_accounts.list() - matching_storage_account = (a for a in list(storage_accounts) - if a.primary_endpoints.blob and blob_uri.startswith(a.primary_endpoints.blob)) - storage_account = next(matching_storage_account) - except StopIteration: - raise CLIError('Failed to find storage account for console log file') - - regex = r'/subscriptions/[^/]+/resourceGroups/(?P[^/]+)/.+' - match = re.search(regex, storage_account.id, re.I) - rg = match.group('rg') - # Get account key - keys = storage_mgmt_client.storage_accounts.list_keys(rg, storage_account.name) - - blob_client = BlobClient.from_blob_url(blob_url=blob_uri, credential=keys.keys[0].value) - 
- # our streamwriter not seekable, so no parallel. - downloader = blob_client.download_blob(max_concurrency=1) - downloader.readinto(BootLogStreamWriter(sys.stdout)) -# endregion - - -# region VirtualMachines Diagnostics -def set_diagnostics_extension(cmd, resource_group_name, vm_name, settings, protected_settings=None, version=None, - no_auto_upgrade=False): - from .aaz.latest.vm.extension import Delete as VmExtensionDelete - vm = get_instance_view(cmd, resource_group_name, vm_name) - is_linux_os = _is_linux_os_aaz(vm) - vm_extension_name = _LINUX_DIAG_EXT if is_linux_os else _WINDOWS_DIAG_EXT - if is_linux_os: # check incompatible version - exts = vm.get('instanceView', {}).get('extensions', []) - major_ver = extension_mappings[_LINUX_DIAG_EXT]['version'].split('.', maxsplit=1)[0] - if next((e for e in exts if e.get('name') == vm_extension_name and - not e.get('typeHandlerVersion', '').startswith(major_ver + '.')), None): - logger.warning('There is an incompatible version of diagnostics extension installed. ' - 'We will update it with a new version') - poller = VmExtensionDelete(cli_ctx=cmd.cli_ctx)(command_args={ - 'resource_group': resource_group_name, - 'vm_extension_name': vm_extension_name, - 'vm_name': vm_name - }) - LongRunningOperation(cmd.cli_ctx)(poller) - - return set_extension(cmd, resource_group_name, vm_name, vm_extension_name, - extension_mappings[vm_extension_name]['publisher'], - version or extension_mappings[vm_extension_name]['version'], - settings, protected_settings, no_auto_upgrade) - - -def show_default_diagnostics_configuration(is_windows_os=False): - public_settings = get_default_diag_config(is_windows_os) - # pylint: disable=line-too-long - protected_settings_info = json.dumps({ - 'storageAccountName': "__STORAGE_ACCOUNT_NAME__", - # LAD and WAD are not consistent on sas token format. 
Call it out here - "storageAccountSasToken": "__SAS_TOKEN_{}__".format("WITH_LEADING_QUESTION_MARK" if is_windows_os else "WITHOUT_LEADING_QUESTION_MARK") - }, indent=2) - logger.warning('Protected settings with storage account info is required to work with the default configurations, e.g. \n%s', protected_settings_info) - return public_settings -# endregion - - -# region VirtualMachines Disks (Managed) -def attach_managed_data_disk(cmd, resource_group_name, vm_name, disk=None, ids=None, disks=None, new=False, sku=None, - size_gb=None, lun=None, caching=None, enable_write_accelerator=False, disk_ids=None, - source_snapshots_or_disks=None, source_disk_restore_point=None, - new_names_of_source_snapshots_or_disks=None, new_names_of_source_disk_restore_point=None): - # attach multiple managed disks using disk attach API - vm = get_vm_to_update_by_aaz(cmd, resource_group_name, vm_name) - - if not new and not sku and not size_gb and disk_ids is not None: - if lun: - disk_lun = lun - else: - disk_lun = _get_disk_lun_by_aaz(vm.get("storageProfile", {}).get("dataDisks", [])) - - data_disks = [] - for disk_item in disk_ids: - disk = { - 'diskId': disk_item, - 'caching': caching, - 'lun': disk_lun, - 'writeAcceleratorEnabled': enable_write_accelerator - } - data_disks.append(disk) - disk_lun += 1 - result = AttachDetachDataDisk(cli_ctx=cmd.cli_ctx)(command_args={ - 'vm_name': vm_name, - 'resource_group': resource_group_name, - 'data_disks_to_attach': data_disks - }) - return result - else: - # attach multiple managed disks using vm PUT API - from azure.mgmt.core.tools import parse_resource_id - from .operations.vm import convert_show_result_to_snake_case - - if size_gb is None: - default_size_gb = 1023 - - if disk_ids is not None: - disks = disk_ids - - for disk_item in disks: - if lun: - disk_lun = lun - else: - disk_lun = _get_disk_lun_by_aaz(vm.get("storageProfile", {}).get("dataDisks", [])) - - if new: - data_disk = { - 'lun': disk_lun, - 'createOption': 'Empty', - 
'name': parse_resource_id(disk_item)['name'], - 'diskSizeGB': size_gb if size_gb else default_size_gb, - 'caching': caching, - 'managedDisk': { - 'storageAccountType': sku - } - } - else: - data_disk = { - 'lun': disk_lun, - 'createOption': 'Attach', - 'managedDisk': { - 'id': disk_item, - 'storageAccountType': sku - }, - 'caching': caching - } - - if enable_write_accelerator: - data_disk["writeAcceleratorEnabled"] = enable_write_accelerator - - if "storageProfile" not in vm: - vm["storageProfile"] = {} - if "dataDisks" not in vm["storageProfile"]: - vm["storageProfile"]["dataDisks"] = [] - vm["storageProfile"]["dataDisks"].append(data_disk) - disk_lun = _get_disk_lun_by_aaz(vm.get("storageProfile", {}).get("dataDisks", [])) - if source_snapshots_or_disks is not None: - if new_names_of_source_snapshots_or_disks is None: - new_names_of_source_snapshots_or_disks = [None] * len(source_snapshots_or_disks) - for disk_id, disk_name in zip(source_snapshots_or_disks, new_names_of_source_snapshots_or_disks): - disk = { - 'name': disk_name, - 'createOption': 'Copy', - 'caching': caching, - 'lun': disk_lun, - 'writeAcceleratorEnabled': enable_write_accelerator, - "sourceResource": { - "id": disk_id - } - } - if size_gb is not None: - disk.update({ - 'diskSizeGB': size_gb - }) - if sku is not None: - disk.update({ - "managedDisk": { - "storageAccountType": sku - } - }) - disk_lun += 1 - if "storageProfile" not in vm: - vm["storageProfile"] = {} - if "dataDisks" not in vm["storageProfile"]: - vm["storageProfile"]["dataDisks"] = [] - vm["storageProfile"]["dataDisks"].append(disk) - if source_disk_restore_point is not None: - if new_names_of_source_disk_restore_point is None: - new_names_of_source_disk_restore_point = [None] * len(source_disk_restore_point) - for disk_id, disk_name in zip(source_disk_restore_point, new_names_of_source_disk_restore_point): - disk = { - 'name': disk_name, - 'createOption': 'Restore', - 'caching': caching, - 'lun': disk_lun, - 
'writeAcceleratorEnabled': enable_write_accelerator, - "sourceResource": { - "id": disk_id - } - } - if size_gb is not None: - disk.update({ - 'diskSizeGB': size_gb - }) - if sku is not None: - disk.update({ - "managedDisk": { - "storageAccountType": sku - } - }) - disk_lun += 1 - if "storageProfile" not in vm: - vm["storageProfile"] = {} - if "dataDisks" not in vm["storageProfile"]: - vm["storageProfile"]["dataDisks"] = [] - vm["storageProfile"]["dataDisks"].append(disk) - - vm = convert_show_result_to_snake_case(vm) - set_vm_by_aaz(cmd, vm) - - -def detach_unmanaged_data_disk(cmd, resource_group_name, vm_name, disk_name): - from .operations.vm import convert_show_result_to_snake_case - # here we handle unmanaged disk - vm = get_vm_to_update_by_aaz(cmd, resource_group_name, vm_name) - vm = convert_show_result_to_snake_case(vm) - leftovers = [d for d in vm.get('storage_profile', {}).get('data_disks', []) if - d.get('name', '').lower() != disk_name.lower()] - if len(vm.get('storage_profile', {}).get('data_disks', [])) == len(leftovers): - raise CLIError("No disk with the name '{}' was found".format(disk_name)) - - vm['storage_profile']['data_disks'] = leftovers - - set_vm_by_aaz(cmd, vm) -# endregion - - -def detach_managed_data_disk(cmd, resource_group_name, vm_name, disk_name=None, force_detach=None, disk_ids=None): - from .operations.vm import convert_show_result_to_snake_case - - if disk_ids is not None: - data_disks = [] - for disk_item in disk_ids: - disk = {'diskId': disk_item, 'detachOption': 'ForceDetach' if force_detach else None} - data_disks.append(disk) - result = AttachDetachDataDisk(cli_ctx=cmd.cli_ctx)(command_args={ - 'vm_name': vm_name, - 'resource_group': resource_group_name, - 'data_disks_to_detach': data_disks - }) - return result - else: - # here we handle managed disk - vm = get_vm_to_update_by_aaz(cmd, resource_group_name, vm_name) - if not force_detach: - # pylint: disable=no-member - leftovers = [d for d in vm.get("storageProfile", 
{}).get("dataDisks", []) - if d["name"].lower() != disk_name.lower()] - if len(vm.get("storageProfile", {}).get("dataDisks", [])) == len(leftovers): - raise ResourceNotFoundError("No disk with the name '{}' was found".format(disk_name)) - else: - leftovers = vm.get("storageProfile", {}).get("dataDisks", []) - is_contains = False - for d in leftovers: - if d["name"].lower() == disk_name.lower(): - d["toBeDetached"] = True - d["detachOption"] = "ForceDetach" - is_contains = True - break - if not is_contains: - raise ResourceNotFoundError("No disk with the name '{}' was found".format(disk_name)) - if "storageProfile" not in vm: - vm["storageProfile"] = {} - vm["storageProfile"]["dataDisks"] = leftovers - vm = convert_show_result_to_snake_case(vm) - set_vm_by_aaz(cmd, vm) -# endregion - - -# region VirtualMachines Extensions -def list_extensions(cmd, resource_group_name, vm_name): - from .operations.vm_extension import VMExtensionList - return VMExtensionList(cli_ctx=cmd.cli_ctx)(command_args={ - 'vm_name': vm_name, - 'resource_group': resource_group_name, - })['value'] - - -def show_extensions(cmd, resource_group_name, vm_name, vm_extension_name, instance_view=False, expand=None): - from .operations.vm_extension import VMExtensionShow - if instance_view: - expand = 'instanceView' - - return VMExtensionShow(cli_ctx=cmd.cli_ctx)(command_args={ - 'vm_extension_name': vm_extension_name, - 'resource_group': resource_group_name, - 'vm_name': vm_name, - 'expand': expand - }) - - -def set_extension(cmd, resource_group_name, vm_name, vm_extension_name, publisher, version=None, settings=None, - protected_settings=None, no_auto_upgrade=False, force_update=False, no_wait=False, - extension_instance_name=None, enable_auto_upgrade=None): - from .operations.vm import VMShow as _VMShow - vm = _VMShow(cli_ctx=cmd.cli_ctx)(command_args={ - 'vm_name': vm_name, - 'resource_group': resource_group_name, - 'expand': 'instanceView' - }) - - if not extension_instance_name: - 
extension_instance_name = vm_extension_name - - instance_name = _get_extension_instance_name_aaz(vm['instanceView'], publisher, vm_extension_name, - suggested_name=extension_instance_name) - if instance_name != extension_instance_name: - msg = "A %s extension with name %s already exists. Updating it with your settings..." - logger.warning(msg, vm_extension_name, instance_name) - if vm_extension_name == 'AHBForRHEL': - logger.warning('Please ensure that you are provisioning AHBForRHEL extension ' - 'on a Red Hat based operating system.') - if vm_extension_name == 'AHBForSLES': - logger.warning('Please ensure that you are provisioning AHBForSLES extension on a SLES based operating system.') - - auto_upgrade_extensions = ['GuestAttestation', 'CodeIntegrityAgent'] - if vm_extension_name in auto_upgrade_extensions and enable_auto_upgrade is None: - enable_auto_upgrade = True - - version = _normalize_extension_version(cmd.cli_ctx, publisher, vm_extension_name, version, vm['location']) - - from .operations.vm_extension import VMExtensionCreate as ExtensionSet - ext_args = { - 'resource_group': resource_group_name, - 'vm_name': vm_name, - 'vm_extension_name': instance_name, - 'location': vm['location'], - 'publisher': publisher, - 'type': vm_extension_name, - 'protected_settings': protected_settings, - 'type_handler_version': version, - 'settings': settings, - 'auto_upgrade_minor_version': (not no_auto_upgrade), - 'enable_automatic_upgrade': enable_auto_upgrade, - 'no_wait': no_wait - } - if force_update: - ext_args['force_update_tag'] = str(_gen_guid()) - return ExtensionSet(cli_ctx=cmd.cli_ctx)(command_args=ext_args) -# endregion - - -# region VirtualMachines Extension Images -def list_vm_extension_images( - cmd, image_location=None, publisher_name=None, name=None, version=None, latest=False): - return load_extension_images_thru_services( - cmd.cli_ctx, publisher_name, name, version, image_location, latest) -# endregion - - -# region VirtualMachines Identity -def 
_remove_identities_by_aaz(cmd, resource_group_name, name, identities, getter, setter): - from ._vm_utils import MSI_LOCAL_ID - - remove_system_assigned_identity = False - - if MSI_LOCAL_ID in identities: - remove_system_assigned_identity = True - identities.remove(MSI_LOCAL_ID) - - resource = getter(cmd, resource_group_name, name) - existing_identity = resource.get('identity') - - if existing_identity is None: - return None - - existing_emsis = [x.lower() for x in (existing_identity.get('userAssignedIdentities') or {}).keys()] - existing_identity['userAssignedIdentities'] = {} - - if identities: - emsis_to_remove = [x.lower() for x in identities] - - non_existing = [emsis for emsis in emsis_to_remove if emsis not in existing_emsis] - if non_existing: - raise CLIError("'{}' are not associated with '{}'".format(','.join(non_existing), name)) - - emsis_to_retain = [emsis for emsis in existing_emsis if emsis not in emsis_to_remove] - - if not emsis_to_retain: # if all emsis are gone, we need to update the type - if existing_identity['type'] == IdentityType.USER_ASSIGNED.value: - existing_identity['type'] = IdentityType.NONE.value - elif existing_identity['type'] == IdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED.value: - existing_identity['type'] = IdentityType.SYSTEM_ASSIGNED.value - - for emsis in identities: - existing_identity['userAssignedIdentities'][emsis] = {} - - if remove_system_assigned_identity: - if existing_identity['type'] == IdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED.value \ - or existing_identity['type'] == IdentityType.USER_ASSIGNED.value: - existing_identity['type'] = IdentityType.USER_ASSIGNED.value - else: - existing_identity['type'] = IdentityType.NONE.value - - result = LongRunningOperation(cmd.cli_ctx)(setter(resource_group_name, name, resource)) - - if not result: - return None - - return result.get('identity') or None - - -def remove_vm_identity(cmd, resource_group_name, vm_name, identities=None): - def setter(resource_group_name, vm_name, vm): - 
command_args = { - 'resource_group': resource_group_name, - 'vm_name': vm_name - } - - if vm.get('identity') and vm.get('identity').get('type') == IdentityType.USER_ASSIGNED.value: - # NOTE: The literal 'UserAssigned' is intentionally appended as a marker for - # VMIdentityRemove._format_content, which uses it to apply special handling - # for purely user-assigned identities. It is not a real identity resource ID. - command_args['mi_user_assigned'] = \ - list(vm.get('identity', {}).get('userAssignedIdentities', {}).keys()) + ['UserAssigned'] - elif vm.get('identity') and vm.get('identity').get('type') == IdentityType.SYSTEM_ASSIGNED.value: - command_args['mi_user_assigned'] = [] - command_args['mi_system_assigned'] = 'True' - elif vm.get('identity') and vm.get('identity').get('type') == IdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED.value: - command_args['mi_user_assigned'] = list(vm.get('identity', {}).get('userAssignedIdentities', {}).keys()) - command_args['mi_system_assigned'] = 'True' - else: - command_args['mi_user_assigned'] = [] - - from .operations.vm import VMIdentityRemove - return VMIdentityRemove(cli_ctx=cmd.cli_ctx)(command_args=command_args) - - if identities is None: - from ._vm_utils import MSI_LOCAL_ID - identities = [MSI_LOCAL_ID] - - return _remove_identities_by_aaz(cmd, resource_group_name, vm_name, identities, get_vm_by_aaz, setter) - - -# region VirtualMachines Images -def list_vm_images(cmd, image_location=None, publisher_name=None, offer=None, sku=None, all=False, # pylint: disable=redefined-builtin - edge_zone=None, architecture=None): - load_thru_services = all or edge_zone is not None - - if load_thru_services: - if not publisher_name and not offer and not sku and not edge_zone: - logger.warning("You are retrieving all the images from server which could take more than a minute. " - "To shorten the wait, provide '--publisher', '--offer' , '--sku' or '--edge-zone'." 
- " Partial name search is supported.") - all_images = load_images_thru_services(cmd.cli_ctx, publisher_name, offer, sku, image_location, edge_zone, - architecture) - else: - all_images = load_images_from_aliases_doc(cmd.cli_ctx, publisher_name, offer, sku, architecture) - logger.warning('You are viewing an offline list of images, use --all to retrieve an up-to-date list') - - if edge_zone is not None: - for i in all_images: - i['urn'] = ':'.join([i['publisher'], i['offer'], i['sku'], i['edge_zone'], i['version']]) - else: - for i in all_images: - i['urn'] = ':'.join([i['publisher'], i['offer'], i['sku'], i['version']]) - return all_images - - -def list_offers(cmd, publisher_name, location, edge_zone=None): - if edge_zone is not None: - from .aaz.latest.vm.image.edge_zone import ListOffers - return ListOffers(cli_ctx=cmd.cli_ctx)(command_args={ - 'location': location, - 'edge_zone': edge_zone, - 'publisher': publisher_name - }) - else: - from .aaz.latest.vm.image import ListOffers - return ListOffers(cli_ctx=cmd.cli_ctx)(command_args={ - 'location': location, - 'publisher': publisher_name - }) - - -def list_publishers(cmd, location, edge_zone=None): - if edge_zone is not None: - from .aaz.latest.vm.image.edge_zone import ListPublishers - return ListPublishers(cli_ctx=cmd.cli_ctx)(command_args={ - 'location': location, - 'edge_zone': edge_zone, - }) - else: - from .aaz.latest.vm.image import ListPublishers - return ListPublishers(cli_ctx=cmd.cli_ctx)(command_args={ - 'location': location, - }) - - -def list_sku(cmd, location, publisher_name, offer, edge_zone=None,): - if edge_zone is not None: - from .aaz.latest.vm.image.edge_zone import ListSkus - return ListSkus(cli_ctx=cmd.cli_ctx)(command_args={ - 'location': location, - 'edge_zone': edge_zone, - 'publisher': publisher_name, - 'offer': offer, - }) - else: - from .aaz.latest.vm.image import ListSkus - return ListSkus(cli_ctx=cmd.cli_ctx)(command_args={ - 'location': location, - 'publisher': publisher_name, - 
'offer': offer, - }) - - -def show_vm_image(cmd, urn=None, publisher=None, offer=None, sku=None, version=None, location=None, edge_zone=None): - from azure.cli.core.commands.parameters import get_one_of_subscription_locations - from azure.cli.core.azclierror import (MutuallyExclusiveArgumentError, - InvalidArgumentValueError) - - location = location or get_one_of_subscription_locations(cmd.cli_ctx) - error_msg = 'Please specify all of (--publisher, --offer, --sku, --version), or --urn' - if urn: - if any([publisher, offer, sku, edge_zone, version]): - recommendation = 'Try to use --urn publisher:offer:sku:version or' \ - ' --urn publisher:offer:sku:edge_zone:version' - raise MutuallyExclusiveArgumentError(error_msg, recommendation) - items = urn.split(":") - if len(items) != 4 and len(items) != 5: - raise InvalidArgumentValueError( - '--urn should be in the format of publisher:offer:sku:version or publisher:offer:sku:edge_zone:version') - if len(items) == 5: - publisher, offer, sku, edge_zone, version = urn.split(":") - elif len(items) == 4: - publisher, offer, sku, version = urn.split(":") - if version.lower() == 'latest': - version = _get_latest_image_version(cmd.cli_ctx, location, publisher, offer, sku) - elif not publisher or not offer or not sku or not version: - raise RequiredArgumentMissingError(error_msg) - if edge_zone is not None: - from .aaz.latest.vm.image.edge_zone import Show - return Show(cli_ctx=cmd.cli_ctx)(command_args={ - 'location': location, - 'edge_zone': edge_zone, - 'publisher': publisher, - 'offer': offer, - 'sku': sku, - 'version': version, - }) - else: - from .aaz.latest.vm.image import Show - return Show(cli_ctx=cmd.cli_ctx)(command_args={ - 'location': location, - 'publisher': publisher, - 'offer': offer, - 'sku': sku, - 'version': version, - }) - - -def accept_market_ordering_terms(cmd, urn=None, publisher=None, offer=None, plan=None): - from azure.mgmt.marketplaceordering import MarketplaceOrderingAgreements - from 
azure.mgmt.marketplaceordering.models import OfferType - from azure.cli.core.azclierror import (MutuallyExclusiveArgumentError, - InvalidArgumentValueError) - - error_msg = 'Please specify all of (--plan, --offer, --publish), or --urn' - if urn: - if any([publisher, offer, plan]): - recommendation = 'Try to use --urn publisher:offer:sku:version only' - raise MutuallyExclusiveArgumentError(error_msg, recommendation) - items = urn.split(':') - if len(items) != 4: - raise InvalidArgumentValueError('--urn should be in the format of publisher:offer:sku:version') - publisher, offer, _, _ = items - image = show_vm_image(cmd, urn) - if not image.get('plan', None): - logger.warning("Image '%s' has no terms to accept.", urn) - return - plan = image['plan']['name'] - else: - if not publisher or not offer or not plan: - raise RequiredArgumentMissingError(error_msg) - - market_place_client = get_mgmt_service_client(cmd.cli_ctx, MarketplaceOrderingAgreements) - - term = market_place_client.marketplace_agreements.get(offer_type=OfferType.VIRTUALMACHINE, - publisher_id=publisher, - offer_id=offer, - plan_id=plan) - term.accepted = True - return market_place_client.marketplace_agreements.create(offer_type=OfferType.VIRTUALMACHINE, - publisher_id=publisher, - offer_id=offer, - plan_id=plan, - parameters=term) -# endregion - - -def _terms_prepare(cmd, urn, publisher, offer, plan): - if urn: - if any([publisher, offer, plan]): - raise CLIError('usage error: If using --urn, do not use any of --plan, --offer, --publisher.') - terms = urn.split(':') - if len(terms) != 4: - raise CLIError('usage error: urn should be in the format of publisher:offer:sku:version.') - publisher, offer = terms[0], terms[1] - image = show_vm_image(cmd, urn) - if not image.get('plan', None): - raise CLIError("Image '%s' has no terms to accept." 
% urn) - plan = image['plan']['name'] - else: - if not all([publisher, offer, plan]): - raise CLIError( - 'usage error: If not using --urn, all of --plan, --offer and --publisher should be provided.') - return publisher, offer, plan - - -def _accept_cancel_terms(cmd, urn, publisher, offer, plan, accept): - from azure.mgmt.marketplaceordering.models import OfferType - publisher, offer, plan = _terms_prepare(cmd, urn, publisher, offer, plan) - op = cf_vm_image_term(cmd.cli_ctx, '') - terms = op.get(offer_type=OfferType.VIRTUALMACHINE, - publisher_id=publisher, - offer_id=offer, - plan_id=plan) - terms.accepted = accept - return op.create(offer_type=OfferType.VIRTUALMACHINE, - publisher_id=publisher, - offer_id=offer, - plan_id=plan, - parameters=terms) - - -def accept_terms(cmd, urn=None, publisher=None, offer=None, plan=None): - """ - Accept Azure Marketplace image terms so that the image can be used to create VMs. - :param cmd:cmd - :param urn:URN, in the format of 'publisher:offer:sku:version'. If specified, other argument values can be omitted - :param publisher:Image publisher - :param offer:Image offer - :param plan:Image billing plan - :return: - """ - return _accept_cancel_terms(cmd, urn, publisher, offer, plan, True) - - -def cancel_terms(cmd, urn=None, publisher=None, offer=None, plan=None): - """ - Cancel Azure Marketplace image terms. - :param cmd:cmd - :param urn:URN, in the format of 'publisher:offer:sku:version'. If specified, other argument values can be omitted - :param publisher:Image publisher - :param offer:Image offer - :param plan:Image billing plan - :return: - """ - return _accept_cancel_terms(cmd, urn, publisher, offer, plan, False) - - -def get_terms(cmd, urn=None, publisher=None, offer=None, plan=None): - """ - Get the details of Azure Marketplace image terms. - :param cmd:cmd - :param urn:URN, in the format of 'publisher:offer:sku:version'. 
If specified, other argument values can be omitted - :param publisher:Image publisher - :param offer:Image offer - :param plan:Image billing plan - :return: - """ - from azure.mgmt.marketplaceordering.models import OfferType - publisher, offer, plan = _terms_prepare(cmd, urn, publisher, offer, plan) - op = cf_vm_image_term(cmd.cli_ctx, '') - terms = op.get(offer_type=OfferType.VIRTUALMACHINE, - publisher_id=publisher, - offer_id=offer, - plan_id=plan) - return terms - - -# region VirtualMachines NetworkInterfaces (NICs) -def show_vm_nic(cmd, resource_group_name, vm_name, nic): - from azure.mgmt.core.tools import parse_resource_id - - NicShow = import_aaz_by_profile(cmd.cli_ctx.cloud.profile, "network.nic").Show - - vm = get_vm_by_aaz(cmd, resource_group_name, vm_name) - - found = next( - (n for n in vm.get("networkProfile", {}).get("networkInterfaces", []) if nic.lower() == n["id"].lower()), None - # pylint: disable=no-member - ) - if found: - nic_name = parse_resource_id(found["id"])['name'] - return NicShow(cli_ctx=cmd.cli_ctx)(command_args={ - 'name': nic_name, - 'resource_group': resource_group_name - }) - raise CLIError("NIC '{}' not found on VM '{}'".format(nic, vm_name)) - - -def list_vm_nics(cmd, resource_group_name, vm_name): - vm = get_vm_by_aaz(cmd, resource_group_name, vm_name) - return vm.get("networkProfile", {}).get("networkInterfaces", []) # pylint: disable=no-member - - -def add_vm_nic(cmd, resource_group_name, vm_name, nics, primary_nic=None): - vm = get_vm_to_update_by_aaz(cmd, resource_group_name, vm_name) - new_nics = _build_nic_list(cmd, nics) - existing_nics = _get_existing_nics(vm) - return _update_vm_nics(cmd, vm, existing_nics + new_nics, primary_nic) - - -def remove_vm_nic(cmd, resource_group_name, vm_name, nics, primary_nic=None): - - def to_delete(nic_id): - return [n for n in nics_to_delete if n["id"].lower() == nic_id.lower()] - - vm = get_vm_to_update_by_aaz(cmd, resource_group_name, vm_name) - nics_to_delete = _build_nic_list(cmd, 
nics) - existing_nics = _get_existing_nics(vm) - - survived = [x for x in existing_nics if not to_delete(x["id"])] - - return _update_vm_nics(cmd, vm, survived, primary_nic) - - -def set_vm_nic(cmd, resource_group_name, vm_name, nics, primary_nic=None): - vm = get_vm_to_update_by_aaz(cmd, resource_group_name, vm_name) - nics = _build_nic_list(cmd, nics) - return _update_vm_nics(cmd, vm, nics, primary_nic) - - -def _build_nic_list(cmd, nic_ids): - NicShow = import_aaz_by_profile(cmd.cli_ctx.cloud.profile, "network.nic").Show - nic_list = [] - if nic_ids: - # pylint: disable=no-member - for nic_id in nic_ids: - rg, name = _parse_rg_name(nic_id) - nic = NicShow(cli_ctx=cmd.cli_ctx)(command_args={ - 'name': name, - 'resource_group': rg - }) - nic_list.append({"id": nic["id"], "primary": False}) - return nic_list - - -def _get_existing_nics(vm): - network_profile = vm.get("networkProfile", None) - nics = [] - if network_profile is not None: - nics = network_profile.get("networkInterfaces", []) - return nics - - -def _update_vm_nics(cmd, vm, nics, primary_nic): - from .operations.vm import convert_show_result_to_snake_case - - if primary_nic: - try: - _, primary_nic_name = _parse_rg_name(primary_nic) - except IndexError: - primary_nic_name = primary_nic - - matched = [n for n in nics if _parse_rg_name(n["id"])[1].lower() == primary_nic_name.lower()] - if not matched: - raise CLIError('Primary Nic {} is not found'.format(primary_nic)) - if len(matched) > 1: - raise CLIError('Duplicate Nic entries with name {}'.format(primary_nic)) - for n in nics: - n["primary"] = False - matched[0]["primary"] = True - elif nics: - if not [n for n in nics if n["primary"]]: - nics[0]["primary"] = True - - if "networkProfile" not in vm: - vm["networkProfile"] = {} - vm["networkProfile"]["networkInterfaces"] = nics - vm = convert_show_result_to_snake_case(vm) - result = set_vm_by_aaz(cmd, vm) - return (result.get("networkProfile") or {}).get("networkInterfaces") or [] -# endregion - - -# 
region VirtualMachines RunCommand -def run_command_invoke(cmd, resource_group_name, vm_vmss_name, command_id, scripts=None, parameters=None, instance_id=None): # pylint: disable=line-too-long - parameters = parameters or [] # CLI user input arg "parameters" - params = [] # AAZCommand arg for "parameters" - auto_arg_name_num = 0 - for p in parameters: - if '=' in p: - n, v = p.split('=', 1) - else: - # RunCommand API requires named arguments, which doesn't make lots of sense for bash scripts - # using positional arguments, so here we provide names just to get API happy - # note, we don't handle mixing styles, but will consolidate by GA when API is settled - auto_arg_name_num += 1 - n = 'arg{}'.format(auto_arg_name_num) - v = p - params.append({'name': n, 'value': v}) - - # if instance_id, this is a vmss instance - if instance_id: - from .aaz.latest.vmss.run_command import Invoke - return Invoke(cli_ctx=cmd.cli_ctx)(command_args={ - 'resource_group': resource_group_name, - 'vmss_name': vm_vmss_name, - 'instance_id': instance_id, - 'command_id': command_id, - 'script': scripts, - 'parameters': params - }) - - # otherwise this is a regular vm instance - from .aaz.latest.vm.run_command import Invoke - return Invoke(cli_ctx=cmd.cli_ctx)(command_args={ - 'resource_group': resource_group_name, - 'vm_name': vm_vmss_name, - 'command_id': command_id, - 'script': scripts, - 'parameters': params - }) - - -def vm_run_command_invoke(cmd, resource_group_name, vm_name, command_id, scripts=None, parameters=None): - return run_command_invoke(cmd, resource_group_name, vm_name, command_id, scripts, parameters) - - -def vm_run_command_create(cmd, - resource_group_name, - vm_name, - run_command_name, - location, - tags=None, - script=None, - script_uri=None, - command_id=None, - parameters=None, - protected_parameters=None, - async_execution=None, - run_as_user=None, - run_as_password=None, - timeout_in_seconds=None, - output_blob_uri=None, - error_blob_uri=None, - no_wait=False): - from 
.aaz.latest.vm.run_command import Create - args = {} - args['location'] = location - args['resource_group'] = resource_group_name - args['run_command_name'] = run_command_name - args['vm_name'] = vm_name - args['no_wait'] = no_wait - if tags is not None: - args['tags'] = tags - if script is not None: - args['script'] = script - if script_uri is not None: - args['script_uri'] = script_uri - if command_id is not None: - args['command_id'] = command_id - if parameters is not None: - auto_arg_name_num = 0 - args['parameters'] = [] - for p in parameters: - if '=' in p: - n, v = p.split('=', 1) - else: - auto_arg_name_num += 1 - n = 'arg{}'.format(auto_arg_name_num) - v = p - args['parameters'].append({'name': n, 'value': v}) - if protected_parameters is not None: - auto_arg_name_num = 0 - args['protected_parameters'] = [] - for p in protected_parameters: - if '=' in p: - n, v = p.split('=', 1) - else: - auto_arg_name_num += 1 - n = 'arg{}'.format(auto_arg_name_num) - v = p - args['protected_parameters'].append({'name': n, 'value': v}) - if async_execution is not None: - args['async_execution'] = async_execution - else: - args['async_execution'] = False - if run_as_user is not None: - args['run_as_user'] = run_as_user - if run_as_password is not None: - args['run_as_password'] = run_as_password - if timeout_in_seconds is not None: - args['timeout_in_seconds'] = timeout_in_seconds - if output_blob_uri is not None: - args['output_blob_uri'] = output_blob_uri - if error_blob_uri is not None: - args['error_blob_uri'] = error_blob_uri - return Create(cli_ctx=cmd.cli_ctx)(command_args=args) - - -def vm_run_command_update(cmd, - resource_group_name, - vm_name, - run_command_name, - location, - tags=None, - script=None, - script_uri=None, - command_id=None, - parameters=None, - protected_parameters=None, - async_execution=None, - run_as_user=None, - run_as_password=None, - timeout_in_seconds=None, - output_blob_uri=None, - error_blob_uri=None, - no_wait=False): - from 
def _build_run_command_arg_list(raw_params):
    """Convert run-command parameter tokens into [{'name': ..., 'value': ...}] dicts.

    Each token is either 'name=value' (split on the first '='), or a bare value,
    which is auto-named arg1, arg2, ... in order of appearance.
    """
    auto_arg_name_num = 0
    result = []
    for p in raw_params:
        if '=' in p:
            n, v = p.split('=', 1)
        else:
            auto_arg_name_num += 1
            n = 'arg{}'.format(auto_arg_name_num)
            v = p
        result.append({'name': n, 'value': v})
    return result


def vm_run_command_update(cmd,
                          resource_group_name,
                          vm_name,
                          run_command_name,
                          location,
                          tags=None,
                          script=None,
                          script_uri=None,
                          command_id=None,
                          parameters=None,
                          protected_parameters=None,
                          async_execution=None,
                          run_as_user=None,
                          run_as_password=None,
                          timeout_in_seconds=None,
                          output_blob_uri=None,
                          error_blob_uri=None,
                          no_wait=False):
    """Update a VM run command. Only the properties explicitly supplied are patched
    onto the existing instance; async_execution defaults to False when unset."""
    from .aaz.latest.vm.run_command import Update as _Update

    class Update(_Update):
        def pre_instance_update(self, instance):
            # Patch only the fields the caller provided, leaving the rest intact.
            if tags is not None:
                instance.tags = tags
            if location is not None:
                instance.location = location
            if script is not None:
                instance.properties.source.script = script
            if script_uri is not None:
                instance.properties.source.script_uri = script_uri
            if command_id is not None:
                instance.properties.source.command_id = command_id
            if parameters is not None:
                instance.properties.parameters = _build_run_command_arg_list(parameters)
            if protected_parameters is not None:
                instance.properties.protected_parameters = _build_run_command_arg_list(protected_parameters)
            if async_execution is not None:
                instance.properties.async_execution = async_execution
            else:
                instance.properties.async_execution = False
            if run_as_user is not None:
                instance.properties.run_as_user = run_as_user
            if run_as_password is not None:
                instance.properties.run_as_password = run_as_password
            if timeout_in_seconds is not None:
                instance.properties.timeout_in_seconds = timeout_in_seconds
            if output_blob_uri is not None:
                instance.properties.output_blob_uri = output_blob_uri
            if error_blob_uri is not None:
                instance.properties.error_blob_uri = error_blob_uri

    args = {}
    args['resource_group'] = resource_group_name
    args['run_command_name'] = run_command_name
    args['vm_name'] = vm_name
    args['no_wait'] = no_wait
    return Update(cli_ctx=cmd.cli_ctx)(command_args=args)


def vm_run_command_list(cmd,
                        resource_group_name=None,
                        vm_name=None,
                        expand=None,
                        location=None):
    """List run commands: per-VM when --vm-name/--resource-group are given,
    otherwise the subscription-wide catalog for a location."""
    if not location and not (resource_group_name and vm_name):
        raise RequiredArgumentMissingError("Please specify --location or specify --vm-name and --resource-group")

    from .aaz.latest.vm.run_command import List, ListBySubscription

    if vm_name:
        return List(cli_ctx=cmd.cli_ctx)(command_args={
            "resource_group": resource_group_name,
            "vm_name": vm_name,
            "expand": expand
        })

    return ListBySubscription(cli_ctx=cmd.cli_ctx)(command_args={
        "location": location
    })


def vm_run_command_show(cmd,
                        resource_group_name=None,
                        vm_name=None,
                        run_command_name=None,
                        expand=None,
                        instance_view=False,
                        location=None,
                        command_id=None):
    """Show a run command: a VM-scoped one when VM identifiers are given,
    otherwise a built-in command looked up by location + command id."""
    if not (resource_group_name and vm_name and run_command_name) and not (location and command_id):
        raise RequiredArgumentMissingError(
            "Please specify --location and --command-id or specify --vm-name, --resource-group and --run-command-name")

    from .aaz.latest.vm.run_command import Show, ShowById

    if vm_name:
        if instance_view:
            # --instance-view is sugar for the service-side expand flag.
            expand = 'instanceView'
        return Show(cli_ctx=cmd.cli_ctx)(command_args={
            "resource_group": resource_group_name,
            "vm_name": vm_name,
            "expand": expand,
            "run_command_name": run_command_name
        })

    return ShowById(cli_ctx=cmd.cli_ctx)(command_args={
        "location": location,
        "command_id": command_id
    })

# endregion


# region VirtualMachines Secrets
def _get_vault_id_from_name(cli_ctx, client, vault_name):
    """Resolve a key vault name to its full ARM resource id within the current subscription."""
    group_name = _get_resource_group_from_vault_name(cli_ctx, vault_name)
    if not group_name:
        raise CLIError("unable to find vault '{}' in current subscription.".format(vault_name))
    vault = client.get(group_name, vault_name)
    return vault.id


def get_vm_format_secret(cmd, secrets, certificate_store=None, keyvault=None, resource_group_name=None):
    """Transform raw key vault secret URLs into the VM osProfile.secrets payload shape,
    grouping certificates by their source vault."""
    from azure.keyvault.secrets._shared import parse_key_vault_id
    import re
    client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_KEYVAULT).vaults
    grouped_secrets = {}

    merged_secrets = []
    for s in secrets:
        # each input may carry multiple newline-separated URLs
        merged_secrets += s.splitlines()

    # group secrets by source vault
    for secret in merged_secrets:
        parsed = parse_key_vault_id(secret)
        # vault name is the first DNS label of the vault URL
        match = re.search('://(.+?)\\.', parsed.vault_url)
        vault_name = match.group(1)
        if vault_name not in grouped_secrets:
            grouped_secrets[vault_name] = {
                'vaultCertificates': [],
                'id': keyvault or _get_vault_id_from_name(cmd.cli_ctx, client, vault_name)
            }

        vault_cert = {'certificateUrl': secret}
        if certificate_store:
            vault_cert['certificateStore'] = certificate_store

        grouped_secrets[vault_name]['vaultCertificates'].append(vault_cert)

    # transform the reduced map to vm format
    formatted = [{'sourceVault': {'id': value['id']},
                  'vaultCertificates': value['vaultCertificates']}
                 for _, value in list(grouped_secrets.items())]

    return formatted


def add_vm_secret(cmd, resource_group_name, vm_name, keyvault, certificate, certificate_store=None):
    """Add a key vault certificate to a VM's osProfile secrets and persist the VM."""
    from azure.mgmt.core.tools import parse_resource_id
    from ._vm_utils import create_data_plane_keyvault_certificate_client, get_key_vault_base_url
    from .operations.vm import convert_show_result_to_snake_case
    vm = get_vm_to_update_by_aaz(cmd, resource_group_name, vm_name)
    vm = convert_show_result_to_snake_case(vm)

    if '://' not in certificate:  # has a cert name rather a full url?
        keyvault_client = create_data_plane_keyvault_certificate_client(
            cmd.cli_ctx, get_key_vault_base_url(cmd.cli_ctx, parse_resource_id(keyvault)['name']))
        cert_info = keyvault_client.get_certificate(certificate)
        certificate = cert_info.secret_id

    if not _is_linux_os_by_aaz(vm):
        certificate_store = certificate_store or 'My'
    elif certificate_store:
        raise CLIError('Usage error: --certificate-store is only applicable on Windows VM')
    vault_cert = {
        'certificate_store': certificate_store,
        'certificate_url': certificate
    }
    vault_secret_group = next((x for x in vm.get('os_profile', {}).get('secrets', [])
                               if x.get('source_vault', {}).get('id', '').lower() == keyvault.lower()), None)
    if vault_secret_group:
        # vault already referenced: append the cert to the existing group in place
        certs = vault_secret_group.get('vault_certificates', [])
        certs.append(vault_cert)
        vault_secret_group['vault_certificates'] = certs
    else:
        vault_secret_group = {
            'source_vault': {
                'id': keyvault
            },
            'vault_certificates': [vault_cert]
        }

        # fix: was {'secret': []} — a dead key; the list actually used is 'secrets'
        if not vm.get('os_profile'):
            vm['os_profile'] = {'secrets': []}

        if not vm.get('os_profile').get('secrets'):
            vm['os_profile']['secrets'] = []

        # only a newly created group is appended; an existing one was mutated above
        vm['os_profile']['secrets'].append(vault_secret_group)

    vm = set_vm_by_aaz(cmd, vm)
    return vm.get('osProfile', {}).get('secrets', [])


def list_vm_secrets(cmd, resource_group_name, vm_name):
    """List a VM's osProfile secrets, normalizing absent certificateStore to None."""
    vm = get_vm_by_aaz(cmd, resource_group_name, vm_name)

    if vm.get('osProfile', {}).get('secrets', []):
        for secret in vm['osProfile']['secrets']:
            for cert in secret.get('vaultCertificates', []):
                if not cert.get('certificateStore'):
                    cert['certificateStore'] = None

    return vm.get('osProfile', {}).get('secrets', [])


def remove_vm_secret(cmd, resource_group_name, vm_name, keyvault, certificate=None):
    """Remove secrets from a VM's osProfile and persist the VM."""
    from .operations.vm import convert_show_result_to_snake_case
    vm = get_vm_to_update_by_aaz(cmd, resource_group_name, vm_name)

    # support 2 kinds of filter:
    # a. if only keyvault is supplied, we delete its whole vault group.
    # b. if both keyvault and certificate are supplied, we only delete the specific cert entry.

    to_keep = vm.get('osProfile', {}).get('secrets', [])
    keyvault_matched = []
    if keyvault:
        keyvault = keyvault.lower()
        keyvault_matched = [x for x in to_keep if x.get('sourceVault', {}).get('id', '').lower() == keyvault]

    if keyvault and not certificate:
        to_keep = [x for x in to_keep if x not in keyvault_matched]
    elif certificate:
        temp = keyvault_matched if keyvault else to_keep
        cert_url_pattern = certificate.lower()
        if '://' not in cert_url_pattern:  # just a cert name?
            cert_url_pattern = '/' + cert_url_pattern + '/'
        for x in temp:
            x['vaultCertificates'] = [v for v in x.get('vaultCertificates')
                                      if not (v.get('certificateUrl') and
                                              cert_url_pattern in v.get('certificateUrl', '').lower())]
        to_keep = [x for x in to_keep if x.get('vaultCertificates')]  # purge all groups w/o any cert entries

    vm['osProfile']['secrets'] = to_keep
    vm = convert_show_result_to_snake_case(vm)
    vm = set_vm_by_aaz(cmd, vm)
    return vm.get('osProfile', {}).get('secrets', [])
# endregion


# region VirtualMachines UnmanagedDisks
def attach_unmanaged_data_disk(cmd, resource_group_name, vm_name, new=False, vhd_uri=None, lun=None,
                               disk_name=None, size_gb=1023, caching=None):
    """Attach a new or existing unmanaged (VHD-backed) data disk to a VM."""
    from .operations.vm import convert_show_result_to_snake_case
    from ._vm_utils import DiskCreateOptionTypes
    if not new and not disk_name:
        raise CLIError('Please provide the name of the existing disk to attach')

    vm = get_vm_to_update_by_aaz(cmd, resource_group_name, vm_name)
    vm = convert_show_result_to_snake_case(vm)
    if disk_name is None:
        import datetime
        disk_name = vm_name + '-' + datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
    # pylint: disable=no-member
    if vhd_uri is None:
        if not vm.get('storage_profile', {}).get('os_disk', {}).get('vhd'):
            raise CLIError('Adding unmanaged disks to a VM with managed disks is not supported')
        # derive a sibling blob URI next to the OS disk's VHD
        blob_uri = vm['storage_profile']['os_disk']['vhd']['uri']
        vhd_uri = blob_uri[0:blob_uri.rindex('/') + 1] + disk_name + '.vhd'

    if lun is None:
        lun = _get_disk_lun_by_aaz(vm.get('storage_profile', {}).get('data_disks'))
    disk = {
        'caching': caching,
        'create_option': DiskCreateOptionTypes.EMPTY.value if new else DiskCreateOptionTypes.ATTACH.value,
        'disk_size_gb': size_gb if new else None,
        'lun': lun,
        'name': disk_name,
        'vhd': {
            'uri': vhd_uri
        }
    }
    if not vm.get('storage_profile', {}).get('data_disks'):
        vm['storage_profile']['data_disks'] = []
    vm['storage_profile']['data_disks'].append(disk)
    return set_vm_by_aaz(cmd, vm)


def list_unmanaged_disks(cmd, resource_group_name, vm_name):
    """List the data disks of a VM."""
    vm = get_vm_by_aaz(cmd, resource_group_name, vm_name)
    return vm.get('storageProfile', {}).get('dataDisks')
# endregion


# region VirtualMachines Users
def _update_linux_access_extension(cmd, vm_instance, resource_group_name, protected_settings,
                                   no_wait=False):
    """Install/refresh the Linux VMAccess extension with the given protected settings."""
    from .operations.vm_extension import VMExtensionCreate

    # pylint: disable=no-member
    instance_name = _get_extension_instance_name_aaz(vm_instance.get('instanceView', {}),
                                                     extension_mappings[_LINUX_ACCESS_EXT]['publisher'],
                                                     _LINUX_ACCESS_EXT,
                                                     _ACCESS_EXT_HANDLER_NAME)

    publisher, version, auto_upgrade = _get_access_extension_upgrade_info_aaz(
        vm_instance.get('resources', []), _LINUX_ACCESS_EXT)

    poller = VMExtensionCreate(cli_ctx=cmd.cli_ctx)(command_args={
        'resource_group': resource_group_name,
        'vm_name': vm_instance['name'],
        'vm_extension_name': instance_name,
        'location': vm_instance['location'],
        'publisher': publisher,
        'type': _LINUX_ACCESS_EXT,
        'type_handler_version': version,
        'settings': {},
        'protected_settings': protected_settings,
        'auto_upgrade_minor_version': auto_upgrade,
        'no_wait': no_wait
    })
    return poller


def _set_linux_user(cmd, vm_instance, resource_group_name, username,
                    password=None, ssh_key_value=None, no_wait=False):
    """Create/update a Linux user via the VMAccess extension (password or SSH key)."""
    protected_settings = {}
    protected_settings['username'] = username
    if password:
        protected_settings['password'] = password
    elif not ssh_key_value and not password:  # default to ssh
        ssh_key_value = os.path.join(os.path.expanduser('~'), '.ssh', 'id_rsa.pub')

    if ssh_key_value:
        protected_settings['ssh_key'] = read_content_if_is_file(ssh_key_value)

    if no_wait:
        return _update_linux_access_extension(cmd, vm_instance, resource_group_name,
                                              protected_settings, no_wait)

    poller = _update_linux_access_extension(cmd, vm_instance, resource_group_name,
                                            protected_settings)
    return ExtensionUpdateLongRunningOperation(cmd.cli_ctx, 'setting user', 'done')(poller)


def _reset_windows_admin(cmd, vm_instance, resource_group_name, username, password, no_wait=False):
    '''Update the password. You can only change the password. Adding a new user is not supported. '''
    from .operations.vm_extension import VMExtensionCreate

    publisher, version, auto_upgrade = _get_access_extension_upgrade_info_aaz(
        vm_instance.get('resources', []), _WINDOWS_ACCESS_EXT)
    # pylint: disable=no-member
    instance_name = _get_extension_instance_name_aaz(vm_instance.get('instanceView', {}),
                                                     publisher,
                                                     _WINDOWS_ACCESS_EXT,
                                                     _ACCESS_EXT_HANDLER_NAME)

    poller = VMExtensionCreate(cli_ctx=cmd.cli_ctx)(command_args={
        'location': vm_instance['location'],
        'resource_group': resource_group_name,
        'vm_name': vm_instance['name'],
        'vm_extension_name': instance_name,
        'publisher': publisher,
        'type': _WINDOWS_ACCESS_EXT,
        'type_handler_version': version,
        'auto_upgrade_minor_version': auto_upgrade,
        'settings': {'UserName': username},
        'protected_settings': {'Password': password},
        'no_wait': no_wait
    })

    if no_wait:
        return poller

    return ExtensionUpdateLongRunningOperation(cmd.cli_ctx, 'resetting admin', 'done')(poller)


def set_user(cmd, resource_group_name, vm_name, username, password=None, ssh_key_value=None,
             no_wait=False):
    """Set a user account on a VM: full user management on Linux, admin password reset on Windows."""
    from .operations.vm import VMShow
    vm = VMShow(cli_ctx=cmd.cli_ctx)(command_args={
        'resource_group': resource_group_name,
        'vm_name': vm_name,
        'expand': 'instanceView'
    })
    if _is_linux_os_aaz(vm):
        return _set_linux_user(cmd, vm, resource_group_name, username, password, ssh_key_value, no_wait)
    if ssh_key_value:
        # fix: corrected misspelling "appliable" in the user-facing error message
        raise CLIError('SSH key is not applicable on a Windows VM')
    return _reset_windows_admin(cmd, vm, resource_group_name, username, password, no_wait)


def delete_user(cmd, resource_group_name, vm_name, username, no_wait=False):
    """Delete a user account from a Linux VM via the VMAccess extension."""
    from .operations.vm import VMShow
    vm = VMShow(cli_ctx=cmd.cli_ctx)(command_args={
        'resource_group': resource_group_name,
        'vm_name': vm_name,
        'expand': 'instanceView'
    })
    if not _is_linux_os_aaz(vm):
        raise CLIError('Deleting a user is not supported on Windows VM')
    if no_wait:
        return _update_linux_access_extension(cmd, vm, resource_group_name,
                                              {'remove_user': username}, no_wait)

    poller = _update_linux_access_extension(cmd, vm, resource_group_name,
                                            {'remove_user': username})
    return ExtensionUpdateLongRunningOperation(cmd.cli_ctx, 'deleting user', 'done')(poller)


def reset_linux_ssh(cmd, resource_group_name, vm_name, no_wait=False):
    """Reset the SSH configuration of a Linux VM via the VMAccess extension."""
    from .operations.vm import VMShow
    vm = VMShow(cli_ctx=cmd.cli_ctx)(command_args={
        'resource_group': resource_group_name,
        'vm_name': vm_name,
        'expand': 'instanceView'
    })
    if not _is_linux_os_aaz(vm):
        raise CLIError('Resetting SSH is not supported in Windows VM')
    if no_wait:
        return _update_linux_access_extension(cmd, vm, resource_group_name,
                                              {'reset_ssh': True}, no_wait)

    poller = _update_linux_access_extension(cmd, vm, resource_group_name,
                                            {'reset_ssh': True})
    return ExtensionUpdateLongRunningOperation(cmd.cli_ctx, 'resetting SSH', 'done')(poller)
# endregion


# region VirtualMachineScaleSets
def assign_vmss_identity(cmd, resource_group_name, vmss_name, assign_identity=None, identity_role=None,
                         identity_role_id=None, identity_scope=None):
    """Enable system/user-assigned managed identity on a VMSS and optionally grant it a role."""
    identity, _, external_identities, enable_local_identity = _build_identities_info(assign_identity)
    from ._vm_utils import assign_identity as assign_identity_helper, UpgradeMode

    command_args = {'resource_group': resource_group_name, 'vm_scale_set_name': vmss_name}

    def getter():
        return get_vmss_by_aaz(cmd, resource_group_name, vmss_name)

    def setter(vmss, external_identities=external_identities):
        # Merge the requested identity kind with what is already on the VMSS.
        if vmss.get('identity', {}).get('type', None) == IdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED.value:
            identity_types = IdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED.value
        elif vmss.get('identity', {}).get('type', None) == IdentityType.SYSTEM_ASSIGNED.value and external_identities:
            identity_types = IdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED.value
        elif vmss.get('identity', {}).get('type', None) == IdentityType.USER_ASSIGNED.value and enable_local_identity:
            identity_types = IdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED.value
        elif external_identities and enable_local_identity:
            identity_types = IdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED.value
        elif external_identities:
            identity_types = IdentityType.USER_ASSIGNED.value
        else:
            identity_types = IdentityType.SYSTEM_ASSIGNED.value

        if identity_types == IdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED.value:
            command_args['mi_system_assigned'] = "True"
            command_args['mi_user_assigned'] = []
        elif identity_types == IdentityType.USER_ASSIGNED.value:
            command_args['mi_user_assigned'] = []
        else:
            command_args['mi_system_assigned'] = "True"
            command_args['mi_user_assigned'] = []

        # keep the identities already attached to the VMSS
        if vmss.get('identity', {}).get('userAssignedIdentities', None):
            for key in vmss.get('identity').get('userAssignedIdentities').keys():
                command_args['mi_user_assigned'].append(key)

        # add the newly requested identities, deduplicated
        if identity.get('userAssignedIdentities'):
            for key in identity.get('userAssignedIdentities', {}).keys():
                if key not in command_args['mi_user_assigned']:
                    command_args['mi_user_assigned'].append(key)

        from .operations.vmss import VMSSPatch
        update_vmss_identity = VMSSPatch(cli_ctx=cmd.cli_ctx)(command_args=command_args)
        LongRunningOperation(cmd.cli_ctx)(update_vmss_identity)
        result = update_vmss_identity.result()
        return result

    assign_identity_helper(cmd.cli_ctx, getter, setter, identity_role=identity_role_id, identity_scope=identity_scope)

    vmss = getter()
    if vmss.get('upgradePolicy', {}).get('mode', '') == UpgradeMode.MANUAL.value:
        logger.warning("With manual upgrade mode, you will need to run 'az vmss update-instances -g %s -n %s "
                       "--instance-ids *' to propagate the change", resource_group_name, vmss_name)

    return _construct_identity_info(
        identity_scope,
        identity_role,
        vmss.get('identity').get('principalId') if vmss.get('identity') else None,
        vmss.get('identity').get('userAssignedIdentities') if vmss.get('identity') else None)
- update_vmss_identity = VMSSPatch(cli_ctx=cmd.cli_ctx)(command_args=command_args) - LongRunningOperation(cmd.cli_ctx)(update_vmss_identity) - result = update_vmss_identity.result() - return result - - assign_identity_helper(cmd.cli_ctx, getter, setter, identity_role=identity_role_id, identity_scope=identity_scope) - - vmss = getter() - if vmss.get('upgradePolicy', {}).get('mode', '') == UpgradeMode.MANUAL.value: - logger.warning("With manual upgrade mode, you will need to run 'az vmss update-instances -g %s -n %s " - "--instance-ids *' to propagate the change", resource_group_name, vmss_name) - - return _construct_identity_info( - identity_scope, - identity_role, - vmss.get('identity').get('principalId') if vmss.get('identity') else None, - vmss.get('identity').get('userAssignedIdentities') if vmss.get('identity') else None) - - -# pylint: disable=too-many-locals, too-many-statements -def create_vmss(cmd, vmss_name, resource_group_name, image=None, - disable_overprovision=None, instance_count=2, - location=None, tags=None, upgrade_policy_mode='manual', validate=False, - admin_username=None, admin_password=None, authentication_type=None, - vm_sku=None, no_wait=False, - ssh_dest_key_path=None, ssh_key_value=None, generate_ssh_keys=False, ssh_key_type=None, - load_balancer=None, load_balancer_sku=None, application_gateway=None, - app_gateway_subnet_address_prefix=None, - app_gateway_sku='Standard_Large', app_gateway_capacity=10, - backend_pool_name=None, nat_pool_name=None, backend_port=None, health_probe=None, - public_ip_address=None, public_ip_address_allocation=None, - public_ip_address_dns_name=None, accelerated_networking=None, - public_ip_per_vm=False, vm_domain_name=None, dns_servers=None, nsg=None, - os_caching=None, data_caching=None, - storage_container_name='vhds', storage_sku=None, - os_type=None, os_disk_name=None, - use_unmanaged_disk=False, data_disk_sizes_gb=None, disk_info=None, - vnet_name=None, vnet_address_prefix='10.0.0.0/16', - subnet=None, 
subnet_address_prefix=None, - os_offer=None, os_publisher=None, os_sku=None, os_version=None, - load_balancer_type=None, app_gateway_type=None, vnet_type=None, - public_ip_address_type=None, storage_profile=None, - single_placement_group=None, custom_data=None, secrets=None, platform_fault_domain_count=None, - plan_name=None, plan_product=None, plan_publisher=None, plan_promotion_code=None, license_type=None, - assign_identity=None, identity_scope=None, identity_role=None, encryption_identity=None, - identity_role_id=None, zones=None, priority=None, eviction_policy=None, - application_security_groups=None, ultra_ssd_enabled=None, - ephemeral_os_disk=None, ephemeral_os_disk_placement=None, - proximity_placement_group=None, aux_subscriptions=None, terminate_notification_time=None, - max_price=None, computer_name_prefix=None, orchestration_mode=None, scale_in_policy=None, - os_disk_encryption_set=None, data_disk_encryption_sets=None, data_disk_iops=None, data_disk_mbps=None, - automatic_repairs_grace_period=None, specialized=None, os_disk_size_gb=None, encryption_at_host=None, - host_group=None, max_batch_instance_percent=None, max_unhealthy_instance_percent=None, - max_unhealthy_upgraded_instance_percent=None, pause_time_between_batches=None, - enable_cross_zone_upgrade=None, prioritize_unhealthy_instances=None, edge_zone=None, - user_data=None, network_api_version=None, enable_spot_restore=None, spot_restore_timeout=None, - capacity_reservation_group=None, enable_auto_update=None, patch_mode=None, enable_agent=None, - security_type=None, enable_secure_boot=None, enable_vtpm=None, automatic_repairs_action=None, - v_cpus_available=None, v_cpus_per_core=None, accept_term=None, - disable_integrity_monitoring=None, # Unused - enable_integrity_monitoring=False, enable_auto_os_upgrade=None, - os_disk_security_encryption_type=None, os_disk_secure_vm_disk_encryption_set=None, - os_disk_delete_option=None, data_disk_delete_option=None, regular_priority_count=None, - 
regular_priority_percentage=None, disk_controller_type=None, nat_rule_name=None, - enable_osimage_notification=None, max_surge=None, disable_integrity_monitoring_autoupgrade=False, - enable_hibernation=None, enable_proxy_agent=None, proxy_agent_mode=None, - security_posture_reference_id=None, security_posture_reference_exclude_extensions=None, - enable_resilient_creation=None, enable_resilient_deletion=None, - additional_scheduled_events=None, enable_user_reboot_scheduled_events=None, - enable_user_redeploy_scheduled_events=None, skuprofile_vmsizes=None, - skuprofile_allostrat=None, skuprofile_rank=None, - security_posture_reference_is_overridable=None, zone_balance=None, wire_server_mode=None, - imds_mode=None, add_proxy_agent_extension=None, wire_server_access_control_profile_reference_id=None, - imds_access_control_profile_reference_id=None, enable_automatic_zone_balancing=None, - automatic_zone_balancing_strategy=None, automatic_zone_balancing_behavior=None, - enable_automatic_repairs=None): - from azure.cli.core.commands.client_factory import get_subscription_id - from azure.cli.core.util import random_string, hash_string - from azure.cli.core.commands.arm import ArmTemplateBuilder - from azure.cli.command_modules.vm._template_builder import (StorageProfile, build_vmss_resource, - build_vnet_resource, build_public_ip_resource, - build_load_balancer_resource, - build_vmss_storage_account_pool_resource, - build_application_gateway_resource, - build_msi_role_assignment, build_nsg_resource, - build_nat_rule_v2) - - # The default load balancer will be expected to be changed from Basic to Standard, and Basic will be removed. - # In order to avoid breaking change which has a big impact to users, - # we use the hint to guide users to use Standard load balancer to create VMSS in the first stage. 
- if load_balancer_sku == 'Basic': - logger.warning(remove_basic_option_msg, "--lb-sku Standard") - - # Build up the ARM template - master_template = ArmTemplateBuilder() - - uniform_str = 'Uniform' - flexible_str = 'Flexible' - if orchestration_mode: - from azure.mgmt.core.tools import resource_id, is_valid_resource_id - - if disk_info: - storage_sku = disk_info['os'].get('storageAccountType') - - subscription_id = get_subscription_id(cmd.cli_ctx) - - if os_disk_encryption_set is not None and not is_valid_resource_id(os_disk_encryption_set): - os_disk_encryption_set = resource_id( - subscription=subscription_id, resource_group=resource_group_name, - namespace='Microsoft.Compute', type='diskEncryptionSets', name=os_disk_encryption_set) - if os_disk_secure_vm_disk_encryption_set is not None and\ - not is_valid_resource_id(os_disk_secure_vm_disk_encryption_set): - os_disk_secure_vm_disk_encryption_set = resource_id( - subscription=subscription_id, resource_group=resource_group_name, - namespace='Microsoft.Compute', type='diskEncryptionSets', name=os_disk_secure_vm_disk_encryption_set) - - if data_disk_encryption_sets is None: - data_disk_encryption_sets = [] - for i, des in enumerate(data_disk_encryption_sets): - if des is not None and not is_valid_resource_id(des): - data_disk_encryption_sets[i] = resource_id( - subscription=subscription_id, resource_group=resource_group_name, - namespace='Microsoft.Compute', type='diskEncryptionSets', name=des) - - network_id_template = resource_id( - subscription=subscription_id, resource_group=resource_group_name, - namespace='Microsoft.Network') - - vmss_id = resource_id( - subscription=subscription_id, resource_group=resource_group_name, - namespace='Microsoft.Compute', type='virtualMachineScaleSets', name=vmss_name) - - scrubbed_name = vmss_name.replace('-', '').lower()[:5] - naming_prefix = '{}{}'.format(scrubbed_name, - hash_string(vmss_id, - length=(9 - len(scrubbed_name)), - force_lower=True)) - - # determine final 
defaults and calculated values - tags = tags or {} - os_disk_name = os_disk_name or ('osdisk_{}'.format(hash_string(vmss_id, length=10)) - if use_unmanaged_disk else None) - load_balancer = load_balancer or '{}LB'.format(vmss_name) - app_gateway = application_gateway or '{}AG'.format(vmss_name) - backend_pool_name = backend_pool_name or '{}BEPool'.format(load_balancer or application_gateway) - - vmss_dependencies = [] - - # VNET will always be a dependency - if vnet_type == 'new': - vnet_name = vnet_name or '{}VNET'.format(vmss_name) - subnet = subnet or '{}Subnet'.format(vmss_name) - vmss_dependencies.append('Microsoft.Network/virtualNetworks/{}'.format(vnet_name)) - vnet = build_vnet_resource( - cmd, vnet_name, location, tags, vnet_address_prefix, subnet, subnet_address_prefix, edge_zone=edge_zone) - if app_gateway_type: - vnet['properties']['subnets'].append({ - 'name': 'appGwSubnet', - 'properties': { - 'addressPrefix': app_gateway_subnet_address_prefix - } - }) - master_template.add_resource(vnet) - if subnet: - subnet_id = subnet if is_valid_resource_id(subnet) else \ - '{}/virtualNetworks/{}/subnets/{}'.format(network_id_template, vnet_name, subnet) - else: - subnet_id = None - - if vnet_name: - gateway_subnet_id = ('{}/virtualNetworks/{}/subnets/appGwSubnet'.format(network_id_template, vnet_name) - if app_gateway_type == 'new' else None) - else: - gateway_subnet_id = None - - # public IP is used by either load balancer/application gateway - public_ip_address_id = None - if public_ip_address: - public_ip_address_id = (public_ip_address if is_valid_resource_id(public_ip_address) - else '{}/publicIPAddresses/{}'.format(network_id_template, - public_ip_address)) - - def _get_public_ip_address_allocation(value, sku): - if not value: - value = 'Static' if (sku and sku.lower() == 'standard') else 'Dynamic' - return value - - # Handle load balancer creation - if load_balancer_type == 'new': - 
vmss_dependencies.append('Microsoft.Network/loadBalancers/{}'.format(load_balancer)) - - lb_dependencies = [] - if vnet_type == 'new': - lb_dependencies.append('Microsoft.Network/virtualNetworks/{}'.format(vnet_name)) - if public_ip_address_type == 'new': - public_ip_address = public_ip_address or '{}PublicIP'.format(load_balancer) - lb_dependencies.append( - 'Microsoft.Network/publicIpAddresses/{}'.format(public_ip_address)) - master_template.add_resource(build_public_ip_resource( - cmd, public_ip_address, location, tags, - _get_public_ip_address_allocation(public_ip_address_allocation, load_balancer_sku), - public_ip_address_dns_name, load_balancer_sku, zones, edge_zone=edge_zone)) - public_ip_address_id = '{}/publicIPAddresses/{}'.format(network_id_template, - public_ip_address) - - if nat_rule_name and nat_pool_name: - from azure.cli.core.azclierror import MutuallyExclusiveArgumentError - raise MutuallyExclusiveArgumentError( - 'Please do not pass in both "--nat-pool-name" and "--nat-rule-name" parameters at the same time.' 
- '"--nat-rule-name" parameter is recommended') - - is_basic_lb_sku = not load_balancer_sku or load_balancer_sku.lower() != 'standard' - # calculate default names if not provided - if orchestration_mode.lower() == flexible_str.lower(): - # inbound nat pools are not supported on VMSS Flex - nat_pool_name = None - elif nat_pool_name or (not nat_rule_name and is_basic_lb_sku): - nat_pool_name = nat_pool_name or '{}NatPool'.format(load_balancer) - - if not backend_port: - backend_port = 3389 if os_type == 'windows' else 22 - - frontend_ip_name = 'loadBalancerFrontEnd' - lb_resource = build_load_balancer_resource( - cmd, load_balancer, location, tags, backend_pool_name, nat_pool_name, backend_port, - frontend_ip_name, public_ip_address_id, subnet_id, private_ip_address='', - private_ip_allocation='Dynamic', sku=load_balancer_sku, instance_count=instance_count, - disable_overprovision=disable_overprovision, edge_zone=edge_zone) - lb_resource['dependsOn'] = lb_dependencies - master_template.add_resource(lb_resource) - - # Per https://learn.microsoft.com/azure/load-balancer/load-balancer-standard-overview#nsg - if load_balancer_sku and load_balancer_sku.lower() == 'standard' and nsg is None and os_type: - nsg_name = '{}NSG'.format(vmss_name) - master_template.add_resource(build_nsg_resource( - None, nsg_name, location, tags, 'rdp' if os_type.lower() == 'windows' else 'ssh')) - nsg = "[resourceId('Microsoft.Network/networkSecurityGroups', '{}')]".format(nsg_name) - vmss_dependencies.append('Microsoft.Network/networkSecurityGroups/{}'.format(nsg_name)) - - # Since NAT rule V2 can work for both Uniform and Flex VMSS, but basic LB SKU cannot fully support it - # So when users use Standard LB SKU, CLI uses NAT rule V2 by default - if not nat_pool_name: - - if nat_rule_name and is_basic_lb_sku: - logger.warning( - 'Since the basic SKU of load balancer cannot fully support NAT rule V2, ' - 'it is recommended to specify "--lb-sku Standard" to use standard SKU instead.') - - 
nat_rule_name = nat_rule_name or 'NatRule' - # The nested resource must follow the pattern parent_resource_name/nested_res_name - nat_rule_name = '{}/{}'.format(load_balancer, nat_rule_name) - nat_rule = build_nat_rule_v2(cmd, nat_rule_name, location, load_balancer, frontend_ip_name, - backend_pool_name, backend_port, instance_count, disable_overprovision) - master_template.add_resource(nat_rule) - - # Or handle application gateway creation - if app_gateway_type == 'new': - vmss_dependencies.append('Microsoft.Network/applicationGateways/{}'.format(app_gateway)) - - ag_dependencies = [] - if vnet_type == 'new': - ag_dependencies.append('Microsoft.Network/virtualNetworks/{}'.format(vnet_name)) - if public_ip_address_type == 'new': - public_ip_address = public_ip_address or '{}PublicIP'.format(app_gateway) - ag_dependencies.append( - 'Microsoft.Network/publicIpAddresses/{}'.format(public_ip_address)) - master_template.add_resource(build_public_ip_resource( - cmd, public_ip_address, location, tags, - _get_public_ip_address_allocation(public_ip_address_allocation, None), public_ip_address_dns_name, - None, zones)) - public_ip_address_id = '{}/publicIPAddresses/{}'.format(network_id_template, - public_ip_address) - - # calculate default names if not provided - backend_port = backend_port or 80 - - ag_resource = build_application_gateway_resource( - cmd, app_gateway, location, tags, backend_pool_name, backend_port, 'appGwFrontendIP', - public_ip_address_id, subnet_id, gateway_subnet_id, private_ip_address='', - private_ip_allocation='Dynamic', sku=app_gateway_sku, capacity=app_gateway_capacity) - ag_resource['dependsOn'] = ag_dependencies - master_template.add_variable( - 'appGwID', - "[resourceId('Microsoft.Network/applicationGateways', '{}')]".format(app_gateway)) - master_template.add_resource(ag_resource) - - # create storage accounts if needed for unmanaged disk storage - if storage_profile == StorageProfile.SAPirImage: - 
master_template.add_resource(build_vmss_storage_account_pool_resource( - cmd, 'storageLoop', location, tags, storage_sku, edge_zone)) - master_template.add_variable('storageAccountNames', [ - '{}{}'.format(naming_prefix, x) for x in range(5) - ]) - master_template.add_variable('vhdContainers', [ - "[concat('https://', variables('storageAccountNames')[{}], '.blob.{}/{}')]".format( - x, cmd.cli_ctx.cloud.suffixes.storage_endpoint, storage_container_name) for x in range(5) - ]) - vmss_dependencies.append('storageLoop') - - backend_address_pool_id = None - inbound_nat_pool_id = None - if load_balancer_type or app_gateway_type: - network_balancer = load_balancer if load_balancer_type else app_gateway - balancer_type = 'loadBalancers' if load_balancer_type else 'applicationGateways' - - if is_valid_resource_id(network_balancer): - # backend address pool needed by load balancer or app gateway - backend_address_pool_id = '{}/backendAddressPools/{}'.format(network_balancer, backend_pool_name) - if nat_pool_name: - inbound_nat_pool_id = '{}/inboundNatPools/{}'.format(network_balancer, nat_pool_name) - else: - # backend address pool needed by load balancer or app gateway - backend_address_pool_id = '{}/{}/{}/backendAddressPools/{}'.format( - network_id_template, balancer_type, network_balancer, backend_pool_name) - if nat_pool_name: - inbound_nat_pool_id = '{}/{}/{}/inboundNatPools/{}'.format( - network_id_template, balancer_type, network_balancer, nat_pool_name) - - if health_probe and not is_valid_resource_id(health_probe): - health_probe = '{}/loadBalancers/{}/probes/{}'.format(network_id_template, load_balancer, health_probe) - - ip_config_name = '{}IPConfig'.format(naming_prefix) - nic_name = '{}Nic'.format(naming_prefix) - - if custom_data: - custom_data = read_content_if_is_file(custom_data) - - if user_data: - user_data = read_content_if_is_file(user_data) - - if secrets: - secrets = _merge_secrets([validate_file_or_dict(secret) for secret in secrets]) - - if 
computer_name_prefix is not None and isinstance(computer_name_prefix, str): - naming_prefix = computer_name_prefix - - if orchestration_mode.lower() == uniform_str.lower(): - computer_name_prefix = naming_prefix - - if os_version and os_version != 'latest': - logger.warning('You are deploying VMSS pinned to a specific image version from Azure Marketplace. ' - 'Consider using "latest" as the image version.') - - vmss_resource = build_vmss_resource( - cmd=cmd, name=vmss_name, computer_name_prefix=computer_name_prefix, location=location, tags=tags, - overprovision=not disable_overprovision if orchestration_mode.lower() == uniform_str.lower() else None, - upgrade_policy_mode=upgrade_policy_mode, vm_sku=vm_sku, - instance_count=instance_count, ip_config_name=ip_config_name, nic_name=nic_name, subnet_id=subnet_id, - public_ip_per_vm=public_ip_per_vm, vm_domain_name=vm_domain_name, dns_servers=dns_servers, nsg=nsg, - accelerated_networking=accelerated_networking, admin_username=admin_username, - authentication_type=authentication_type, storage_profile=storage_profile, os_disk_name=os_disk_name, - disk_info=disk_info, os_type=os_type, image=image, admin_password=admin_password, - ssh_key_values=ssh_key_value, ssh_key_path=ssh_dest_key_path, os_publisher=os_publisher, os_offer=os_offer, - os_sku=os_sku, os_version=os_version, backend_address_pool_id=backend_address_pool_id, - inbound_nat_pool_id=inbound_nat_pool_id, health_probe=health_probe, - single_placement_group=single_placement_group, platform_fault_domain_count=platform_fault_domain_count, - custom_data=custom_data, secrets=secrets, license_type=license_type, zones=zones, priority=priority, - eviction_policy=eviction_policy, application_security_groups=application_security_groups, - ultra_ssd_enabled=ultra_ssd_enabled, proximity_placement_group=proximity_placement_group, - terminate_notification_time=terminate_notification_time, max_price=max_price, - scale_in_policy=scale_in_policy, 
os_disk_encryption_set=os_disk_encryption_set, - data_disk_encryption_sets=data_disk_encryption_sets, data_disk_iops=data_disk_iops, - data_disk_mbps=data_disk_mbps, automatic_repairs_grace_period=automatic_repairs_grace_period, - specialized=specialized, os_disk_size_gb=os_disk_size_gb, encryption_at_host=encryption_at_host, - host_group=host_group, max_batch_instance_percent=max_batch_instance_percent, - max_unhealthy_instance_percent=max_unhealthy_instance_percent, - max_unhealthy_upgraded_instance_percent=max_unhealthy_upgraded_instance_percent, - pause_time_between_batches=pause_time_between_batches, enable_cross_zone_upgrade=enable_cross_zone_upgrade, - prioritize_unhealthy_instances=prioritize_unhealthy_instances, edge_zone=edge_zone, user_data=user_data, - orchestration_mode=orchestration_mode, network_api_version=network_api_version, - enable_spot_restore=enable_spot_restore, spot_restore_timeout=spot_restore_timeout, - capacity_reservation_group=capacity_reservation_group, enable_auto_update=enable_auto_update, - patch_mode=patch_mode, enable_agent=enable_agent, security_type=security_type, - enable_secure_boot=enable_secure_boot, enable_vtpm=enable_vtpm, - automatic_repairs_action=automatic_repairs_action, v_cpus_available=v_cpus_available, - v_cpus_per_core=v_cpus_per_core, os_disk_security_encryption_type=os_disk_security_encryption_type, - os_disk_secure_vm_disk_encryption_set=os_disk_secure_vm_disk_encryption_set, - os_disk_delete_option=os_disk_delete_option, regular_priority_count=regular_priority_count, - regular_priority_percentage=regular_priority_percentage, disk_controller_type=disk_controller_type, - enable_osimage_notification=enable_osimage_notification, max_surge=max_surge, - enable_hibernation=enable_hibernation, enable_auto_os_upgrade=enable_auto_os_upgrade, - enable_proxy_agent=enable_proxy_agent, proxy_agent_mode=proxy_agent_mode, - security_posture_reference_id=security_posture_reference_id, - 
security_posture_reference_exclude_extensions=security_posture_reference_exclude_extensions, - enable_resilient_vm_creation=enable_resilient_creation, - enable_resilient_vm_deletion=enable_resilient_deletion, - additional_scheduled_events=additional_scheduled_events, - enable_user_reboot_scheduled_events=enable_user_reboot_scheduled_events, - enable_user_redeploy_scheduled_events=enable_user_redeploy_scheduled_events, - skuprofile_vmsizes=skuprofile_vmsizes, skuprofile_allostrat=skuprofile_allostrat, - skuprofile_rank=skuprofile_rank, - security_posture_reference_is_overridable=security_posture_reference_is_overridable, - zone_balance=zone_balance, wire_server_mode=wire_server_mode, imds_mode=imds_mode, - add_proxy_agent_extension=add_proxy_agent_extension, - wire_server_access_control_profile_reference_id=wire_server_access_control_profile_reference_id, - imds_access_control_profile_reference_id=imds_access_control_profile_reference_id, - enable_automatic_zone_balancing=enable_automatic_zone_balancing, - automatic_zone_balancing_strategy=automatic_zone_balancing_strategy, - automatic_zone_balancing_behavior=automatic_zone_balancing_behavior, - enable_automatic_repairs=enable_automatic_repairs) - - vmss_resource['dependsOn'] = vmss_dependencies - - if plan_name: - vmss_resource['plan'] = { - 'name': plan_name, - 'publisher': plan_publisher, - 'product': plan_product, - 'promotionCode': plan_promotion_code - } - - enable_local_identity = None - if assign_identity is not None: - vmss_resource['identity'], _, _, enable_local_identity = _build_identities_info( - assign_identity) - if identity_scope: - role_assignment_guid = str(_gen_guid()) - master_template.add_resource(build_msi_role_assignment(vmss_name, vmss_id, identity_role_id, - role_assignment_guid, identity_scope, False)) - if encryption_identity: - if 'identity' in vmss_resource and 'userAssignedIdentities' in vmss_resource['identity'] \ - and encryption_identity.lower() in \ - (k.lower() for k in 
vmss_resource['identity']['userAssignedIdentities'].keys()): - - if 'virtualMachineProfile' not in vmss_resource['properties']: - vmss_resource['properties']['virtualMachineProfile'] = {} - if 'securityProfile' not in vmss_resource['properties']['virtualMachineProfile']: - vmss_resource['properties']['virtualMachineProfile']['securityProfile'] = {} - if 'encryptionIdentity' not in vmss_resource['properties']['virtualMachineProfile']['securityProfile']: - vmss_resource['properties']['virtualMachineProfile']['securityProfile']['encryptionIdentity'] = {} - - vmss_securityProfile_EncryptionIdentity \ - = vmss_resource['properties']['virtualMachineProfile']['securityProfile']['encryptionIdentity'] - - if 'userAssignedIdentityResourceId' not in vmss_securityProfile_EncryptionIdentity or \ - vmss_securityProfile_EncryptionIdentity['userAssignedIdentityResourceId'] \ - != encryption_identity: - vmss_securityProfile_EncryptionIdentity['userAssignedIdentityResourceId'] = encryption_identity - vmss_resource['properties']['virtualMachineProfile']['securityProfile']['encryptionIdentity'] \ - = vmss_securityProfile_EncryptionIdentity - else: - raise ArgumentUsageError("Encryption Identity should be an ARM Resource ID of one of the " - "user assigned identities associated to the resource") - else: - raise CLIError('usage error: --orchestration-mode (Uniform | Flexible)') - - master_template.add_resource(vmss_resource) - master_template.add_output('VMSS', vmss_name, 'Microsoft.Compute', 'virtualMachineScaleSets', - output_type='object') - - if admin_password: - master_template.add_secure_parameter('adminPassword', admin_password) - - template = master_template.build() - parameters = master_template.build_parameters() - - # deploy ARM template - deployment_name = 'vmss_deploy_' + random_string(32) - client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES, - aux_subscriptions=aux_subscriptions).deployments - - DeploymentProperties = 
cmd.get_models('DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES) - properties = DeploymentProperties(template=template, parameters=parameters, mode='incremental') - - if validate: - from azure.cli.command_modules.vm._vm_utils import log_pprint_template - log_pprint_template(template) - log_pprint_template(parameters) - - Deployment = cmd.get_models('Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES) - deployment = Deployment(properties=properties) - if validate: - if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES): - validation_poller = client.begin_validate(resource_group_name, deployment_name, deployment) - return LongRunningOperation(cmd.cli_ctx)(validation_poller) - - return client.validate(resource_group_name, deployment_name, deployment) - - # creates the VMSS deployment - deployment_result = DeploymentOutputLongRunningOperation(cmd.cli_ctx)( - sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, deployment_name, deployment)) - - if orchestration_mode.lower() == uniform_str.lower() and assign_identity is not None: - vmss_info = get_vmss(cmd, resource_group_name, vmss_name) - if enable_local_identity and not identity_scope: - _show_missing_access_warning(resource_group_name, vmss_name, 'vmss') - deployment_result['vmss']['identity'] = _construct_identity_info(identity_scope, identity_role, - vmss_info.identity.principal_id, - vmss_info.identity.user_assigned_identities) - # Guest Attestation Extension and enable System Assigned MSI by default - is_trusted_launch = security_type and security_type.lower() == 'trustedlaunch' and\ - enable_vtpm and enable_secure_boot - is_confidential_vm = security_type and security_type.lower() == 'confidentialvm' - if (is_trusted_launch or is_confidential_vm) and enable_integrity_monitoring: - client = _compute_client_factory(cmd.cli_ctx) - vmss = client.virtual_machine_scale_sets.get(resource_group_name, vmss_name) - 
vmss.virtual_machine_profile.storage_profile.image_reference = None - VirtualMachineScaleSetExtension, VirtualMachineScaleSetExtensionProfile = cmd.get_models( - 'VirtualMachineScaleSetExtension', 'VirtualMachineScaleSetExtensionProfile') - if vmss.virtual_machine_profile.storage_profile.os_disk.os_type == 'Linux': - publisher = 'Microsoft.Azure.Security.LinuxAttestation' - if vmss.virtual_machine_profile.storage_profile.os_disk.os_type == 'Windows': - publisher = 'Microsoft.Azure.Security.WindowsAttestation' - version = _normalize_extension_version(cmd.cli_ctx, publisher, 'GuestAttestation', None, vmss.location) - ext = VirtualMachineScaleSetExtension(name='GuestAttestation', - publisher=publisher, - type_properties_type='GuestAttestation', - protected_settings=None, - type_handler_version=version, - settings=None, - auto_upgrade_minor_version=True, - provision_after_extensions=None, - enable_automatic_upgrade=not disable_integrity_monitoring_autoupgrade) - if not vmss.virtual_machine_profile.extension_profile: - vmss.virtual_machine_profile.extension_profile = VirtualMachineScaleSetExtensionProfile(extensions=[]) - vmss.virtual_machine_profile.extension_profile.extensions.append(ext) - try: - LongRunningOperation(cmd.cli_ctx)(client.virtual_machine_scale_sets.begin_create_or_update( - resource_group_name, vmss_name, vmss)) - logger.info('Guest Attestation Extension has been successfully installed by default' - 'when Trusted Launch configuration is met') - VirtualMachineScaleSetVMInstanceRequiredIDs = cmd.get_models('VirtualMachineScaleSetVMInstanceRequiredIDs') - instance_ids = VirtualMachineScaleSetVMInstanceRequiredIDs(instance_ids=['*']) - LongRunningOperation(cmd.cli_ctx)(client.virtual_machine_scale_sets.begin_update_instances( - resource_group_name, vmss_name, instance_ids)) - except Exception as e: - error_type = "Trusted Launch" if is_trusted_launch else "Confidential VM" - logger.error('Failed to install Guest Attestation Extension for %s. 
%s', error_type, e) - - return deployment_result - - -def _build_identities_info(identities): - from ._vm_utils import MSI_LOCAL_ID - identities = identities or [] - identity_types = [] - if not identities or MSI_LOCAL_ID in identities: - identity_types.append('SystemAssigned') - external_identities = [x for x in identities if x != MSI_LOCAL_ID] - if external_identities: - identity_types.append('UserAssigned') - identity_types = ','.join(identity_types) - info = {'type': identity_types} - if external_identities: - info['userAssignedIdentities'] = {e: {} for e in external_identities} - return (info, identity_types, external_identities, 'SystemAssigned' in identity_types) - - -def deallocate_vmss(cmd, resource_group_name, vm_scale_set_name, instance_ids=None, no_wait=False, hibernate=None): - from .aaz.latest.vmss import Deallocate as VmssDeallocate - from .aaz.latest.vmss.vms import Deallocate as VmssVmsDeallocate - # This is a workaround because the REST service of `VirtualMachineScaleSetVMs#begin_deallocate` - # does not accept `hibernate` at present - if instance_ids and len(instance_ids) == 1 and hibernate is None: - command_args = { - 'instance_id': instance_ids[0], - 'resource_group': resource_group_name, - 'vm_scale_set_name': vm_scale_set_name, - 'no_wait': no_wait - } - return VmssVmsDeallocate(cli_ctx=cmd.cli_ctx)(command_args=command_args) - - command_args = { - 'resource_group': resource_group_name, - 'vm_scale_set_name': vm_scale_set_name, - 'instance_ids': instance_ids, - 'no_wait': no_wait - } - if hibernate is not None: - command_args['hibernate'] = hibernate - - return VmssDeallocate(cli_ctx=cmd.cli_ctx)(command_args=command_args) - - -def get_vmss(cmd, resource_group_name, name, instance_id=None, include_user_data=False): - client = _compute_client_factory(cmd.cli_ctx) - - expand = None - if include_user_data: - expand = 'userData' - - if instance_id is not None: - if cmd.supported_api_version(min_api='2020-12-01', 
operation_group='virtual_machine_scale_sets'): - return client.virtual_machine_scale_set_vms.get(resource_group_name=resource_group_name, - vm_scale_set_name=name, instance_id=instance_id, - expand=expand) - return client.virtual_machine_scale_set_vms.get(resource_group_name=resource_group_name, - vm_scale_set_name=name, instance_id=instance_id) - - if cmd.supported_api_version(min_api='2021-03-01', operation_group='virtual_machine_scale_sets'): - return client.virtual_machine_scale_sets.get(resource_group_name, name, expand=expand) - return client.virtual_machine_scale_sets.get(resource_group_name, name) - - -def get_vmss_by_aaz(cmd, resource_group_name, name, instance_id=None, include_user_data=False): - from .operations.vmss import VMSSShow - from .operations.vmss_vms import VMSSVMSShow - - command_args = { - 'resource_group': resource_group_name, - 'vm_scale_set_name': name, - } - - if include_user_data: - command_args['expand'] = 'userData' - - if instance_id is not None: - command_args['instance_id'] = instance_id - return VMSSVMSShow(cli_ctx=cmd.cli_ctx)(command_args=command_args) - return VMSSShow(cli_ctx=cmd.cli_ctx)(command_args=command_args) - - -def _check_vmss_hyper_v_generation(cli_ctx, vmss): - hyper_v_generation = get_hyper_v_generation_from_vmss( - cli_ctx, vmss.virtual_machine_profile.storage_profile.image_reference, vmss.location) - security_profile = vmss.virtual_machine_profile.security_profile - security_type = security_profile.security_type if security_profile else None - - if hyper_v_generation == "V1" or (hyper_v_generation == "V2" and security_type is None): - logger.warning("Trusted Launch security type is supported on Hyper-V Generation 2 OS Images. 
" - "To know more please visit " - "https://learn.microsoft.com/en-us/azure/virtual-machines/trusted-launch") - elif hyper_v_generation == "V2" and security_type == "ConfidentialVM": - from azure.cli.core.azclierror import InvalidArgumentValueError - raise InvalidArgumentValueError("{} is already configured with {}. " - "Security Configuration cannot be updated from ConfidentialVM to " - "TrustedLaunch.".format(vmss.name, security_type)) - - -def _check_vmss_hyper_v_generation_by_aaz(cli_ctx, vmss): - hyper_v_generation = get_hyper_v_generation_from_vmss_by_aaz( - cli_ctx, vmss.get("virtualMachineProfile", {}).get("storageProfile", {}).get("imageReference", {}), vmss["location"]) # pylint: disable=line-too-long - security_profile = vmss.get("virtualMachineProfile", {}).get("securityProfile", {}) - security_type = security_profile.get("securityType", None) - - if hyper_v_generation == "V1" or (hyper_v_generation == "V2" and security_type is None): - logger.warning("Trusted Launch security type is supported on Hyper-V Generation 2 OS Images. " - "To know more please visit " - "https://learn.microsoft.com/en-us/azure/virtual-machines/trusted-launch") - elif hyper_v_generation == "V2" and security_type == "ConfidentialVM": - from azure.cli.core.azclierror import InvalidArgumentValueError - raise InvalidArgumentValueError("{} is already configured with {}. 
" - "Security Configuration cannot be updated from ConfidentialVM to " - "TrustedLaunch.".format(vmss["name"], security_type)) - - -def get_vmss_modified(cmd, resource_group_name, name, instance_id=None, security_type=None): - client = _compute_client_factory(cmd.cli_ctx) - if instance_id is not None: - vms = client.virtual_machine_scale_set_vms.get(resource_group_name=resource_group_name, - vm_scale_set_name=name, instance_id=instance_id) - # To avoid unnecessary permission check of image - if hasattr(vms, "storage_profile") and vms.storage_profile: - vms.storage_profile.image_reference = None - return vms - - vmss = client.virtual_machine_scale_sets.get(resource_group_name, name) - if security_type == 'TrustedLaunch': - _check_vmss_hyper_v_generation(cmd.cli_ctx, vmss) - # To avoid unnecessary permission check of image - if hasattr(vmss, "virtual_machine_profile") and vmss.virtual_machine_profile \ - and vmss.virtual_machine_profile.storage_profile: - vmss.virtual_machine_profile.storage_profile.image_reference = None - return vmss - - -def get_vmss_modified_by_aaz(cmd, resource_group_name, name, instance_id=None, security_type=None): - if instance_id is not None: - from .operations.vmss_vms import VMSSVMSShow - vms = VMSSVMSShow(cli_ctx=cmd.cli_ctx)(command_args={ - 'resource_group': resource_group_name, - "vm_scale_set_name": name, - "instance_id": instance_id - }) - - # To avoid unnecessary permission check of image - if vms.get("storageProfile", None) is not None: - vms["storageProfile"]["imageReference"] = None - return vms - - from .operations.vmss import VMSSShow - vmss = VMSSShow(cli_ctx=cmd.cli_ctx)(command_args={ - 'resource_group': resource_group_name, - "vm_scale_set_name": name, - }) - - if security_type == 'TrustedLaunch': - _check_vmss_hyper_v_generation_by_aaz(cmd.cli_ctx, vmss) - # To avoid unnecessary permission check of image - if vmss.get("virtualMachineProfile", {}).get("storageProfile", None) is not None: - 
vmss["virtualMachineProfile"]["storageProfile"]["imageReference"] = None - return vmss - - -def get_instances_list(cmd, resource_group_name, virtual_machine_scale_set_name, expand=None, filter=None, - select=None, pagination_limit=None, pagination_token=None, resiliency_view=False, **kwargs): - get_list_args = kwargs - get_list_args['resource_group'] = resource_group_name - get_list_args['virtual_machine_scale_set_name'] = virtual_machine_scale_set_name - get_list_args['expand'] = expand - get_list_args['filter'] = filter - get_list_args['select'] = select - get_list_args['pagination_limit'] = pagination_limit - get_list_args['pagination_token'] = pagination_token - - from .operations.vmss import VMSSListInstances - instances = VMSSListInstances(cli_ctx=cmd.cli_ctx)(command_args=get_list_args) - - if not resiliency_view: - return instances - - instances_id = [instance['instanceId'] for instance in instances] - - from .operations.vmss_vms import VMSSGetResiliencyView - return [VMSSGetResiliencyView(cli_ctx=cmd.cli_ctx)(command_args={ - 'instance_id': instance_id, - 'resource_group': resource_group_name, - 'vm_scale_set_name': virtual_machine_scale_set_name, - }) for instance_id in instances_id] - - -def get_vmss_instance_view(cmd, resource_group_name, vm_scale_set_name, instance_id=None): - if instance_id: - if instance_id == '*': - from .aaz.latest.vmss import ListInstances as VMSSListInstances - result = VMSSListInstances(cli_ctx=cmd.cli_ctx)(command_args={ - 'resource_group': resource_group_name, - 'virtual_machine_scale_set_name': vm_scale_set_name, - 'select': 'instanceView', - 'expand': 'instanceView', - }) - return [x.get("instanceView", None) for x in result if x is not None] - - from .aaz.latest.vmss.vms.instance_view import Show as VMSSVMSInstanceViewShow - return VMSSVMSInstanceViewShow(cli_ctx=cmd.cli_ctx)(command_args={ - 'resource_group': resource_group_name, - 'vm_scale_set_name': vm_scale_set_name, - 'instance_id': instance_id, - }) - - from 
.aaz.latest.vmss.instance_view import Show as VMSSInstanceViewShow - return VMSSInstanceViewShow(cli_ctx=cmd.cli_ctx)(command_args={ - 'resource_group': resource_group_name, - 'vm_scale_set_name': vm_scale_set_name, - }) - - -def list_vmss_instance_connection_info(cmd, resource_group_name, vm_scale_set_name): - from azure.mgmt.core.tools import parse_resource_id - from .operations.vmss import VMSSShow - - LBShow = import_aaz_by_profile(cmd.cli_ctx.cloud.profile, "network.lb").Show - PublicIPAddress = import_aaz_by_profile(cmd.cli_ctx.cloud.profile, "network.public_ip").Show - - command_args = { - 'resource_group': resource_group_name, - 'vm_scale_set_name': vm_scale_set_name - } - vmss = VMSSShow(cli_ctx=cmd.cli_ctx)(command_args=command_args) - - from ._vm_utils import raise_unsupported_error_for_flex_vmss_by_aaz - raise_unsupported_error_for_flex_vmss_by_aaz( - vmss, 'This command is not available for VMSS in Flex mode. ' - 'Please use the "az network public-ip list/show" to retrieve networking information.') - - # find the load balancer - nic_configs = \ - vmss.get('virtualMachineProfile', {}).get('networkProfile', {}).get('networkInterfaceConfigurations', []) - primary_nic_config = next((n for n in nic_configs if n.get('primary')), {}) - if primary_nic_config is None: - raise CLIError('could not find a primary NIC which is needed to search to load balancer') - - res_id = None - for ip in primary_nic_config.get('ipConfigurations', []): - if len(ip.get('loadBalancerInboundNatPools', [])) > 0: - res_id = ip['loadBalancerInboundNatPools'][0].get('id') - break - if len(ip.get('loadBalancerBackendAddressPools', [])) > 0: - res_id = ip['loadBalancerBackendAddressPools'][0].get('id') - break - - if not res_id: - raise ResourceNotFoundError('No load balancer exists to retrieve public IP address') - - lb_info = parse_resource_id(res_id) - lb_name = lb_info['name'] - lb_rg = lb_info['resource_group'] - - # get public ip - lb = LBShow(cli_ctx=cmd.cli_ctx)(command_args={ - 
'name': lb_name, - 'resource_group': lb_rg - }) - if 'publicIPAddress' in lb['frontendIPConfigurations'][0]: - res_id = lb['frontendIPConfigurations'][0]['publicIPAddress']['id'] - public_ip_info = parse_resource_id(res_id) - public_ip_name = public_ip_info['name'] - public_ip_rg = public_ip_info['resource_group'] - public_ip = PublicIPAddress(cli_ctx=cmd.cli_ctx)(command_args={ - 'name': public_ip_name, - 'resource_group': public_ip_rg - }) - public_ip_address = public_ip['ipAddress'] if 'ipAddress' in public_ip else None - # For NAT pool, get the frontend port and VMSS instance from inboundNatRules - is_nat_pool = True - instance_addresses = {} - for rule in lb['inboundNatRules']: - # If backend_ip_configuration does not exist, it means that NAT rule V2 is used - if 'backendIPConfiguration' not in rule or not rule['backendIPConfiguration']: - is_nat_pool = False - break - instance_id = parse_resource_id(rule['backendIPConfiguration']['id'])['child_name_1'] - instance_addresses['instance ' + instance_id] = '{}:{}'.format(public_ip_address, - rule['frontendPort']) - if is_nat_pool: - return instance_addresses - - # For NAT rule V2, get the frontend port and VMSS instance from loadBalancerBackendAddresses - for backend_address_pool in lb['backendAddressPools']: - if 'loadBalancerBackendAddresses' not in backend_address_pool or \ - not backend_address_pool['loadBalancerBackendAddresses']: - raise CLIError('There is no connection information. 
' - 'If you are using NAT rule V2, please confirm whether the load balancer SKU is Standard') - - for load_balancer_backend_addresse in backend_address_pool['loadBalancerBackendAddresses']: - - network_interface_ip_configuration = load_balancer_backend_addresse['networkInterfaceIPConfiguration'] - if not network_interface_ip_configuration or 'id' not in network_interface_ip_configuration: - continue - instance_id = parse_resource_id(network_interface_ip_configuration['id'])['child_name_1'] - - if not load_balancer_backend_addresse['inboundNatRulesPortMapping']: - continue - frontend_port = load_balancer_backend_addresse['inboundNatRulesPortMapping'][0]['frontendPort'] - instance_addresses['instance ' + instance_id] = '{}:{}'.format(public_ip_address, frontend_port) - - return instance_addresses - raise CLIError('The VM scale-set uses an internal load balancer, hence no connection information') - - -def list_vmss_instance_public_ips(cmd, resource_group_name, vm_scale_set_name): - from .operations.vmss import VMSSShow - ListInstancePublicIps = import_aaz_by_profile(cmd.cli_ctx.cloud.profile, "vmss").ListInstancePublicIps - - command_args = { - 'resource_group': resource_group_name, - 'vm_scale_set_name': vm_scale_set_name - } - vmss = VMSSShow(cli_ctx=cmd.cli_ctx)(command_args=command_args) - from ._vm_utils import raise_unsupported_error_for_flex_vmss_by_aaz - raise_unsupported_error_for_flex_vmss_by_aaz( - vmss, 'This command is not available for VMSS in Flex mode. 
' - 'Please use the "az network public-ip list/show" to retrieve networking information.') - - result = ListInstancePublicIps(cli_ctx=cmd.cli_ctx)(command_args={ - 'vmss_name': vm_scale_set_name, - 'resource_group': resource_group_name - }) - # filter away over-provisioned instances which are deleted after 'create/update' returns - return [r for r in result if 'ipAddress' in r and r['ipAddress']] - - -def reimage_vmss(cmd, resource_group_name, vm_scale_set_name, instance_ids=None, - force_update_os_disk_for_ephemeral=None, no_wait=False): - from .aaz.latest.vmss import Reimageall as VmssReimageAll, Reimage as VmssReimage - if instance_ids: - command_args = { - 'resource_group': resource_group_name, - 'vm_scale_set_name': vm_scale_set_name, - 'instance_ids': instance_ids, - 'no_wait': no_wait - } - return VmssReimageAll(cli_ctx=cmd.cli_ctx)(command_args=command_args) - - command_args = { - 'resource_group': resource_group_name, - 'vm_scale_set_name': vm_scale_set_name, - 'no_wait': no_wait - } - if force_update_os_disk_for_ephemeral is not None: - command_args['force_update_os_disk_for_ephemeral'] = force_update_os_disk_for_ephemeral - return VmssReimage(cli_ctx=cmd.cli_ctx)(command_args=command_args) - - -def restart_vmss(cmd, resource_group_name, vm_scale_set_name, instance_ids=None, no_wait=False): - from .aaz.latest.vmss import Restart as VmssRestart - if not instance_ids: - instance_ids = ['*'] - command_args = { - 'resource_group': resource_group_name, - 'vm_scale_set_name': vm_scale_set_name, - 'instance_ids': instance_ids, - 'no_wait': no_wait - } - return VmssRestart(cli_ctx=cmd.cli_ctx)(command_args=command_args) - - -# pylint: disable=inconsistent-return-statements -def scale_vmss(cmd, resource_group_name, vm_scale_set_name, new_capacity, no_wait=False): - VirtualMachineScaleSet = cmd.get_models('VirtualMachineScaleSet') - client = _compute_client_factory(cmd.cli_ctx) - vmss = client.virtual_machine_scale_sets.get(resource_group_name, vm_scale_set_name) - 
# pylint: disable=no-member - if vmss.sku.capacity == new_capacity: - return - - vmss.sku.capacity = new_capacity - vmss_new = VirtualMachineScaleSet(location=vmss.location, sku=vmss.sku) - if vmss.extended_location is not None: - vmss_new.extended_location = vmss.extended_location - return sdk_no_wait(no_wait, client.virtual_machine_scale_sets.begin_create_or_update, - resource_group_name, vm_scale_set_name, vmss_new) - - -def stop_vmss(cmd, resource_group_name, vm_scale_set_name, instance_ids=None, no_wait=False, skip_shutdown=False): - client = _compute_client_factory(cmd.cli_ctx) - VirtualMachineScaleSetVMInstanceRequiredIDs = cmd.get_models('VirtualMachineScaleSetVMInstanceRequiredIDs') - if instance_ids is None: - instance_ids = ['*'] - instance_ids = VirtualMachineScaleSetVMInstanceRequiredIDs(instance_ids=instance_ids) - if cmd.supported_api_version(min_api='2020-06-01', operation_group='virtual_machine_scale_sets'): - return sdk_no_wait( - no_wait, client.virtual_machine_scale_sets.begin_power_off, resource_group_name, vm_scale_set_name, - vm_instance_i_ds=instance_ids, skip_shutdown=skip_shutdown) - return sdk_no_wait(no_wait, client.virtual_machine_scale_sets.begin_power_off, resource_group_name, - vm_scale_set_name, vm_instance_i_ds=instance_ids) - - -def update_vmss_instances(cmd, resource_group_name, vm_scale_set_name, instance_ids, no_wait=False): - from .aaz.latest.vmss import Manualupgrade - return Manualupgrade(cli_ctx=cmd.cli_ctx)(command_args={ - 'resource_group': resource_group_name, - 'vm_scale_set_name': vm_scale_set_name, - 'instance_ids': instance_ids, - 'no_wait': no_wait, - }) - - -def update_vmss(cmd, resource_group_name, name, license_type=None, no_wait=False, instance_id=None, - protect_from_scale_in=None, protect_from_scale_set_actions=None, - enable_terminate_notification=None, terminate_notification_time=None, ultra_ssd_enabled=None, - scale_in_policy=None, priority=None, max_price=None, proximity_placement_group=None, - 
enable_automatic_repairs=None, automatic_repairs_grace_period=None, max_batch_instance_percent=None, - max_unhealthy_instance_percent=None, max_unhealthy_upgraded_instance_percent=None, - pause_time_between_batches=None, enable_cross_zone_upgrade=None, prioritize_unhealthy_instances=None, - user_data=None, enable_spot_restore=None, spot_restore_timeout=None, capacity_reservation_group=None, - vm_sku=None, ephemeral_os_disk_placement=None, force_deletion=None, enable_secure_boot=None, - enable_vtpm=None, automatic_repairs_action=None, v_cpus_available=None, v_cpus_per_core=None, - regular_priority_count=None, regular_priority_percentage=None, disk_controller_type=None, - enable_osimage_notification=None, custom_data=None, enable_hibernation=None, - security_type=None, enable_proxy_agent=None, proxy_agent_mode=None, - security_posture_reference_id=None, security_posture_reference_exclude_extensions=None, - max_surge=None, enable_resilient_creation=None, enable_resilient_deletion=None, - ephemeral_os_disk=None, ephemeral_os_disk_option=None, zones=None, additional_scheduled_events=None, - enable_user_reboot_scheduled_events=None, enable_user_redeploy_scheduled_events=None, - upgrade_policy_mode=None, enable_auto_os_upgrade=None, skuprofile_vmsizes=None, - skuprofile_allostrat=None, skuprofile_rank=None, - security_posture_reference_is_overridable=None, zone_balance=None, - wire_server_mode=None, imds_mode=None, add_proxy_agent_extension=None, - wire_server_access_control_profile_reference_id=None, - imds_access_control_profile_reference_id=None, enable_automatic_zone_balancing=None, - automatic_zone_balancing_strategy=None, automatic_zone_balancing_behavior=None, **kwargs): - from .operations.vmss_vms import convert_show_result_to_snake_case as vmss_vms_convert_show_result_to_snake_case - from .operations.vmss import convert_show_result_to_snake_case as vmss_convert_show_result_to_snake_case - vmss = kwargs['parameters'] - if instance_id: - vmss = 
vmss_vms_convert_show_result_to_snake_case(vmss) - else: - vmss = vmss_convert_show_result_to_snake_case(vmss) - - if wire_server_access_control_profile_reference_id is not None or \ - imds_access_control_profile_reference_id is not None: - from .aaz.latest.vmss import Patch as VMSSPatchUpdate - - class VMSSUpdateReferenceId(VMSSPatchUpdate): - def _output(self, *args, **kwargs): - result = self.deserialize_output(self.ctx.vars.instance, client_flatten=False) - return result - - security_profile = {'proxy_agent_settings': {}} - if wire_server_access_control_profile_reference_id: - security_profile['proxy_agent_settings']['wire_server'] = { - 'in_vm_access_control_profile_reference_id': wire_server_access_control_profile_reference_id} - if imds_access_control_profile_reference_id: - security_profile['proxy_agent_settings']['imds'] = { - 'in_vm_access_control_profile_reference_id': imds_access_control_profile_reference_id} - - LongRunningOperation(cmd.cli_ctx)(VMSSUpdateReferenceId(cli_ctx=cmd.cli_ctx)(command_args={ - 'vm_scale_set_name': name, - 'resource_group': resource_group_name, - 'virtual_machine_profile': { - 'security_profile': security_profile - } - })) - vmss = get_vmss_modified_by_aaz(cmd, resource_group_name, name, instance_id, security_type) - if instance_id: - vmss = vmss_vms_convert_show_result_to_snake_case(vmss) - else: - vmss = vmss_convert_show_result_to_snake_case(vmss) - - if add_proxy_agent_extension is not None: - if instance_id: - if vmss.get("security_profile", None) is None: - vmss["security_profile"] = {} - if vmss["security_profile"].get("proxy_agent_settings", None) is None: - vmss["security_profile"]["proxy_agent_settings"] = {} - - vmss["security_profile"]["proxy_agent_settings"]["add_proxy_agent_extension"] = add_proxy_agent_extension - else: - if vmss.get("virtual_machine_profile", None) is None: - vmss["virtual_machine_profile"] = {} - if vmss["virtual_machine_profile"].get("security_profile", None) is None: - 
vmss["virtual_machine_profile"]["security_profile"] = {} - if vmss["virtual_machine_profile"]["security_profile"].get("proxy_agent_settings", None) is None: - vmss["virtual_machine_profile"]["security_profile"]["proxy_agent_settings"] = {} - - vmss["virtual_machine_profile"]["security_profile"]["proxy_agent_settings"]["add_proxy_agent_extension"] = \ - add_proxy_agent_extension - - # handle vmss instance update - from azure.cli.core.util import b64encode - - if instance_id is not None: - if license_type is not None: - vmss["license_type"] = license_type - - if user_data is not None: - vmss["user_data"] = b64encode(user_data) - - if vmss.get("protection_policy", None) is None: - vmss["protection_policy"] = {} - - if protect_from_scale_in is not None: - vmss["protection_policy"]["protect_from_scale_in"] = protect_from_scale_in - - if protect_from_scale_set_actions is not None: - vmss["protection_policy"]["protect_from_scale_set_actions"] = protect_from_scale_set_actions - - vmss["resource_group"] = resource_group_name - vmss["vm_scale_set_name"] = name - vmss["instance_id"] = instance_id - vmss["no_wait"] = no_wait - - from .operations.vmss_vms import VMSSVMSCreate - return VMSSVMSCreate(cli_ctx=cmd.cli_ctx)(command_args=vmss) - - # else handle vmss update - if license_type is not None: - if vmss.get("virtual_machine_profile", None) is None: - vmss["virtual_machine_profile"] = {} - vmss["virtual_machine_profile"]["license_type"] = license_type - - if user_data is not None: - if vmss.get("virtual_machine_profile", None) is None: - vmss["virtual_machine_profile"] = {} - vmss["virtual_machine_profile"]["user_data"] = b64encode(user_data) - - if v_cpus_available is not None or v_cpus_per_core is not None: - if vmss.get("virtual_machine_profile", None) is None: - vmss["virtual_machine_profile"] = {} - if vmss["virtual_machine_profile"].get("hardware_profile", None) is None: - vmss["virtual_machine_profile"]["hardware_profile"] = {} - if 
vmss["virtual_machine_profile"]["hardware_profile"].get("vm_size_properties", None) is None: - vmss["virtual_machine_profile"]["hardware_profile"]["vm_size_properties"] = {} - - if v_cpus_available is not None: - vmss["virtual_machine_profile"]["hardware_profile"]["vm_size_properties"]["v_cp_us_available"] = v_cpus_available # pylint: disable=line-too-long - if v_cpus_per_core is not None: - vmss["virtual_machine_profile"]["hardware_profile"]["vm_size_properties"]["v_cp_us_per_core"] = v_cpus_per_core # pylint: disable=line-too-long - - if capacity_reservation_group is not None: - if vmss.get("virtual_machine_profile", None) is None: - vmss["virtual_machine_profile"] = {} - if capacity_reservation_group == 'None': - capacity_reservation_group = None - - sub_resource = {"id": capacity_reservation_group} - capacity_reservation = {"capacity_reservation_group": sub_resource} - vmss["virtual_machine_profile"]["capacity_reservation"] = capacity_reservation - - if enable_terminate_notification is not None or terminate_notification_time is not None: - if vmss.get("virtual_machine_profile", None) is None: - vmss["virtual_machine_profile"] = {} - if vmss["virtual_machine_profile"].get("scheduled_events_profile", None) is None: - vmss["virtual_machine_profile"]["scheduled_events_profile"] = {} - vmss["virtual_machine_profile"]["scheduled_events_profile"]["terminate_notification_profile"] = \ - {"not_before_timeout": terminate_notification_time, - "enable": enable_terminate_notification} - - if additional_scheduled_events is not None or \ - enable_user_reboot_scheduled_events is not None or enable_user_redeploy_scheduled_events is not None: - if vmss.get("scheduled_events_policy", None) is None: - vmss["scheduled_events_policy"] = {} - - if additional_scheduled_events is not None: - if vmss["scheduled_events_policy"].get("scheduled_events_additional_publishing_targets", None) is None: - vmss["scheduled_events_policy"]["scheduled_events_additional_publishing_targets"] = {} - if 
vmss["scheduled_events_policy"]["scheduled_events_additional_publishing_targets"].get("event_grid_and_resource_graph", None) is None: # pylint: disable=line-too-long - vmss["scheduled_events_policy"]["scheduled_events_additional_publishing_targets"]["event_grid_and_resource_graph"] = {} # pylint: disable=line-too-long - vmss["scheduled_events_policy"]["scheduled_events_additional_publishing_targets"][ - "event_grid_and_resource_graph"]["enable"] = additional_scheduled_events - - if enable_user_redeploy_scheduled_events is not None: - if vmss["scheduled_events_policy"].get("user_initiated_redeploy", None) is None: - vmss["scheduled_events_policy"]["user_initiated_redeploy"] = {} - vmss["scheduled_events_policy"]["user_initiated_redeploy"]["automatically_approve"] \ - = enable_user_redeploy_scheduled_events - - if enable_user_reboot_scheduled_events is not None: - if vmss["scheduled_events_policy"].get("user_initiated_reboot", None) is None: - vmss["scheduled_events_policy"]["user_initiated_reboot"] = {} - vmss["scheduled_events_policy"]["user_initiated_reboot"][ - "automatically_approve"] = enable_user_reboot_scheduled_events - - if enable_osimage_notification is not None: - if vmss.get("virtual_machine_profile", None) is None: - vmss["virtual_machine_profile"] = {} - if vmss["virtual_machine_profile"].get("scheduled_events_profile", None) is None: - vmss["virtual_machine_profile"]["scheduled_events_profile"] = {} - vmss["virtual_machine_profile"]["scheduled_events_profile"]["os_image_notification_profile"] = { - "enable": enable_osimage_notification - } - - if enable_automatic_repairs is not None or automatic_repairs_grace_period is not None or automatic_repairs_action is not None: # pylint: disable=line-too-long - if vmss.get("automatic_repairs_policy", None) is None: - vmss["automatic_repairs_policy"] = {} - if enable_automatic_repairs is not None: - vmss["automatic_repairs_policy"]["enabled"] = enable_automatic_repairs - if automatic_repairs_grace_period is not 
None: - vmss["automatic_repairs_policy"]["grace_period"] = automatic_repairs_grace_period - if automatic_repairs_action is not None: - vmss["automatic_repairs_policy"]["repair_action"] = automatic_repairs_action - - if ultra_ssd_enabled is not None: - if vmss.get("additional_capabilities", None) is None: - vmss["additional_capabilities"] = {"ultra_ssd_enabled": ultra_ssd_enabled} - else: - vmss["additional_capabilities"]["ultra_ssd_enabled"] = ultra_ssd_enabled - - if scale_in_policy is not None or force_deletion is not None: - if vmss.get("scale_in_policy", None) is None: - vmss["scale_in_policy"] = {} - if scale_in_policy is not None: - vmss["scale_in_policy"]["rules"] = scale_in_policy - if force_deletion is not None: - vmss["scale_in_policy"]["force_deletion"] = force_deletion - - if enable_spot_restore is not None: - if vmss.get("spot_restore_policy", None) is None: - vmss["spot_restore_policy"] = {} - vmss["spot_restore_policy"]["enabled"] = enable_spot_restore - - if spot_restore_timeout is not None: - if vmss.get("spot_restore_policy", None) is None: - vmss["spot_restore_policy"] = {} - vmss["spot_restore_policy"]["restore_timeout"] = spot_restore_timeout - - if priority is not None: - if vmss.get("virtual_machine_profile", None) is None: - vmss["virtual_machine_profile"] = {} - vmss["virtual_machine_profile"]["priority"] = priority - - if max_price is not None: - if vmss.get("virtual_machine_profile", None) is None: - vmss["virtual_machine_profile"] = {} - if vmss["virtual_machine_profile"].get("billing_profile", None) is None: - vmss["virtual_machine_profile"]["billing_profile"] = {} - vmss["virtual_machine_profile"]["billing_profile"]["max_price"] = max_price - - if security_type is not None or enable_secure_boot is not None or enable_vtpm is not None: - if vmss.get("virtual_machine_profile", None) is None: - vmss["virtual_machine_profile"] = {} - - security_profile = vmss["virtual_machine_profile"].get("security_profile", {}) - prev_security_type = 
security_profile.get("security_type", None) - # At present, `SecurityType` has options `TrustedLaunch` and `Standard` - if security_type == 'TrustedLaunch' and prev_security_type != security_type: - vmss["virtual_machine_profile"]["security_profile"] = { - 'security_type': security_type, - 'uefi_settings': { - 'secure_boot_enabled': enable_secure_boot if enable_secure_boot is not None else False, - 'v_tpm_enabled': enable_vtpm if enable_vtpm is not None else True - } - } - elif security_type == 'Standard': - if prev_security_type == 'TrustedLaunch': - logger.warning('Turning off Trusted launch disables foundational security for your VMs. ' - 'For more information, visit https://aka.ms/TrustedLaunch') - vmss["virtual_machine_profile"]["security_profile"] = { - 'security_type': security_type, - 'uefi_settings': None - } - else: - vmss["virtual_machine_profile"]["security_profile"] = { - 'uefi_settings': { - 'secure_boot_enabled': enable_secure_boot, - 'v_tpm_enabled': enable_vtpm - }} - - if enable_proxy_agent is not None or wire_server_mode is not None or imds_mode is not None: - if vmss.get("virtual_machine_profile", None) is None: - vmss["virtual_machine_profile"] = {} - - if vmss["virtual_machine_profile"].get("security_profile", None) is None: - vmss["virtual_machine_profile"]["security_profile"] = { - "proxy_agent_settings": { - "wire_server": {}, - "imds": {} - } - } - elif vmss["virtual_machine_profile"]["security_profile"].get("proxy_agent_settings", None) is None: - vmss["virtual_machine_profile"]["security_profile"]["proxy_agent_settings"] = { - "wire_server": {}, "imds": {} - } - else: - if vmss["virtual_machine_profile"]["security_profile"]["proxy_agent_settings"].get("wire_server", None) is None: # pylint: disable=line-too-long - vmss["virtual_machine_profile"]["security_profile"]["proxy_agent_settings"]["wire_server"] = {} - if vmss["virtual_machine_profile"]["security_profile"]["proxy_agent_settings"].get("imds", None) is None: - 
vmss["virtual_machine_profile"]["security_profile"]["proxy_agent_settings"]["imds"] = {} - - if enable_proxy_agent is not None: - vmss["virtual_machine_profile"]["security_profile"]["proxy_agent_settings"]["enabled"] = enable_proxy_agent - if wire_server_mode is not None: - vmss["virtual_machine_profile"]["security_profile"]["proxy_agent_settings"]["wire_server"]["mode"] \ - = wire_server_mode - if imds_mode is not None: - vmss["virtual_machine_profile"]["security_profile"]["proxy_agent_settings"]["imds"]["mode"] = imds_mode - - if regular_priority_count is not None or regular_priority_percentage is not None: - if vmss.get("orchestration_mode", None) != 'Flexible': - raise ValidationError("--regular-priority-count/--regular-priority-percentage is only available for" - " VMSS with flexible orchestration mode") - if vmss.get("priority_mix_policy", None) is None: - vmss["priority_mix_policy"] = { - 'base_regular_priority_count': regular_priority_count, - 'regular_priority_percentage_above_base': regular_priority_percentage - } - else: - if regular_priority_count is not None: - vmss["priority_mix_policy"]["base_regular_priority_count"] = regular_priority_count - if regular_priority_percentage is not None: - vmss["priority_mix_policy"]["regular_priority_percentage_above_base"] = regular_priority_percentage - - if proximity_placement_group is not None: - vmss["proximity_placement_group"] = {'id': proximity_placement_group} - - # pylint: disable=too-many-boolean-expressions - if max_batch_instance_percent is not None or max_unhealthy_instance_percent is not None \ - or max_unhealthy_upgraded_instance_percent is not None or pause_time_between_batches is not None \ - or enable_cross_zone_upgrade is not None or prioritize_unhealthy_instances is not None \ - or max_surge is not None: - if vmss.get("upgrade_policy", None) is None: - vmss["upgrade_policy"] = {"rolling_upgrade_policy": None} - if vmss["upgrade_policy"].get("rolling_upgrade_policy", None) is None: - 
vmss["upgrade_policy"]["rolling_upgrade_policy"] = { - 'max_batch_instance_percent': max_batch_instance_percent, - 'max_unhealthy_instance_percent': max_unhealthy_instance_percent, - 'max_unhealthy_upgraded_instance_percent': max_unhealthy_upgraded_instance_percent, - 'pause_time_between_batches': pause_time_between_batches, - 'enable_cross_zone_upgrade': enable_cross_zone_upgrade, - 'prioritize_unhealthy_instances': prioritize_unhealthy_instances, - 'max_surge': max_surge - } - else: - if max_batch_instance_percent is not None: - vmss["upgrade_policy"]["rolling_upgrade_policy"]["max_batch_instance_percent"] \ - = max_batch_instance_percent - if max_unhealthy_instance_percent is not None: - vmss["upgrade_policy"]["rolling_upgrade_policy"]["max_unhealthy_instance_percent"] \ - = max_unhealthy_instance_percent - if max_unhealthy_upgraded_instance_percent is not None: - vmss["upgrade_policy"]["rolling_upgrade_policy"]["max_unhealthy_upgraded_instance_percent"] \ - = max_unhealthy_upgraded_instance_percent - if pause_time_between_batches is not None: - vmss["upgrade_policy"]["rolling_upgrade_policy"]["pause_time_between_batches"] \ - = pause_time_between_batches - if enable_cross_zone_upgrade is not None: - vmss["upgrade_policy"]["rolling_upgrade_policy"]["enable_cross_zone_upgrade"] \ - = enable_cross_zone_upgrade - if prioritize_unhealthy_instances is not None: - vmss["upgrade_policy"]["rolling_upgrade_policy"]["prioritize_unhealthy_instances"] \ - = prioritize_unhealthy_instances - if max_surge is not None: - vmss["upgrade_policy"]["rolling_upgrade_policy"]["max_surge"] = max_surge - - if upgrade_policy_mode is not None: - if vmss.get("upgrade_policy", None) is None: - vmss["upgrade_policy"] = {} - vmss["upgrade_policy"]["mode"] = upgrade_policy_mode - - if enable_auto_os_upgrade is not None: - if vmss.get("upgrade_policy", None) is None: - vmss["upgrade_policy"] = {} - if vmss["upgrade_policy"].get("automatic_os_upgrade_policy", None) is None: - 
vmss["upgrade_policy"]["automatic_os_upgrade_policy"] \ - = {'enable_automatic_os_upgrade': enable_auto_os_upgrade} - else: - vmss["upgrade_policy"]["automatic_os_upgrade_policy"]["enable_automatic_os_upgrade"] \ - = enable_auto_os_upgrade - - if vm_sku is not None: - if vmss.get("sku", {}).get("name", None) == vm_sku: - logger.warning("VMSS sku is already %s", vm_sku) - else: - if vmss.get("sku", None) is None: - vmss["sku"] = {} - vmss["sku"]["name"] = vm_sku - - sku_profile = {} - if skuprofile_vmsizes is not None or skuprofile_allostrat is not None: - if skuprofile_vmsizes is not None: - sku_profile_vmsizes_list = [] - for vm_size in skuprofile_vmsizes: - vmsize_obj = { - 'name': vm_size - } - sku_profile_vmsizes_list.append(vmsize_obj) - sku_profile['vm_sizes'] = sku_profile_vmsizes_list - - if skuprofile_rank: - for vm_size, rank in zip(sku_profile_vmsizes_list, skuprofile_rank): - vm_size['rank'] = rank - - if skuprofile_allostrat is not None: - sku_profile['allocation_strategy'] = skuprofile_allostrat - vmss["sku_profile"] = sku_profile - - if ephemeral_os_disk_placement is not None or ephemeral_os_disk_option is not None or ephemeral_os_disk is not None: - if vmss.get("virtual_machine_profile", None) is None: - vmss["virtual_machine_profile"] = {} - if vmss["virtual_machine_profile"].get("storage_profile", None) is None: - vmss["virtual_machine_profile"]["storage_profile"] = {} - if vmss["virtual_machine_profile"]["storage_profile"].get("os_disk", None) is None: - vmss["virtual_machine_profile"]["storage_profile"]["os_disk"] = {} - if vmss["virtual_machine_profile"]["storage_profile"]["os_disk"].get("diff_disk_settings", None) is None: - vmss["virtual_machine_profile"]["storage_profile"]["os_disk"]["diff_disk_settings"] = {} - - if ephemeral_os_disk_placement is not None: - vmss["virtual_machine_profile"]["storage_profile"]["os_disk"]["diff_disk_settings"]["placement"] \ - = ephemeral_os_disk_placement - if ephemeral_os_disk_option is not None: - 
vmss["virtual_machine_profile"]["storage_profile"]["os_disk"]["diff_disk_settings"]["option"] \ - = ephemeral_os_disk_option - if ephemeral_os_disk is False: - vmss["virtual_machine_profile"]["storage_profile"]["os_disk"]["diff_disk_settings"] = {} - - if disk_controller_type is not None: - if vmss.get("virtual_machine_profile", None) is None: - vmss["virtual_machine_profile"] = {} - if vmss["virtual_machine_profile"].get("storage_profile", None) is None: - vmss["virtual_machine_profile"]["storage_profile"] = {} - vmss["virtual_machine_profile"]["storage_profile"]["disk_controller_type"] = disk_controller_type - - if custom_data is not None: - if vmss.get("virtual_machine_profile", None) is None: - vmss["virtual_machine_profile"] = {} - if vmss["virtual_machine_profile"].get("os_profile", None) is None: - vmss["virtual_machine_profile"]["os_profile"] = {} - custom_data = read_content_if_is_file(custom_data) - vmss["virtual_machine_profile"]["os_profile"]["custom_data"] = b64encode(custom_data) - - if enable_hibernation is not None: - if vmss.get("additional_capabilities", None) is None: - vmss["additional_capabilities"] = {"hibernation_enabled": enable_hibernation} - else: - vmss["additional_capabilities"]["hibernation_enabled"] = enable_hibernation - - if security_posture_reference_id is not None or security_posture_reference_exclude_extensions is not None or \ - security_posture_reference_is_overridable is not None: - if vmss.get("virtual_machine_profile", None) is None: - vmss["virtual_machine_profile"] = {} - if vmss["virtual_machine_profile"].get("security_posture_reference", None) is None: - vmss["virtual_machine_profile"]["security_posture_reference"] = {} - - if security_posture_reference_id is not None: - vmss["virtual_machine_profile"]["security_posture_reference"]["id"] = security_posture_reference_id - if security_posture_reference_exclude_extensions is not None: - vmss["virtual_machine_profile"]["security_posture_reference"]["exclude_extensions"] \ - = 
security_posture_reference_exclude_extensions - if security_posture_reference_is_overridable is not None: - vmss["virtual_machine_profile"]["security_posture_reference"]["is_overridable"] \ - = security_posture_reference_is_overridable - - if enable_resilient_creation is not None or enable_resilient_deletion is not None: - if vmss.get("resiliency_policy", None) is None: - vmss["resiliency_policy"] = {} - if enable_resilient_creation is not None: - vmss["resiliency_policy"]["resilient_vm_creation_policy"] = {'enabled': enable_resilient_creation} - if enable_resilient_deletion is not None: - vmss["resiliency_policy"]["resilient_vm_deletion_policy"] = {'enabled': enable_resilient_deletion} - - if enable_automatic_zone_balancing is not None or automatic_zone_balancing_strategy is not None or \ - automatic_zone_balancing_behavior is not None: - if vmss.get("resiliency_policy", None) is None: - vmss["resiliency_policy"] = {} - if vmss["resiliency_policy"].get("automatic_zone_rebalancing_policy", None) is None: - vmss["resiliency_policy"]["automatic_zone_rebalancing_policy"] = {} - - if enable_automatic_zone_balancing is not None: - vmss["resiliency_policy"]["automatic_zone_rebalancing_policy"]["enabled"] = enable_automatic_zone_balancing - - if automatic_zone_balancing_strategy is not None: - vmss["resiliency_policy"]["automatic_zone_rebalancing_policy"]["rebalance_strategy"] \ - = automatic_zone_balancing_strategy - - if automatic_zone_balancing_behavior is not None: - vmss["resiliency_policy"]["automatic_zone_rebalancing_policy"]["rebalance_behavior"] \ - = automatic_zone_balancing_behavior - - if zones is not None: - vmss["zones"] = zones - - if zone_balance is not None: - vmss["zone_balance"] = zone_balance - - vmss["resource_group"] = resource_group_name - vmss["vm_scale_set_name"] = name - vmss["no_wait"] = no_wait - - from .operations.vmss import VMSSCreate - return VMSSCreate(cli_ctx=cmd.cli_ctx)(command_args=vmss) - -# endregion - - -# region 
VirtualMachineScaleSets Diagnostics -def set_vmss_diagnostics_extension( - cmd, resource_group_name, vmss_name, settings, protected_settings=None, version=None, - no_auto_upgrade=False): - client = _compute_client_factory(cmd.cli_ctx) - vmss = client.virtual_machine_scale_sets.get(resource_group_name, vmss_name) - # pylint: disable=no-member - is_linux_os = _is_linux_os(vmss.virtual_machine_profile) - vm_extension_name = _LINUX_DIAG_EXT if is_linux_os else _WINDOWS_DIAG_EXT - if is_linux_os and vmss.virtual_machine_profile.extension_profile: # check incompatibles - exts = vmss.virtual_machine_profile.extension_profile.extensions or [] - major_ver = extension_mappings[_LINUX_DIAG_EXT]['version'].split('.', maxsplit=1)[0] - # For VMSS, we don't do auto-removal like VM because there is no reliable API to wait for - # the removal done before we can install the newer one - if next((e for e in exts if e.name == _LINUX_DIAG_EXT and - not e.type_handler_version.startswith(major_ver + '.')), None): - delete_cmd = 'az vmss extension delete -g {} --vmss-name {} -n {}'.format( - resource_group_name, vmss_name, vm_extension_name) - raise CLIError("There is an incompatible version of diagnostics extension installed. " - "Please remove it by running '{}', and retry. 
'az vmss update-instances'" - " might be needed if with manual upgrade policy".format(delete_cmd)) - - poller = set_vmss_extension(cmd, resource_group_name, vmss_name, vm_extension_name, - extension_mappings[vm_extension_name]['publisher'], - version or extension_mappings[vm_extension_name]['version'], - settings, - protected_settings, - no_auto_upgrade) - - result = LongRunningOperation(cmd.cli_ctx)(poller) - UpgradeMode = cmd.get_models('UpgradeMode') - if vmss.upgrade_policy.mode == UpgradeMode.manual: - poller2 = update_vmss_instances(cmd, resource_group_name, vmss_name, ['*']) - LongRunningOperation(cmd.cli_ctx)(poller2) - return result -# endregion - - -# region VirtualMachineScaleSets Disks (Managed) -def attach_managed_data_disk_to_vmss(cmd, resource_group_name, vmss_name, size_gb=None, instance_id=None, lun=None, - caching=None, disk=None, sku=None): - - def _init_data_disk(storage_profile, lun, existing_disk=None): - data_disks = storage_profile.data_disks or [] - if lun is None: - lun = _get_disk_lun(data_disks) - if existing_disk is None: - data_disk = DataDisk(lun=lun, create_option=DiskCreateOptionTypes.empty, disk_size_gb=size_gb, - caching=caching, managed_disk=ManagedDiskParameters(storage_account_type=sku)) - else: - data_disk = DataDisk(lun=lun, create_option=DiskCreateOptionTypes.attach, caching=caching, - managed_disk=ManagedDiskParameters(id=existing_disk, storage_account_type=sku)) - - data_disks.append(data_disk) - storage_profile.data_disks = data_disks - - DiskCreateOptionTypes, ManagedDiskParameters = cmd.get_models( - 'DiskCreateOptionTypes', 'ManagedDiskParameters') - if disk is None: - DataDisk = cmd.get_models('VirtualMachineScaleSetDataDisk') - else: - DataDisk = cmd.get_models('DataDisk') - - client = _compute_client_factory(cmd.cli_ctx) - if instance_id is None: - vmss = client.virtual_machine_scale_sets.get(resource_group_name, vmss_name) - # Avoid unnecessary permission error - 
vmss.virtual_machine_profile.storage_profile.image_reference = None - # pylint: disable=no-member - _init_data_disk(vmss.virtual_machine_profile.storage_profile, lun) - return client.virtual_machine_scale_sets.begin_create_or_update(resource_group_name, vmss_name, vmss) - - vmss_vm = client.virtual_machine_scale_set_vms.get(resource_group_name, vmss_name, instance_id) - # Avoid unnecessary permission error - vmss_vm.storage_profile.image_reference = None - _init_data_disk(vmss_vm.storage_profile, lun, disk) - return client.virtual_machine_scale_set_vms.begin_update(resource_group_name, vmss_name, instance_id, vmss_vm) - - -def detach_disk_from_vmss(cmd, resource_group_name, vmss_name, lun, instance_id=None): - client = _compute_client_factory(cmd.cli_ctx) - if instance_id is None: - vmss = client.virtual_machine_scale_sets.get(resource_group_name, vmss_name) - # Avoid unnecessary permission error - vmss.virtual_machine_profile.storage_profile.image_reference = None - # pylint: disable=no-member - data_disks = vmss.virtual_machine_profile.storage_profile.data_disks - else: - vmss_vm = client.virtual_machine_scale_set_vms.get(resource_group_name, vmss_name, instance_id) - # Avoid unnecessary permission error - vmss_vm.storage_profile.image_reference = None - data_disks = vmss_vm.storage_profile.data_disks - - if not data_disks: - raise CLIError("Data disk doesn't exist") - - leftovers = [d for d in data_disks if d.lun != lun] - if len(data_disks) == len(leftovers): - raise CLIError("Could not find the data disk with lun '{}'".format(lun)) - - if instance_id is None: - vmss.virtual_machine_profile.storage_profile.data_disks = leftovers - return client.virtual_machine_scale_sets.begin_create_or_update(resource_group_name, vmss_name, vmss) - vmss_vm.storage_profile.data_disks = leftovers - return client.virtual_machine_scale_set_vms.begin_update(resource_group_name, vmss_name, instance_id, vmss_vm) -# endregion - - -# region VirtualMachineScaleSets Extensions -def 
delete_vmss_extension(cmd, resource_group_name, vmss_name, extension_name): - client = _compute_client_factory(cmd.cli_ctx) - vmss = client.virtual_machine_scale_sets.get(resource_group_name=resource_group_name, vm_scale_set_name=vmss_name) - # Avoid unnecessary permission error - vmss.virtual_machine_profile.storage_profile.image_reference = None - # pylint: disable=no-member - if not vmss.virtual_machine_profile.extension_profile: - raise CLIError('Scale set has no extensions to delete') - - keep_list = [e for e in vmss.virtual_machine_profile.extension_profile.extensions - if e.name != extension_name] - if len(keep_list) == len(vmss.virtual_machine_profile.extension_profile.extensions): - raise CLIError('Extension {} not found'.format(extension_name)) - - vmss.virtual_machine_profile.extension_profile.extensions = keep_list - - return client.virtual_machine_scale_sets.begin_create_or_update(resource_group_name=resource_group_name, - vm_scale_set_name=vmss_name, parameters=vmss) - - -# pylint: disable=inconsistent-return-statements -def get_vmss_extension(cmd, resource_group_name, vmss_name, extension_name): - client = _compute_client_factory(cmd.cli_ctx) - vmss = client.virtual_machine_scale_sets.get(resource_group_name=resource_group_name, vm_scale_set_name=vmss_name) - # pylint: disable=no-member - if not vmss.virtual_machine_profile.extension_profile: - return - return next((e for e in vmss.virtual_machine_profile.extension_profile.extensions - if e.name == extension_name), None) - - -def list_vmss_extensions(cmd, resource_group_name, vmss_name): - client = _compute_client_factory(cmd.cli_ctx) - vmss = client.virtual_machine_scale_sets.get(resource_group_name=resource_group_name, vm_scale_set_name=vmss_name) - # pylint: disable=no-member - if vmss.virtual_machine_profile and vmss.virtual_machine_profile.extension_profile: - return vmss.virtual_machine_profile.extension_profile.extensions - return None - - -def set_vmss_extension(cmd, resource_group_name, 
vmss_name, extension_name, publisher, version=None, - settings=None, protected_settings=None, no_auto_upgrade=False, force_update=False, - no_wait=False, extension_instance_name=None, provision_after_extensions=None, - enable_auto_upgrade=None): - if not extension_instance_name: - extension_instance_name = extension_name - - auto_upgrade_extensions = ['CodeIntegrityAgent'] - if extension_name in auto_upgrade_extensions and enable_auto_upgrade is None: - enable_auto_upgrade = True - - client = _compute_client_factory(cmd.cli_ctx) - vmss = client.virtual_machine_scale_sets.get(resource_group_name=resource_group_name, vm_scale_set_name=vmss_name) - # Avoid unnecessary permission error - vmss.virtual_machine_profile.storage_profile.image_reference = None - VirtualMachineScaleSetExtension, VirtualMachineScaleSetExtensionProfile = cmd.get_models( - 'VirtualMachineScaleSetExtension', 'VirtualMachineScaleSetExtensionProfile') - - # pylint: disable=no-member - version = _normalize_extension_version(cmd.cli_ctx, publisher, extension_name, version, vmss.location) - extension_profile = vmss.virtual_machine_profile.extension_profile - if extension_profile: - extensions = extension_profile.extensions - if extensions: - extension_profile.extensions = [x for x in extensions if - x.type_properties_type.lower() != extension_name.lower() or x.publisher.lower() != publisher.lower()] # pylint: disable=line-too-long - - if cmd.supported_api_version(min_api='2019-07-01', operation_group='virtual_machine_scale_sets'): - ext = VirtualMachineScaleSetExtension(name=extension_instance_name, - publisher=publisher, - type_properties_type=extension_name, - protected_settings=protected_settings, - type_handler_version=version, - settings=settings, - auto_upgrade_minor_version=(not no_auto_upgrade), - provision_after_extensions=provision_after_extensions, - enable_automatic_upgrade=enable_auto_upgrade) - else: - ext = VirtualMachineScaleSetExtension(name=extension_instance_name, - 
publisher=publisher, - type=extension_name, - protected_settings=protected_settings, - type_handler_version=version, - settings=settings, - auto_upgrade_minor_version=(not no_auto_upgrade), - provision_after_extensions=provision_after_extensions, - enable_automatic_upgrade=enable_auto_upgrade) - - if force_update: - ext.force_update_tag = str(_gen_guid()) - - if not vmss.virtual_machine_profile.extension_profile: - vmss.virtual_machine_profile.extension_profile = VirtualMachineScaleSetExtensionProfile(extensions=[]) - vmss.virtual_machine_profile.extension_profile.extensions.append(ext) - - return sdk_no_wait(no_wait, client.virtual_machine_scale_sets.begin_create_or_update, - resource_group_name, vmss_name, vmss) - - -def set_orchestration_service_state(cmd, resource_group_name, vm_scale_set_name, service_name, action, no_wait=False): - # currently service_name has only one available value "AutomaticRepairs". And SDK does not accept service_name, - # instead SDK assign it to "AutomaticRepairs" in its own logic. As there may be more service name to be supported, - # we define service_name as a required parameter here to avoid introducing a breaking change in the future. 
- client = _compute_client_factory(cmd.cli_ctx) - OrchestrationServiceStateInput = cmd.get_models('OrchestrationServiceStateInput') - state_input = OrchestrationServiceStateInput(service_name=service_name, action=action) - return sdk_no_wait(no_wait, client.virtual_machine_scale_sets.begin_set_orchestration_service_state, - resource_group_name, vm_scale_set_name, state_input) - - -def upgrade_vmss_extension(cmd, resource_group_name, vm_scale_set_name, no_wait=False): - client = _compute_client_factory(cmd.cli_ctx) - return sdk_no_wait(no_wait, client.virtual_machine_scale_set_rolling_upgrades.begin_start_extension_upgrade, - resource_group_name, vm_scale_set_name) -# endregion - - -# region VirtualMachineScaleSets RunCommand -def vmss_run_command_invoke(cmd, resource_group_name, vmss_name, command_id, instance_id, scripts=None, parameters=None): # pylint: disable=line-too-long - return run_command_invoke(cmd, resource_group_name, vmss_name, command_id, scripts, parameters, instance_id) - - -def vmss_run_command_create(cmd, - resource_group_name, - vmss_name, - instance_id, - run_command_name, - location, - tags=None, - script=None, - script_uri=None, - command_id=None, - parameters=None, - protected_parameters=None, - async_execution=None, - run_as_user=None, - run_as_password=None, - timeout_in_seconds=None, - output_blob_uri=None, - error_blob_uri=None, - no_wait=False): - from .aaz.latest.vmss.run_command import Create - args = {} - args['location'] = location - args['resource_group'] = resource_group_name - args['run_command_name'] = run_command_name - args['instance_id'] = instance_id - args['vmss_name'] = vmss_name - args['no_wait'] = no_wait - if tags is not None: - args['tags'] = tags - if script is not None: - args['script'] = script - if script_uri is not None: - args['script_uri'] = script_uri - if command_id is not None: - args['command_id'] = command_id - if parameters is not None: - auto_arg_name_num = 0 - args['parameters'] = [] - for p in 
parameters: - if '=' in p: - n, v = p.split('=', 1) - else: - auto_arg_name_num += 1 - n = 'arg{}'.format(auto_arg_name_num) - v = p - args['parameters'].append({'name': n, 'value': v}) - if protected_parameters is not None: - auto_arg_name_num = 0 - args['protected_parameters'] = [] - for p in protected_parameters: - if '=' in p: - n, v = p.split('=', 1) - else: - auto_arg_name_num += 1 - n = 'arg{}'.format(auto_arg_name_num) - v = p - args['protected_parameters'].append({'name': n, 'value': v}) - if async_execution is not None: - args['async_execution'] = async_execution - else: - args['async_execution'] = False - if run_as_user is not None: - args['run_as_user'] = run_as_user - if run_as_password is not None: - args['run_as_password'] = run_as_password - if timeout_in_seconds is not None: - args['timeout_in_seconds'] = timeout_in_seconds - if output_blob_uri is not None: - args['output_blob_uri'] = output_blob_uri - if error_blob_uri is not None: - args['error_blob_uri'] = error_blob_uri - return Create(cli_ctx=cmd.cli_ctx)(command_args=args) - - -def vmss_run_command_update(cmd, - resource_group_name, - vmss_name, - instance_id, - run_command_name, - location, - tags=None, - script=None, - script_uri=None, - command_id=None, - parameters=None, - protected_parameters=None, - async_execution=None, - run_as_user=None, - run_as_password=None, - timeout_in_seconds=None, - output_blob_uri=None, - error_blob_uri=None, - no_wait=False): - from .aaz.latest.vmss.run_command import Update as _Update - - class Update(_Update): - def pre_instance_update(self, instance): - if tags is not None: - instance.tags = tags - if location is not None: - instance.location = location - if script is not None: - instance.properties.source.script = script - if script_uri is not None: - instance.properties.source.script_uri = script_uri - if command_id is not None: - instance.properties.source.command_id = command_id - if parameters is not None: - auto_arg_name_num = 0 - _params = [] - for 
p in parameters: - if '=' in p: - n, v = p.split('=', 1) - else: - auto_arg_name_num += 1 - n = 'arg{}'.format(auto_arg_name_num) - v = p - _params.append({'name': n, 'value': v}) - instance.properties.parameters = _params - if protected_parameters is not None: - auto_arg_name_num = 0 - _params = [] - for p in protected_parameters: - if '=' in p: - n, v = p.split('=', 1) - else: - auto_arg_name_num += 1 - n = 'arg{}'.format(auto_arg_name_num) - v = p - _params.append({'name': n, 'value': v}) - instance.properties.protected_parameters = _params - if async_execution is not None: - instance.properties.async_execution = async_execution - else: - instance.properties.async_execution = False - if run_as_user is not None: - instance.properties.run_as_user = run_as_user - if run_as_password is not None: - instance.properties.run_as_password = run_as_password - if timeout_in_seconds is not None: - instance.properties.timeout_in_seconds = timeout_in_seconds - if output_blob_uri is not None: - instance.properties.output_blob_uri = output_blob_uri - if error_blob_uri is not None: - instance.properties.error_blob_uri = error_blob_uri - - args = {} - args['resource_group'] = resource_group_name - args['run_command_name'] = run_command_name - args['instance_id'] = instance_id - args['vmss_name'] = vmss_name - args['no_wait'] = no_wait - - return Update(cli_ctx=cmd.cli_ctx)(command_args=args) - - -def vmss_run_command_show(cmd, - resource_group_name, - vmss_name, - instance_id, - run_command_name, - expand=None, - instance_view=False): - from .aaz.latest.vmss.run_command import Show - if instance_view: - expand = 'instanceView' - return Show(cli_ctx=cmd.cli_ctx)(command_args={ - 'resource_group': resource_group_name, - 'vmss_name': vmss_name, - 'instance_id': instance_id, - 'run_command_name': run_command_name, - 'expand': expand - }) -# endregion - - -# region VirtualMachineScaleSets Identity -def remove_vmss_identity(cmd, resource_group_name, vmss_name, identities=None): - def 
setter(resource_group_name, vmss_name, vmss): - command_args = { - 'resource_group': resource_group_name, - 'vm_scale_set_name': vmss_name - } - - if vmss.get('identity') and vmss['identity'].get('type') == IdentityType.USER_ASSIGNED.value: - # NOTE: The literal 'UserAssigned' is intentionally appended as a marker for - # VMSSIdentityRemove._format_content, which uses it to apply special handling - # for purely user-assigned identities. It is not a real identity resource ID. - command_args['mi_user_assigned'] = \ - list(vmss.get('identity', {}).get('userAssignedIdentities', {}).keys()) + ['UserAssigned'] - elif vmss.get('identity') and vmss['identity'].get('type') == IdentityType.SYSTEM_ASSIGNED.value: - command_args['mi_user_assigned'] = [] - command_args['mi_system_assigned'] = 'True' - elif vmss.get('identity') and vmss['identity'].get('type') == IdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED.value: - command_args['mi_user_assigned'] = list(vmss.get('identity', {}).get('userAssignedIdentities', {}).keys()) - command_args['mi_system_assigned'] = 'True' - else: - command_args['mi_user_assigned'] = [] - - from .operations.vmss import VMSSIdentityRemove - return VMSSIdentityRemove(cli_ctx=cmd.cli_ctx)(command_args=command_args) - - if identities is None: - from ._vm_utils import MSI_LOCAL_ID - identities = [MSI_LOCAL_ID] - - return _remove_identities_by_aaz(cmd, resource_group_name, vmss_name, identities, get_vmss_by_aaz, setter) -# endregion - - -def create_gallery_image(cmd, resource_group_name, gallery_name, gallery_image_name, os_type, publisher, offer, sku, - os_state='Generalized', end_of_life_date=None, privacy_statement_uri=None, - release_note_uri=None, eula=None, description=None, location=None, - minimum_cpu_core=None, maximum_cpu_core=None, minimum_memory=None, maximum_memory=None, - disallowed_disk_types=None, plan_name=None, plan_publisher=None, plan_product=None, tags=None, - hyper_v_generation='V2', features=None, architecture=None): - logger.warning( - 
"Starting Build (May) 2024, \"az sig image-definition create\" command will use the new default values " - "Hyper-V Generation: V2 and SecurityType: TrustedLaunchSupported." - ) - - location = location or _get_resource_group_location(cmd.cli_ctx, resource_group_name) - - end_of_life_date = fix_gallery_image_date_info(end_of_life_date) - recommendation = None - if any([minimum_cpu_core, maximum_cpu_core, minimum_memory, maximum_memory]): - cpu_recommendation, memory_recommendation = None, None - if any([minimum_cpu_core, maximum_cpu_core]): - cpu_recommendation = { - "min": minimum_cpu_core, - "max": maximum_cpu_core, - } - if any([minimum_memory, maximum_memory]): - memory_recommendation = { - "min": minimum_memory, - "max": maximum_memory, - } - - recommendation = { - "v_cp_us": cpu_recommendation, - "memory": memory_recommendation - } - purchase_plan = None - if any([plan_name, plan_publisher, plan_product]): - purchase_plan = { - "name": plan_name, - "publisher": plan_publisher, - "product": plan_product, - } - - feature_list = None - if features: - from ._constants import COMPATIBLE_SECURITY_TYPE_VALUE, UPGRADE_SECURITY_HINT - feature_list = [] - security_type = None - for item in features.split(): - try: - key, value = item.split('=', 1) - # create Non-Trusted Launch VM Image - # The `Standard` is used for backward compatibility to allow customers to keep their current behavior - # after changing the default values to Trusted Launch VMs in the future. 
- if key == 'SecurityType': - security_type = True - if key == 'SecurityType' and value == COMPATIBLE_SECURITY_TYPE_VALUE: - logger.warning(UPGRADE_SECURITY_HINT) - continue - feature_list.append({ - "name": key, - "value": value, - }) - except ValueError: - raise CLIError('usage error: --features KEY=VALUE [KEY=VALUE ...]') - if security_type is None and hyper_v_generation == 'V2': - feature_list.append({ - "name": "SecurityType", - "value": "TrustedLaunchSupported", - }) - if features is None and cmd.cli_ctx.cloud.profile == 'latest' and hyper_v_generation == 'V2': - feature_list = [{ - "name": "SecurityType", - "value": "TrustedLaunchSupported", - }] - - args = { - "identifier": {"publisher": publisher, "offer": offer, "sku": sku}, - "os_type": os_type, - "os_state": os_state, - "end_of_life_date": end_of_life_date, - "recommended": recommendation, - "disallowed": {"disk_types": disallowed_disk_types}, - "purchase_plan": purchase_plan, - "location": location, - "eula": eula, - "tags": tags or {}, - "hyper_v_generation": hyper_v_generation, - "features": feature_list, - "architecture": architecture, - "resource_group": resource_group_name, - "gallery_name": gallery_name, - "gallery_image_name": gallery_image_name, - } - - from .aaz.latest.sig.image_definition import Create - return Create(cli_ctx=cmd.cli_ctx)(command_args=args) - - -def _add_aux_subscription(aux_subscriptions, resource_id): - if resource_id: - aux_subs = _parse_aux_subscriptions(resource_id) - if aux_subs and aux_subs[0] not in aux_subscriptions: - aux_subscriptions.extend(aux_subs) - - -def _get_image_version_aux_subscription(managed_image, os_snapshot, data_snapshots): - aux_subscriptions = [] - _add_aux_subscription(aux_subscriptions, managed_image) - _add_aux_subscription(aux_subscriptions, os_snapshot) - if data_snapshots: - for data_snapshot in data_snapshots: - _add_aux_subscription(aux_subscriptions, data_snapshot) - return aux_subscriptions if aux_subscriptions else None - - -def 
create_image_version(cmd, resource_group_name, gallery_name, gallery_image_name, gallery_image_version, - location=None, target_regions=None, storage_account_type=None, - end_of_life_date=None, exclude_from_latest=None, replica_count=None, tags=None, - os_snapshot=None, data_snapshots=None, managed_image=None, data_snapshot_luns=None, - target_region_encryption=None, os_vhd_uri=None, os_vhd_storage_account=None, - data_vhds_uris=None, data_vhds_luns=None, data_vhds_storage_accounts=None, - replication_mode=None, target_region_cvm_encryption=None, virtual_machine=None, - image_version=None, target_zone_encryption=None, target_edge_zones=None, - allow_replicated_location_deletion=None, block_deletion_before_end_of_life=None, - no_wait=False): - from azure.mgmt.core.tools import resource_id, is_valid_resource_id - from azure.cli.core.commands.client_factory import get_subscription_id - - location = location or _get_resource_group_location(cmd.cli_ctx, resource_group_name) - end_of_life_date = fix_gallery_image_date_info(end_of_life_date) - if managed_image and not is_valid_resource_id(managed_image): - managed_image = resource_id(subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name, - namespace='Microsoft.Compute', type='images', name=managed_image) - if os_snapshot and not is_valid_resource_id(os_snapshot): - os_snapshot = resource_id(subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name, - namespace='Microsoft.Compute', type='snapshots', name=os_snapshot) - if data_snapshots: - for i, s in enumerate(data_snapshots): - if not is_valid_resource_id(data_snapshots[i]): - data_snapshots[i] = resource_id( - subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name, - namespace='Microsoft.Compute', type='snapshots', name=s) - - profile = { - "exclude_from_latest": exclude_from_latest, - "end_of_life_date": end_of_life_date, - "target_regions": target_regions or [{"name": location}], - 
"replica_count": replica_count, - "storage_account_type": storage_account_type - } - - if target_edge_zones: - profile["target_extended_locations"] = target_edge_zones - - if replication_mode is not None: - profile["replication_mode"] = replication_mode - if not cmd.supported_api_version(min_api='2022-03-03', operation_group='gallery_image_versions'): - source = {"managed_image": {"id": managed_image}} - profile["source"] = source - - if cmd.supported_api_version(min_api='2019-07-01', operation_group='gallery_image_versions'): - if managed_image is None and os_snapshot is None and os_vhd_uri is None: - raise RequiredArgumentMissingError('usage error: Please provide --managed-image or --os-snapshot or --vhd') - - source = os_disk_image = data_disk_images = None - if virtual_machine is not None and cmd.supported_api_version(min_api='2023-07-03', - operation_group='gallery_image_versions'): - source = {"virtual_machine_id": virtual_machine} - elif managed_image is not None: - source = {"id": managed_image} - if os_snapshot is not None: - os_disk_image = {"source": {"id": os_snapshot}} - if data_snapshot_luns and not data_snapshots: - raise ArgumentUsageError('usage error: --data-snapshot-luns must be used together with --data-snapshots') - if data_snapshots: - if data_snapshot_luns and len(data_snapshots) != len(data_snapshot_luns): - raise ArgumentUsageError('usage error: Length of --data-snapshots and ' - '--data-snapshot-luns should be equal.') - if not data_snapshot_luns: - data_snapshot_luns = list(range(len(data_snapshots))) - data_disk_images = [] - for i, s in enumerate(data_snapshots): - data_disk_images.append({"source": {"id": s}, "lun": int(data_snapshot_luns[i])}) - # from vhd, only support os image now - if cmd.supported_api_version(min_api='2020-09-30', operation_group='gallery_image_versions'): - # OS disk - if os_vhd_uri and os_vhd_storage_account is None or os_vhd_uri is None and os_vhd_storage_account: - raise ArgumentUsageError('--os-vhd-uri and 
--os-vhd-storage-account should be used together.') - if os_vhd_uri and os_vhd_storage_account: - if not is_valid_resource_id(os_vhd_storage_account): - os_vhd_storage_account = resource_id( - subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name, - namespace='Microsoft.Storage', type='storageAccounts', name=os_vhd_storage_account) - os_disk_image = { - "source": { - "storage_account_id": os_vhd_storage_account, - "uri": os_vhd_uri - } - } - - # Data disks - if data_vhds_uris and data_vhds_storage_accounts is None or \ - data_vhds_uris is None and data_vhds_storage_accounts: - raise ArgumentUsageError('--data-vhds-uris and --data-vhds-storage-accounts should be used together.') - if data_vhds_luns and data_vhds_uris is None: - raise ArgumentUsageError('--data-vhds-luns must be used together with --data-vhds-uris') - if data_vhds_uris: - # Generate LUNs - if data_vhds_luns is None: - # 0, 1, 2, ... - data_vhds_luns = list(range(len(data_vhds_uris))) - # Check length - len_data_vhds_uris = len(data_vhds_uris) - len_data_vhds_luns = len(data_vhds_luns) - len_data_vhds_storage_accounts = len(data_vhds_storage_accounts) - if len_data_vhds_uris != len_data_vhds_luns or len_data_vhds_uris != len_data_vhds_storage_accounts: - raise ArgumentUsageError( - 'Length of --data-vhds-uris, --data-vhds-luns, --data-vhds-storage-accounts must be same.') - # Generate full storage account ID - for i, storage_account in enumerate(data_vhds_storage_accounts): - if not is_valid_resource_id(storage_account): - data_vhds_storage_accounts[i] = resource_id( - subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name, - namespace='Microsoft.Storage', type='storageAccounts', name=storage_account) - if data_disk_images is None: - data_disk_images = [] - for uri, lun, account in zip(data_vhds_uris, data_vhds_luns, data_vhds_storage_accounts): - data_disk_images.append({ - "source": {"storage_account_id": account, "uri": uri}, - "lun": lun - }) 
- - storage_profile = {"source": source, "os_disk_image": os_disk_image, "data_disk_images": data_disk_images} - args = { - "publishing_profile": profile, - "location": location, - "tags": tags or {}, - "storage_profile": storage_profile - } - if allow_replicated_location_deletion is not None: - args["safety_profile"] = { - "allow_deletion_of_replicated_locations": allow_replicated_location_deletion - } - if block_deletion_before_end_of_life is not None: - if "safety_profile" not in args: - args["safety_profile"] = {} - - args["safety_profile"]["block_deletion_before_end_of_life"] = block_deletion_before_end_of_life - else: - if managed_image is None: - raise RequiredArgumentMissingError('usage error: Please provide --managed-image') - args = {"publishing_profile": profile, "location": location, "tags": tags or {}} - - args["resource_group"] = resource_group_name - args["gallery_name"] = gallery_name - args["gallery_image_definition"] = gallery_image_name - args["gallery_image_version_name"] = gallery_image_version - args["no_wait"] = no_wait - - from .aaz.latest.sig.image_version import Create - return Create(cli_ctx=cmd.cli_ctx)(command_args=args) - - -def undelete_image_version(cmd, resource_group_name, gallery_name, gallery_image_name, gallery_image_version, - location=None, tags=None, allow_replicated_location_deletion=None): - location = location or _get_resource_group_location(cmd.cli_ctx, resource_group_name) - - from .aaz.latest.sig import Show as _SigShow - gallery = _SigShow(cli_ctx=cmd.cli_ctx)(command_args={ - "resource_group": resource_group_name, - "gallery_name": gallery_name - }) - - if gallery.get("softDeletePolicy", None) and gallery["softDeletePolicy"].get("isSoftDeleteEnabled", None): - soft_delete = gallery["softDeletePolicy"]["isSoftDeleteEnabled"] - else: - soft_delete = None - - if not soft_delete: - from azure.cli.core.azclierror import InvalidArgumentValueError - raise InvalidArgumentValueError('soft-deletion is not enabled in Gallery 
\'{}\''.format(gallery_name)) - - args = { - "publishing_profile": None, - "location": location, - "tags": tags or {}, - "storage_profile": None, - } - if allow_replicated_location_deletion is not None: - args["safety_profile"] = { - "allow_deletion_of_replicated_locations": allow_replicated_location_deletion - } - - args["resource_group"] = resource_group_name - args["gallery_name"] = gallery_name - args["gallery_image_definition"] = gallery_image_name - args["gallery_image_version_name"] = gallery_image_version - - from .aaz.latest.sig.image_version import Create - return Create(cli_ctx=cmd.cli_ctx)(command_args=args) - - -def fix_gallery_image_date_info(date_info): - # here we add needed time, if only date is provided, so the setting can be accepted by servie end - if date_info and 't' not in date_info.lower(): - date_info += 'T12:59:59Z' - return date_info - - -# pylint: disable=line-too-long -def get_image_version_to_update(cmd, resource_group_name, gallery_name, gallery_image_name, gallery_image_version_name): - from .aaz.latest.sig.image_version import Show as SigImageVersionShow - version = SigImageVersionShow(cli_ctx=cmd.cli_ctx)(command_args={ - "resource_group": resource_group_name, - "gallery_name": gallery_name, - "gallery_image_definition": gallery_image_name, - "gallery_image_version_name": gallery_image_version_name, - }) - - # To avoid unnecessary permission check of image - if "storageProfile" not in version: - version["storageProfile"] = {} - version["storageProfile"]["source"] = None - if version["storageProfile"].get("osDiskImage", None) and \ - version["storageProfile"]["osDiskImage"].get("source", None): - version["storageProfile"]["osDiskImage"]["source"] = None - if version["storageProfile"].get("dataDiskImages", None): - for v in version["storageProfile"]["dataDiskImages"]: - if v.get("source", None): - v["source"] = None - - return version - - -def update_image_version(cmd, resource_group_name, gallery_name, gallery_image_name, 
gallery_image_version_name, - target_regions=None, replica_count=None, allow_replicated_location_deletion=None, - target_edge_zones=None, block_deletion_before_end_of_life=None, no_wait=False, **kwargs): - args = kwargs['gallery_image_version'] - - from .operations.sig_image_version import convert_show_result_to_snake_case - args = convert_show_result_to_snake_case(args) - - if target_regions: - if "publishing_profile" not in args: - args["publishing_profile"] = {} - args["publishing_profile"]["target_regions"] = target_regions - if replica_count: - if "publishing_profile" not in args: - args["publishing_profile"] = {} - args["publishing_profile"]["replica_count"] = replica_count - if args.get("storage_profile", None) and args["storage_profile"].get("source", None) is not None: - args["storage_profile"]["os_disk_image"] = args["storage_profile"]["data_disk_images"] = None - # target extended locations will be updated when --target-edge-zones is specified - if target_edge_zones is not None: - if "publishing_profile" not in args: - args["publishing_profile"] = {} - args["publishing_profile"]["target_extended_locations"] = target_edge_zones \ - if len(target_edge_zones) > 0 else None - if allow_replicated_location_deletion is not None: - if "safety_profile" not in args: - args["safety_profile"] = {} - args["safety_profile"]["allow_deletion_of_replicated_locations"] = allow_replicated_location_deletion - if block_deletion_before_end_of_life is not None: - if "safety_profile" not in args: - args["safety_profile"] = {} - args["safety_profile"]["block_deletion_before_end_of_life"] = block_deletion_before_end_of_life - - args["resource_group"] = resource_group_name - args["gallery_name"] = gallery_name - args["gallery_image_definition"] = gallery_image_name - args["gallery_image_version_name"] = gallery_image_version_name - args["no_wait"] = no_wait - - from .aaz.latest.sig.image_version import Create - return Create(cli_ctx=cmd.cli_ctx)(command_args=args) -# endregion - - 
-# region dedicated host -def create_dedicated_host_group(cmd, client, host_group_name, resource_group_name, platform_fault_domain_count, - automatic_placement=None, location=None, zones=None, tags=None, ultra_ssd_enabled=None): - DedicatedHostGroup = cmd.get_models('DedicatedHostGroup') - location = location or _get_resource_group_location(cmd.cli_ctx, resource_group_name) - - host_group_params = DedicatedHostGroup(location=location, platform_fault_domain_count=platform_fault_domain_count, - support_automatic_placement=automatic_placement, zones=zones, tags=tags) - if ultra_ssd_enabled is not None: - additionalCapabilities = {'ultraSSDEnabled': ultra_ssd_enabled} - host_group_params.additional_capabilities = additionalCapabilities - - return client.create_or_update(resource_group_name, host_group_name, parameters=host_group_params) - - -def get_dedicated_host_group_instance_view(client, host_group_name, resource_group_name): - return client.get(resource_group_name, host_group_name, expand="instanceView") - - -def create_dedicated_host(cmd, host_group_name, host_name, resource_group_name, sku, platform_fault_domain=None, - auto_replace_on_failure=None, license_type=None, location=None, tags=None): - from .aaz.latest.vm.host import Create as VmHostCreate - location = location or _get_resource_group_location(cmd.cli_ctx, resource_group_name) - command_args = { - 'host_group_name': host_group_name, - 'host_name': host_name, - 'resource_group': resource_group_name, - 'location': location, - 'sku': { - 'name': sku - } - } - - if tags: - command_args['tags'] = tags - - if auto_replace_on_failure is not None: - command_args['auto_replace_on_failure'] = auto_replace_on_failure - - if license_type: - command_args['license_type'] = license_type - - if platform_fault_domain: - command_args['platform_fault_domain'] = platform_fault_domain - - return VmHostCreate(cli_ctx=cmd.cli_ctx)(command_args=command_args) - - -def get_dedicated_host_instance_view(cmd, host_group_name, 
host_name, resource_group_name): - from .aaz.latest.vm.host import Show as VmHostShow - command_args = { - 'host_group_name': host_group_name, - 'host_name': host_name, - 'resource_group': resource_group_name, - 'expand': 'instanceView' - } - return VmHostShow(cli_ctx=cmd.cli_ctx)(command_args=command_args) - -# endregion - - -# region VMMonitor -def _get_log_analytics_client(cmd): - from ._client_factory import cf_log_analytics - from azure.cli.core.commands.client_factory import get_subscription_id - subscription_id = get_subscription_id(cmd.cli_ctx) - return cf_log_analytics(cmd.cli_ctx, subscription_id) - - -def _prepare_workspace(cmd, resource_group_name, workspace): - from azure.mgmt.core.tools import is_valid_resource_id - - from azure.core.exceptions import HttpResponseError - - workspace_id = None - if not is_valid_resource_id(workspace): - workspace_name = workspace - log_client = _get_log_analytics_client(cmd) - workspace_result = None - try: - workspace_result = log_client.workspaces.get(resource_group_name, workspace_name) - except HttpResponseError: - from azure.mgmt.loganalytics.models import Workspace, WorkspaceSku, WorkspaceSkuNameEnum - sku = WorkspaceSku(name=WorkspaceSkuNameEnum.per_gb2018.value) - retention_time = 30 # default value - location = _get_resource_group_location(cmd.cli_ctx, resource_group_name) - workspace_instance = Workspace(location=location, - sku=sku, - retention_in_days=retention_time) - workspace_result = LongRunningOperation(cmd.cli_ctx)(log_client.workspaces.begin_create_or_update( - resource_group_name, - workspace_name, - workspace_instance)) - workspace_id = workspace_result.id - else: - workspace_id = workspace - return workspace_id - - -def _set_data_source_for_workspace(cmd, os_type, resource_group_name, workspace_name): - from ._client_factory import cf_log_analytics_data_sources - from azure.cli.core.commands.client_factory import get_subscription_id - from azure.mgmt.loganalytics.models import DataSource - from 
azure.core.exceptions import HttpResponseError - - subscription_id = get_subscription_id(cmd.cli_ctx) - data_sources_client = cf_log_analytics_data_sources(cmd.cli_ctx, subscription_id) - data_source_name_template = "DataSource_{}_{}" - - default_data_sources = None - if os_type.lower() == 'linux': - from ._workspace_data_source_settings import default_linux_data_sources - default_data_sources = default_linux_data_sources - elif os_type.lower() == 'windows': - from ._workspace_data_source_settings import default_windows_data_sources - default_data_sources = default_windows_data_sources - - if default_data_sources is not None: - for data_source_kind, data_source_settings in default_data_sources.items(): - for data_source_setting in data_source_settings: - data_source = DataSource(kind=data_source_kind, - properties=data_source_setting) - data_source_name = data_source_name_template.format(data_source_kind, _gen_guid()) - try: - data_sources_client.create_or_update(resource_group_name, - workspace_name, - data_source_name, - data_source) - except HttpResponseError as ex: - logger.warning("Failed to set data source due to %s. " - "Skip this step and need manual work later.", ex.message) - else: - logger.warning("Unsupported OS type. Skip the default settings for log analytics workspace.") - - -def execute_query_for_vm(cmd, client, resource_group_name, vm_name, analytics_query, timespan=None): - """Executes a query against the Log Analytics workspace linked with a vm.""" - vm = get_vm_by_aaz(cmd, resource_group_name, vm_name) - workspace = None - extension_resources = vm.get('resources', []) - for resource in extension_resources: - if resource.get('name') in (_WINDOWS_OMS_AGENT_EXT, _LINUX_OMS_AGENT_EXT): - workspace = resource.get('settings', {}).get('workspaceId', None) - if workspace is None: - raise CLIError('Cannot find the corresponding log analytics workspace. 
' - 'Please check the status of log analytics workspace.') - return client.query_workspace(workspace, analytics_query, timespan=timespan) - - -def _set_log_analytics_workspace_extension(cmd, resource_group_name, vm, vm_name, workspace_name): - is_linux_os = _is_linux_os_by_aaz(vm) - vm_extension_name = _LINUX_OMS_AGENT_EXT if is_linux_os else _WINDOWS_OMS_AGENT_EXT - log_client = _get_log_analytics_client(cmd) - customer_id = log_client.workspaces.get(resource_group_name, workspace_name).customer_id - settings = { - 'workspaceId': customer_id, - 'stopOnMultipleConnections': 'true' - } - primary_shared_key = log_client.shared_keys.get_shared_keys(resource_group_name, workspace_name).primary_shared_key - protected_settings = { - 'workspaceKey': primary_shared_key, - } - return set_extension(cmd, resource_group_name, vm_name, vm_extension_name, - extension_mappings[vm_extension_name]['publisher'], - extension_mappings[vm_extension_name]['version'], - settings, - protected_settings) -# endregion - - -# disk encryption set -def show_disk_encryption_set_identity(cmd, resource_group_name, disk_encryption_set_name): - from .aaz.latest.disk_encryption_set import Show as _Show - des = _Show(cli_ctx=cmd.cli_ctx)(command_args={ - "disk_encryption_set_name": disk_encryption_set_name, - "resource_group": resource_group_name - }) - return des.get('identity', {}) -# endregion - - -# region install patches -def install_vm_patches(cmd, resource_group_name, vm_name, maximum_duration, reboot_setting, - classifications_to_include_win=None, classifications_to_include_linux=None, - kb_numbers_to_include=None, kb_numbers_to_exclude=None, exclude_kbs_requiring_reboot=None, - package_name_masks_to_include=None, package_name_masks_to_exclude=None, - max_patch_publish_date=None, no_wait=False): - from .aaz.latest.vm import InstallPatches as VmInstallPatches - - command_args = { - 'resource_group': resource_group_name, - 'name': vm_name, - 'maximum_duration': maximum_duration, - 
'reboot_setting': reboot_setting, - 'linux_parameters': { - 'classifications_to_include': classifications_to_include_linux, - 'package_name_masks_to_exclude': package_name_masks_to_exclude, - 'package_name_masks_to_include': package_name_masks_to_include - }, - 'windows_parameters': { - 'classifications_to_include': classifications_to_include_win, - 'exclude_kbs_requiring_reboot': exclude_kbs_requiring_reboot, - 'kb_numbers_to_exclude': kb_numbers_to_exclude, - 'kb_numbers_to_include': kb_numbers_to_include, - 'max_patch_publish_date': max_patch_publish_date - }, - 'no_wait': no_wait - } - - return VmInstallPatches(cli_ctx=cmd.cli_ctx)(command_args=command_args) -# endregion - - -def get_page_result(generator, marker, show_next_marker=None): - pages = generator.by_page(continuation_token=marker) # ContainerPropertiesPaged - result = list_generator(pages=pages) - - if show_next_marker: - next_marker = {"nextMarker": pages.continuation_token} - result.append(next_marker) - else: - if pages.continuation_token: - logger.warning('Next Marker:') - logger.warning(pages.continuation_token) - - return result - - -def list_generator(pages, num_results=50): - result = [] - - # get first page items - page = list(next(pages)) - result += page - - while True: - if not pages.continuation_token: - break - - # handle num results - if num_results is not None: - if num_results <= len(result): - break - - page = list(next(pages)) - result += page - - return result - - -def gallery_application_version_create(client, - resource_group_name, - gallery_name, - gallery_application_name, - gallery_application_version_name, - location, - package_file_link, - install_command, - remove_command, - tags=None, - update_command=None, - target_regions=None, - default_file_link=None, - end_of_life_date=None, - package_file_name=None, - config_file_name=None, - exclude_from=None, - no_wait=False): - gallery_application_version = {} - gallery_application_version['publishing_profile'] = {} - 
gallery_application_version['location'] = location - if tags is not None: - gallery_application_version['tags'] = tags - source = {} - source['media_link'] = package_file_link - if default_file_link is not None: - source['default_configuration_link'] = default_file_link - gallery_application_version['publishing_profile']['source'] = source - manage_actions = {} - manage_actions['install'] = install_command - manage_actions['remove'] = remove_command - if update_command is not None: - manage_actions['update'] = update_command - gallery_application_version['publishing_profile']['manage_actions'] = manage_actions - if target_regions is not None: - gallery_application_version['publishing_profile']['target_regions'] = target_regions - if exclude_from is not None: - gallery_application_version['publishing_profile']['exclude_from_latest'] = exclude_from - if end_of_life_date is not None: - gallery_application_version['publishing_profile']['end_of_life_date'] = end_of_life_date - settings = {} - if package_file_name is not None: - settings['package_file_name'] = package_file_name - if config_file_name is not None: - settings['config_file_name'] = config_file_name - if settings: - gallery_application_version['publishing_profile']['settings'] = settings - return sdk_no_wait(no_wait, - client.begin_create_or_update, - resource_group_name=resource_group_name, - gallery_name=gallery_name, - gallery_application_name=gallery_application_name, - gallery_application_version_name=gallery_application_version_name, - gallery_application_version=gallery_application_version) - - -def gallery_application_version_update(client, - resource_group_name, - gallery_name, - gallery_application_name, - gallery_application_version_name, - location, - package_file_link, - tags=None, - target_regions=None, - default_file_link=None, - end_of_life_date=None, - exclude_from=None, - no_wait=False): - gallery_application_version = {} - gallery_application_version['publishing_profile'] = {} - 
gallery_application_version['location'] = location - if tags is not None: - gallery_application_version['tags'] = tags - source = {} - source['media_link'] = package_file_link - if default_file_link is not None: - source['default_configuration_link'] = default_file_link - gallery_application_version['publishing_profile']['source'] = source - if target_regions is not None: - gallery_application_version['publishing_profile']['target_regions'] = [target_regions] - if exclude_from is not None: - gallery_application_version['publishing_profile']['exclude_from_latest'] = exclude_from - if end_of_life_date is not None: - gallery_application_version['publishing_profile']['end_of_life_date'] = end_of_life_date - return sdk_no_wait(no_wait, - client.begin_create_or_update, - resource_group_name=resource_group_name, - gallery_name=gallery_name, - gallery_application_name=gallery_application_name, - gallery_application_version_name=gallery_application_version_name, - gallery_application_version=gallery_application_version) - - -def create_capacity_reservation_group(cmd, client, resource_group_name, capacity_reservation_group_name, location=None, - tags=None, zones=None, sharing_profile=None): - CapacityReservationGroup = cmd.get_models('CapacityReservationGroup') - if sharing_profile is not None: - subscription_ids = [{'id': sub_id} for sub_id in sharing_profile] - sharing_profile = {'subscriptionIds': subscription_ids} - capacity_reservation_group = CapacityReservationGroup(location=location, tags=tags, - zones=zones, sharing_profile=sharing_profile) - return client.create_or_update(resource_group_name=resource_group_name, - capacity_reservation_group_name=capacity_reservation_group_name, - parameters=capacity_reservation_group) - - -def update_capacity_reservation_group(cmd, client, resource_group_name, capacity_reservation_group_name, tags=None, - sharing_profile=None): - CapacityReservationGroupUpdate = cmd.get_models('CapacityReservationGroupUpdate') - if sharing_profile 
is not None: - subscription_ids = [{'id': sub_id} for sub_id in sharing_profile] - sharing_profile = {'subscriptionIds': subscription_ids} - capacity_reservation_group = CapacityReservationGroupUpdate(tags=tags, sharing_profile=sharing_profile) - return client.update(resource_group_name=resource_group_name, - capacity_reservation_group_name=capacity_reservation_group_name, - parameters=capacity_reservation_group) - - -def show_capacity_reservation_group(client, resource_group_name, capacity_reservation_group_name, - instance_view=None): - expand = None - if instance_view: - expand = 'instanceView' - return client.get(resource_group_name=resource_group_name, - capacity_reservation_group_name=capacity_reservation_group_name, - expand=expand) - - -def set_vm_applications(cmd, vm_name, resource_group_name, application_version_ids, order_applications=False, application_configuration_overrides=None, treat_deployment_as_failure=None, enable_automatic_upgrade=None, no_wait=False): - from .aaz.latest.vm import Update as _VMUpdate - - class SetVMApplications(_VMUpdate): - def pre_operations(self): - args = self.ctx.args - args.no_wait = no_wait - - def pre_instance_update(self, instance): - instance.properties.application_profile.gallery_applications = [{"package_reference_id": avid} for avid in application_version_ids] - - if order_applications: - index = 1 - for app in instance.properties.application_profile.gallery_applications: - app["order"] = index - index += 1 - - if application_configuration_overrides: - index = 0 - for over_ride in application_configuration_overrides: - if over_ride or over_ride.lower() != 'null': - instance.properties.application_profile.gallery_applications[index]["configuration_reference"] = over_ride - index += 1 - - if treat_deployment_as_failure: - index = 0 - for treat_as_failure in treat_deployment_as_failure: - instance.properties.application_profile.gallery_applications[index]["treat_failure_as_deployment_failure"] = \ - 
treat_as_failure.lower() == 'true' - index += 1 - - if enable_automatic_upgrade: - index = 0 - for enable_auto_upgrade in enable_automatic_upgrade: - instance.properties.application_profile.gallery_applications[index]["enable_automatic_upgrade"] = \ - enable_auto_upgrade.lower() == 'true' - index += 1 - - def _output(self, *args, **kwargs): - from azure.cli.core.aaz import AAZUndefined, has_value - - # Resolve flatten conflict - # When the type field conflicts, the type in inner layer is ignored and the outer layer is applied - if has_value(self.ctx.vars.instance.resources): - for resource in self.ctx.vars.instance.resources: - if has_value(resource.type): - resource.type = AAZUndefined - - result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True) - return result - - return SetVMApplications(cli_ctx=cmd.cli_ctx)(command_args={ - "resource_group": resource_group_name, - "vm_name": vm_name, - }) - - -def list_vm_applications(cmd, vm_name, resource_group_name): - try: - from .operations.vm import VMShow - vm = VMShow(cli_ctx=cmd.cli_ctx)(command_args={ - "resource_group": resource_group_name, - "vm_name": vm_name - }) - except ResourceNotFoundError: - raise ResourceNotFoundError('Could not find vm {}.'.format(vm_name)) - return vm.get("applicationProfile", {}) - - -def set_vmss_applications(cmd, vmss_name, resource_group_name, application_version_ids, order_applications=False, application_configuration_overrides=None, treat_deployment_as_failure=None, enable_automatic_upgrade=None, no_wait=False): - from .aaz.latest.vmss import Update as _VMSSUpdate - - class SetVMSSApplications(_VMSSUpdate): - def pre_operations(self): - args = self.ctx.args - args.no_wait = no_wait - - def pre_instance_update(self, instance): - instance.properties.virtualMachineProfile.application_profile.gallery_applications = [{"package_reference_id": avid} for avid in application_version_ids] - - if order_applications: - index = 1 - for app in 
instance.properties.virtualMachineProfile.application_profile.gallery_applications: - app["order"] = index - index += 1 - - if application_configuration_overrides: - index = 0 - for over_ride in application_configuration_overrides: - if over_ride or over_ride.lower() != 'null': - instance.properties.virtual_machine_profile.application_profile.gallery_applications[index]["configuration_reference"] = over_ride - index += 1 - - if treat_deployment_as_failure: - index = 0 - for treat_as_failure in treat_deployment_as_failure: - instance.properties.virtual_machine_profile.application_profile.gallery_applications[index]["treat_failure_as_deployment_failure"] = \ - treat_as_failure.lower() == 'true' - index += 1 - - if enable_automatic_upgrade: - index = 0 - for enable_auto_upgrade in enable_automatic_upgrade: - instance.properties.virtual_machine_profile.application_profile.gallery_applications[index]["enable_automatic_upgrade"] = \ - enable_auto_upgrade.lower() == 'true' - index += 1 - - def _output(self, *args, **kwargs): - from azure.cli.core.aaz import AAZUndefined, has_value - - # Resolve flatten conflict - # When the type field conflicts, the type in inner layer is ignored and the outer layer is applied - print(self.ctx.vars.instance.properties.virtual_machine_profile.extension_profile.extensions) - if has_value(self.ctx.vars.instance.properties.virtual_machine_profile.extension_profile.extensions): - for extension in self.ctx.vars.instance.properties.virtual_machine_profile.extension_profile.extensions: - if has_value(extension.type): - extension.type = AAZUndefined - - result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True) - return result - - return SetVMSSApplications(cli_ctx=cmd.cli_ctx)(command_args={ - "resource_group": resource_group_name, - "vm_scale_set_name": vmss_name, - }) - - -def list_vmss_applications(cmd, vmss_name, resource_group_name): - from .operations.vmss import VMSSShow - try: - command_args = { - 'resource_group': 
resource_group_name, - 'vm_scale_set_name': vmss_name - } - vmss = VMSSShow(cli_ctx=cmd.cli_ctx)(command_args=command_args) - except ResourceNotFoundError: - raise ResourceNotFoundError('Could not find vmss {}.'.format(vmss_name)) - return vmss.get('virtualMachineProfile', {}).get('applicationProfile', {}) - - -# region Restore point collection -def restore_point_create(cmd, - resource_group_name, - restore_point_collection_name, - restore_point_name, - exclude_disks=None, - source_restore_point=None, - consistency_mode=None, - source_os_resource=None, - os_restore_point_encryption_set=None, - os_restore_point_encryption_type=None, - source_data_disk_resource=None, - data_disk_restore_point_encryption_set=None, - data_disk_restore_point_encryption_type=None, - no_wait=False): - parameters = { - 'restore_point_collection_name': restore_point_collection_name, - 'restore_point_name': restore_point_name, - 'resource_group': resource_group_name, - 'no_wait': no_wait - } - if exclude_disks is not None: - parameters['exclude_disks'] = [] - for disk in exclude_disks: - parameters['exclude_disks'].append({'id': disk}) - if source_restore_point is not None: - parameters['source_restore_point'] = {'id': source_restore_point} - if consistency_mode is not None: - parameters['consistency_mode'] = consistency_mode - - storage_profile = {} - # Local restore point - if source_restore_point is None: - os_disk = {} - if source_os_resource is not None: - managed_disk = { - 'id': source_os_resource - } - os_disk['managed_disk'] = managed_disk - if os_restore_point_encryption_set is None and os_restore_point_encryption_type is None: - raise ArgumentUsageError('usage error: --os-restore-point-encryption-set or --os-restore-point-encryption-type must be used together with --source-os-resource') - - disk_restore_point = {} - if os_restore_point_encryption_set is not None or os_restore_point_encryption_type is not None: - encryption = {} - if os_restore_point_encryption_set is not None: - 
encryption['disk_encryption_set'] = { - 'id': os_restore_point_encryption_set - } - if os_restore_point_encryption_type is not None: - encryption['type'] = os_restore_point_encryption_type - - if encryption: - disk_restore_point['encryption'] = encryption - - if disk_restore_point: - os_disk['disk_restore_point'] = disk_restore_point - - if os_disk: - storage_profile['os_disk'] = os_disk - - data_disks = [] - if source_data_disk_resource is not None: - if data_disk_restore_point_encryption_set is None and data_disk_restore_point_encryption_type is None: - raise ArgumentUsageError('usage error: --data-disk-restore-point-encryption-set or --data-disk-restore-point-encryption-type must be used together with --source-data-disk-resource') - if data_disk_restore_point_encryption_set is not None and (len(source_data_disk_resource) != len(data_disk_restore_point_encryption_set)): - raise ArgumentUsageError('Length of --source-data-disk-resource, --data-disk-restore-point-encryption-set must be same.') - if data_disk_restore_point_encryption_type is not None and (len(source_data_disk_resource) != len(data_disk_restore_point_encryption_type)): - raise ArgumentUsageError('Length of --source-data-disk-resource, --data-disk-restore-point-encryption-type must be same.') - - for i, v in enumerate(source_data_disk_resource): - data_disks.append({ - 'managed_disk': { - 'id': v - }, - 'disk_restore_point': { - 'encryption': { - 'disk_encryption_set': { - 'id': data_disk_restore_point_encryption_set[i] if data_disk_restore_point_encryption_set is not None else None - }, - 'type': data_disk_restore_point_encryption_type[i] if data_disk_restore_point_encryption_type is not None else None - } - } - }) - - if data_disks: - storage_profile['data_disks'] = data_disks - - # Remote restore point - if source_restore_point is not None: - os_disk = {} - disk_restore_point = {} - if source_os_resource is not None: - source_disk_restore_point = { - 'id': source_os_resource - } - 
disk_restore_point['source_disk_restore_point'] = source_disk_restore_point - if os_restore_point_encryption_set is None and os_restore_point_encryption_type is None: - raise ArgumentUsageError('usage error: --os-restore-point-encryption-set or --os-restore-point-encryption-type must be used together with --source-os-resource') - - if os_restore_point_encryption_set is not None or os_restore_point_encryption_type is not None: - encryption = {} - if os_restore_point_encryption_set is not None: - encryption['disk_encryption_set'] = { - 'id': os_restore_point_encryption_set - } - if os_restore_point_encryption_type is not None: - encryption['type'] = os_restore_point_encryption_type - - if encryption: - disk_restore_point['encryption'] = encryption - if disk_restore_point: - os_disk['disk_restore_point'] = disk_restore_point - if os_disk: - storage_profile['os_disk'] = os_disk - - data_disks = [] - if source_data_disk_resource is not None: - if data_disk_restore_point_encryption_set is None and data_disk_restore_point_encryption_type is None: - raise ArgumentUsageError('usage error: --data-disk-restore-point-encryption-set or --data-disk-restore-point-encryption-type must be used together with --source-data-disk-resource') - if data_disk_restore_point_encryption_set is not None and (len(source_data_disk_resource) != len(data_disk_restore_point_encryption_set)): - raise ArgumentUsageError('Length of --source-data-disk-resource, --data-disk-restore-point-encryption-set must be same.') - if data_disk_restore_point_encryption_type is not None and (len(source_data_disk_resource) != len(data_disk_restore_point_encryption_type)): - raise ArgumentUsageError('Length of --source-data-disk-resource, --data-disk-restore-point-encryption-type must be same.') - - for i, v in enumerate(source_data_disk_resource): - data_disks.append({ - 'disk_restore_point': { - 'source_disk_restore_point': { - 'id': v - }, - 'encryption': { - 'disk_encryption_set': { - 'id': 
data_disk_restore_point_encryption_set[i] if data_disk_restore_point_encryption_set is not None else None - }, - 'type': data_disk_restore_point_encryption_type[i] if data_disk_restore_point_encryption_type is not None else None - } - } - }) - if data_disks: - storage_profile['data_disks'] = data_disks - - if storage_profile: - parameters['source_metadata'] = {'storage_profile': storage_profile} - - from .aaz.latest.restore_point import Create - return Create(cli_ctx=cmd.cli_ctx)(command_args=parameters) - - -def restore_point_show(cmd, - resource_group_name, - restore_point_name, - restore_point_collection_name, - expand=None, - instance_view=None): - args = { - 'resource_group': resource_group_name, - 'restore_point_collection_name': restore_point_collection_name, - 'restore_point_name': restore_point_name, - 'expand': expand - } - - if instance_view is not None: - args['expand'] = 'instanceView' - - from .aaz.latest.restore_point import Show - return Show(cli_ctx=cmd.cli_ctx)(command_args=args) - -# endRegion - - -# region Restore point collection -def restore_point_collection_show(cmd, - resource_group_name, - restore_point_collection_name, - expand=None, - restore_points=None): - from .aaz.latest.restore_point.collection import Show - args = { - "resource_group": resource_group_name, - "restore_point_collection_name": restore_point_collection_name, - } - if restore_points is not None: - args['expand'] = 'restorePoints' - return Show(cli_ctx=cmd.cli_ctx)(command_args=args) - -# endRegion - - -# region Community gallery -def sig_community_gallery_list(cmd, location=None, marker=None, show_next_marker=None): - from ._arg_client import ARGClient, QueryBody - - query_table = 'communitygalleryresources' - query_type = 'microsoft.compute/locations/communitygalleries' - - query = "{}| where type == '{}' ".format(query_table, query_type) - if location: - # Since the location data in table "communitygalleryresources" is in lowercase - # For accurate matching, we also 
need to convert the location in the query statement to lowercase - query = query + "| where location == '{}' ".format(location.lower()) - query_body = QueryBody(query) - - item_count_per_page = 30 - query_body.options = { - "$top": item_count_per_page - } - - if marker: - query_body.options['$skipToken'] = marker - - query_result = ARGClient(cmd.cli_ctx).send(query_body) - result = _transform_community_gallery_list_output(query_result) - - continuation_token = query_result.get('$skipToken') - - if show_next_marker: - next_marker = {"nextMarker": continuation_token} - result.append(next_marker) - else: - if continuation_token: - logger.warning('Next Marker:') - logger.warning(continuation_token) - - return result - - -def _transform_community_gallery_list_output(result): - - result_data = result.get('data') - if not result_data: - return [] - - output = [] - for data_item in result_data: - from collections import OrderedDict - output_item = OrderedDict() - output_item['id'] = data_item.get('id') - output_item['location'] = data_item.get('location') - output_item['name'] = data_item.get('name') - - properties = data_item.get('properties') - if properties: - output_item['communityMetadata'] = properties.get('communityMetadata', {}) - output_item['uniqueId'] = properties.get('identifier', {}).get('uniqueId') - - output.append(output_item) - - return output - - -def list_vm_sizes(cmd, location): - from .operations.vm import VMListSizes - return VMListSizes(cli_ctx=cmd.cli_ctx)(command_args={ - "location": location, - }) - - -# endRegion