From 91f71e8fdd2c79b676f065960dcf604cc95f1616 Mon Sep 17 00:00:00 2001 From: Zhuolun Liu Date: Fri, 23 Feb 2024 11:06:58 -0500 Subject: [PATCH] release v0.5.0 --- README.md | 42 +- .../{daas_admin_scope.md => admin_scope.md} | 8 +- ...tails.md => application_folder_details.md} | 4 +- docs/data-sources/{daas_vda.md => vda.md} | 8 +- .../{daas_admin_role.md => admin_role.md} | 10 +- .../{daas_admin_scope.md => admin_scope.md} | 8 +- .../{daas_application.md => application.md} | 12 +- ...cation_folder.md => application_folder.md} | 12 +- ...as_aws_hypervisor.md => aws_hypervisor.md} | 8 +- ...ool.md => aws_hypervisor_resource_pool.md} | 10 +- ...zure_hypervisor.md => azure_hypervisor.md} | 8 +- ...l.md => azure_hypervisor_resource_pool.md} | 10 +- ...as_delivery_group.md => delivery_group.md} | 12 +- ...as_gcp_hypervisor.md => gcp_hypervisor.md} | 8 +- ...ool.md => gcp_hypervisor_resource_pool.md} | 10 +- ..._machine_catalog.md => machine_catalog.md} | 34 +- docs/resources/nutanix_hypervisor.md | 57 + docs/resources/policy_set.md | 119 + docs/resources/vsphere_hypervisor.md | 58 + docs/resources/xenserver_hypervisor.md | 62 + .../xenserver_hypervisor_resource_pool.md | 59 + docs/resources/{daas_zone.md => zone.md} | 8 +- .../basic_azure_mcs_vda/delivery_group.tf | 4 +- .../daas/basic_azure_mcs_vda/hypervisors.tf | 4 +- .../basic_azure_mcs_vda/machine_catalogs.tf | 8 +- .../basic_azure_mcs_vda/resource_pools.tf | 4 +- examples/daas/basic_azure_mcs_vda/zones.tf | 2 +- .../daas/basic_gcp_mcs_vda/delivery_group.tf | 4 +- .../daas/basic_gcp_mcs_vda/hypervisors.tf | 4 +- .../basic_gcp_mcs_vda/machine_catalogs.tf | 8 +- .../daas/basic_gcp_mcs_vda/resource_pools.tf | 4 +- examples/daas/basic_gcp_mcs_vda/zones.tf | 2 +- go.mod | 28 +- go.sum | 78 +- .../admin_role/admin_role_resource.go | 2 +- .../admin_role/admin_role_resource_model.go | 0 .../admin_scope/admin_scope_data_source.go | 2 +- .../admin_scope_data_source_model.go | 0 .../admin_scope/admin_scope_resource.go | 2 +- .../admin_scope/admin_scope_resource_model.go | 0 .../application_folder_details_data_source.go | 4 +- ...cation_folder_details_data_source_model.go | 23 +- .../application_folder_resource.go | 4 +- .../application_folder_resource_model.go | 2 +- .../application/application_resource.go | 2 +- .../application/application_resource_model.go | 0 .../delivery_group/delivery_group_resource.go | 44 +- .../delivery_group_resource_model.go | 7 + .../delivery_group/delivery_group_utils.go | 34 +- .../hypervisor/aws_hypervisor_resource.go | 2 +- .../aws_hypervisor_resource_model.go | 0 .../hypervisor/azure_hypervisor_resource.go | 2 +- .../azure_hypervisor_resource_model.go | 0 .../hypervisor/gcp_hypervisor_resource.go | 2 +- .../gcp_hypervisor_resource_model.go | 0 .../hypervisor/hypervisor_common.go | 40 +- .../hypervisor/nutanix_hypervisor_resource.go | 323 +++ .../nutanix_hypervisor_resource_model.go | 39 + .../hypervisor/vsphere_hypervisor_resource.go | 346 +++ .../vsphere_hypervisor_resource_model.go | 47 + .../xenserver_hypervisor_resource.go | 346 +++ .../xenserver_hypervisor_resource_model.go | 47 + .../aws_hypervisor_resource_pool_resource.go | 2 +- ...hypervisor_resource_pool_resource_model.go | 0 ...azure_hypervisor_resource_pool_resource.go | 6 +- ...hypervisor_resource_pool_resource_model.go | 0 .../gcp_hypervisor_resource_pool_resource.go | 2 +- ...hypervisor_resource_pool_resource_model.go | 0 .../hypervisor_resource_pool_common.go | 4 +- ...erver_hypervisor_resource_pool_resource.go | 362 +++ 
...hypervisor_resource_pool_resource_model.go | 51 + .../machine_catalog_common_utils.go | 340 +++ .../machine_catalog_manual_utils.go | 512 ++++ .../machine_catalog_mcs_utils.go | 791 ++++++ .../machine_catalog_remote_pc_utils.go | 45 + .../machine_catalog_resource.go | 500 ++++ .../machine_catalog_resource_model.go | 157 ++ .../machine_catalog_schema_utils.go | 620 +++++ .../machine_catalog/machine_config.go | 0 internal/daas/policies/policy_set_resource.go | 835 +++++++ .../policies/policy_set_resource_model.go | 106 + .../machine_catalog_resource.go | 2160 ----------------- .../machine_catalog_resource_model.go | 538 ---- .../{data_sources => }/vda/vda_data_source.go | 2 +- .../vda/vda_data_source_model.go | 0 .../{resources => }/zone/zone_resource.go | 2 +- .../zone/zone_resource_model.go | 0 .../data-source.tf | 4 +- .../data-source.tf | 4 +- .../resources/citrix_admin_role/import.sh | 2 + .../resource.tf | 4 +- .../resources/citrix_admin_scope/import.sh | 2 + .../resource.tf | 2 +- .../resources/citrix_application/import.sh | 2 + .../resource.tf | 6 +- .../citrix_application_folder/import.sh | 2 + .../citrix_application_folder/resource.tf | 8 + .../resources/citrix_aws_hypervisor/import.sh | 2 + .../resource.tf | 2 +- .../import.sh | 2 + .../resource.tf | 4 +- .../citrix_azure_hypervisor/import.sh | 2 + .../resource.tf | 2 +- .../import.sh | 2 + .../resource.tf | 4 +- .../citrix_daas_admin_role/import.sh | 2 - .../citrix_daas_admin_scope/import.sh | 2 - .../citrix_daas_application/import.sh | 2 - .../citrix_daas_application_folder/import.sh | 2 - .../resource.tf | 8 - .../citrix_daas_aws_hypervisor/import.sh | 2 - .../import.sh | 2 - .../citrix_daas_azure_hypervisor/import.sh | 2 - .../import.sh | 2 - .../citrix_daas_delivery_group/import.sh | 2 - .../citrix_daas_gcp_hypervisor/import.sh | 2 - .../import.sh | 2 - .../citrix_daas_machine_catalog/import.sh | 2 - .../resources/citrix_daas_zone/import.sh | 2 - .../resources/citrix_delivery_group/import.sh | 2 + .../resource.tf | 5 +- .../resources/citrix_gcp_hypervisor/import.sh | 2 + .../resource.tf | 2 +- .../import.sh | 2 + .../resource.tf | 4 +- .../citrix_machine_catalog/import.sh | 2 + .../resource.tf | 20 +- .../citrix_nutanix_hypervisor/import.sh | 2 + .../citrix_nutanix_hypervisor/resource.tf | 10 + .../resources/citrix_policy_set/import.sh | 2 + .../resources/citrix_policy_set/resource.tf | 38 + .../citrix_vsphere_hypervisor/import.sh | 2 + .../citrix_vsphere_hypervisor/resource.tf | 10 + .../citrix_xenserver_hypervisor/import.sh | 2 + .../citrix_xenserver_hypervisor/resource.tf | 14 + .../import.sh | 2 + .../resource.tf | 15 + .../examples/resources/citrix_zone/import.sh | 2 + .../resource.tf | 2 +- internal/provider/provider.go | 36 +- internal/test/admin_role_resource_test.go | 36 +- internal/test/admin_scope_data_source_test.go | 32 +- internal/test/admin_scope_resource_test.go | 37 +- .../test/application_folder_resource_test.go | 24 +- internal/test/application_resource_test.go | 31 +- internal/test/delivery_group_test.go | 153 +- .../test/hypervisor_resource_pool_test.go | 167 +- internal/test/hypervisor_resource_test.go | 369 ++- .../test/machine_catalog_resource_test.go | 548 ++++- internal/test/policy_set_resource_test.go | 247 ++ internal/test/zone_resource_test.go | 39 +- internal/util/common.go | 37 +- internal/util/resource.go | 62 +- settings.cloud.example.json | 113 +- settings.onprem.example.json | 116 +- 155 files changed, 7995 insertions(+), 3354 deletions(-) rename docs/data-sources/{daas_admin_scope.md => 
admin_scope.md} (83%) rename docs/data-sources/{daas_application_folder_details.md => application_folder_details.md} (93%) rename docs/data-sources/{daas_vda.md => vda.md} (88%) rename docs/resources/{daas_admin_role.md => admin_role.md} (87%) rename docs/resources/{daas_admin_scope.md => admin_scope.md} (86%) rename docs/resources/{daas_application.md => application.md} (81%) rename docs/resources/{daas_application_folder.md => application_folder.md} (62%) rename docs/resources/{daas_aws_hypervisor.md => aws_hypervisor.md} (79%) rename docs/resources/{daas_aws_hypervisor_resource_pool.md => aws_hypervisor_resource_pool.md} (71%) rename docs/resources/{daas_azure_hypervisor.md => azure_hypervisor.md} (83%) rename docs/resources/{daas_azure_hypervisor_resource_pool.md => azure_hypervisor_resource_pool.md} (73%) rename docs/resources/{daas_delivery_group.md => delivery_group.md} (97%) rename docs/resources/{daas_gcp_hypervisor.md => gcp_hypervisor.md} (79%) rename docs/resources/{daas_gcp_hypervisor_resource_pool.md => gcp_hypervisor_resource_pool.md} (73%) rename docs/resources/{daas_machine_catalog.md => machine_catalog.md} (91%) create mode 100644 docs/resources/nutanix_hypervisor.md create mode 100644 docs/resources/policy_set.md create mode 100644 docs/resources/vsphere_hypervisor.md create mode 100644 docs/resources/xenserver_hypervisor.md create mode 100644 docs/resources/xenserver_hypervisor_resource_pool.md rename docs/resources/{daas_zone.md => zone.md} (88%) rename internal/daas/{resources => }/admin_role/admin_role_resource.go (99%) rename internal/daas/{resources => }/admin_role/admin_role_resource_model.go (100%) rename internal/daas/{data_sources => }/admin_scope/admin_scope_data_source.go (98%) rename internal/daas/{data_sources => }/admin_scope/admin_scope_data_source_model.go (100%) rename internal/daas/{resources => }/admin_scope/admin_scope_resource.go (99%) rename internal/daas/{resources => }/admin_scope/admin_scope_resource_model.go (100%) rename internal/daas/{data_sources/application_folder_details => application}/application_folder_details_data_source.go (97%) rename internal/daas/{data_sources/application_folder_details => application}/application_folder_details_data_source_model.go (67%) rename internal/daas/{resources/application_folder => application}/application_folder_resource.go (99%) rename internal/daas/{resources/application_folder => application}/application_folder_resource_model.go (97%) rename internal/daas/{resources => }/application/application_resource.go (99%) rename internal/daas/{resources => }/application/application_resource_model.go (100%) rename internal/daas/{resources => }/delivery_group/delivery_group_resource.go (97%) rename internal/daas/{resources => }/delivery_group/delivery_group_resource_model.go (96%) rename internal/daas/{resources => }/delivery_group/delivery_group_utils.go (98%) rename internal/daas/{resources => }/hypervisor/aws_hypervisor_resource.go (99%) rename internal/daas/{resources => }/hypervisor/aws_hypervisor_resource_model.go (100%) rename internal/daas/{resources => }/hypervisor/azure_hypervisor_resource.go (99%) rename internal/daas/{resources => }/hypervisor/azure_hypervisor_resource_model.go (100%) rename internal/daas/{resources => }/hypervisor/gcp_hypervisor_resource.go (99%) rename internal/daas/{resources => }/hypervisor/gcp_hypervisor_resource_model.go (100%) rename internal/daas/{resources => }/hypervisor/hypervisor_common.go (76%) create mode 100644 internal/daas/hypervisor/nutanix_hypervisor_resource.go 
create mode 100644 internal/daas/hypervisor/nutanix_hypervisor_resource_model.go create mode 100644 internal/daas/hypervisor/vsphere_hypervisor_resource.go create mode 100644 internal/daas/hypervisor/vsphere_hypervisor_resource_model.go create mode 100644 internal/daas/hypervisor/xenserver_hypervisor_resource.go create mode 100644 internal/daas/hypervisor/xenserver_hypervisor_resource_model.go rename internal/daas/{resources => }/hypervisor_resource_pool/aws_hypervisor_resource_pool_resource.go (99%) rename internal/daas/{resources => }/hypervisor_resource_pool/aws_hypervisor_resource_pool_resource_model.go (100%) rename internal/daas/{resources => }/hypervisor_resource_pool/azure_hypervisor_resource_pool_resource.go (97%) rename internal/daas/{resources => }/hypervisor_resource_pool/azure_hypervisor_resource_pool_resource_model.go (100%) rename internal/daas/{resources => }/hypervisor_resource_pool/gcp_hypervisor_resource_pool_resource.go (99%) rename internal/daas/{resources => }/hypervisor_resource_pool/gcp_hypervisor_resource_pool_resource_model.go (100%) rename internal/daas/{resources => }/hypervisor_resource_pool/hypervisor_resource_pool_common.go (97%) create mode 100644 internal/daas/hypervisor_resource_pool/xenserver_hypervisor_resource_pool_resource.go create mode 100644 internal/daas/hypervisor_resource_pool/xenserver_hypervisor_resource_pool_resource_model.go create mode 100644 internal/daas/machine_catalog/machine_catalog_common_utils.go create mode 100644 internal/daas/machine_catalog/machine_catalog_manual_utils.go create mode 100644 internal/daas/machine_catalog/machine_catalog_mcs_utils.go create mode 100644 internal/daas/machine_catalog/machine_catalog_remote_pc_utils.go create mode 100644 internal/daas/machine_catalog/machine_catalog_resource.go create mode 100644 internal/daas/machine_catalog/machine_catalog_resource_model.go create mode 100644 internal/daas/machine_catalog/machine_catalog_schema_utils.go rename internal/daas/{resources => }/machine_catalog/machine_config.go (100%) create mode 100644 internal/daas/policies/policy_set_resource.go create mode 100644 internal/daas/policies/policy_set_resource_model.go delete mode 100644 internal/daas/resources/machine_catalog/machine_catalog_resource.go delete mode 100644 internal/daas/resources/machine_catalog/machine_catalog_resource_model.go rename internal/daas/{data_sources => }/vda/vda_data_source.go (98%) rename internal/daas/{data_sources => }/vda/vda_data_source_model.go (100%) rename internal/daas/{resources => }/zone/zone_resource.go (99%) rename internal/daas/{resources => }/zone/zone_resource_model.go (100%) rename internal/examples/data-sources/{citrix_daas_admin_scope => citrix_admin_scope}/data-source.tf (56%) rename internal/examples/data-sources/{citrix_daas_vda => citrix_vda}/data-source.tf (67%) create mode 100644 internal/examples/resources/citrix_admin_role/import.sh rename internal/examples/resources/{citrix_daas_admin_role => citrix_admin_role}/resource.tf (78%) create mode 100644 internal/examples/resources/citrix_admin_scope/import.sh rename internal/examples/resources/{citrix_daas_admin_scope => citrix_admin_scope}/resource.tf (88%) create mode 100644 internal/examples/resources/citrix_application/import.sh rename internal/examples/resources/{citrix_daas_application => citrix_application}/resource.tf (63%) create mode 100644 internal/examples/resources/citrix_application_folder/import.sh create mode 100644 internal/examples/resources/citrix_application_folder/resource.tf create mode 100644 
internal/examples/resources/citrix_aws_hypervisor/import.sh rename internal/examples/resources/{citrix_daas_aws_hypervisor => citrix_aws_hypervisor}/resource.tf (78%) create mode 100644 internal/examples/resources/citrix_aws_hypervisor_resource_pool/import.sh rename internal/examples/resources/{citrix_daas_aws_hypervisor_resource_pool => citrix_aws_hypervisor_resource_pool}/resource.tf (51%) create mode 100644 internal/examples/resources/citrix_azure_hypervisor/import.sh rename internal/examples/resources/{citrix_daas_azure_hypervisor => citrix_azure_hypervisor}/resource.tf (81%) create mode 100644 internal/examples/resources/citrix_azure_hypervisor_resource_pool/import.sh rename internal/examples/resources/{citrix_daas_azure_hypervisor_resource_pool => citrix_azure_hypervisor_resource_pool}/resource.tf (60%) delete mode 100644 internal/examples/resources/citrix_daas_admin_role/import.sh delete mode 100644 internal/examples/resources/citrix_daas_admin_scope/import.sh delete mode 100644 internal/examples/resources/citrix_daas_application/import.sh delete mode 100644 internal/examples/resources/citrix_daas_application_folder/import.sh delete mode 100644 internal/examples/resources/citrix_daas_application_folder/resource.tf delete mode 100644 internal/examples/resources/citrix_daas_aws_hypervisor/import.sh delete mode 100644 internal/examples/resources/citrix_daas_aws_hypervisor_resource_pool/import.sh delete mode 100644 internal/examples/resources/citrix_daas_azure_hypervisor/import.sh delete mode 100644 internal/examples/resources/citrix_daas_azure_hypervisor_resource_pool/import.sh delete mode 100644 internal/examples/resources/citrix_daas_delivery_group/import.sh delete mode 100644 internal/examples/resources/citrix_daas_gcp_hypervisor/import.sh delete mode 100644 internal/examples/resources/citrix_daas_gcp_hypervisor_resource_pool/import.sh delete mode 100644 internal/examples/resources/citrix_daas_machine_catalog/import.sh delete mode 100644 internal/examples/resources/citrix_daas_zone/import.sh create mode 100644 internal/examples/resources/citrix_delivery_group/import.sh rename internal/examples/resources/{citrix_daas_delivery_group => citrix_delivery_group}/resource.tf (93%) create mode 100644 internal/examples/resources/citrix_gcp_hypervisor/import.sh rename internal/examples/resources/{citrix_daas_gcp_hypervisor => citrix_gcp_hypervisor}/resource.tf (77%) create mode 100644 internal/examples/resources/citrix_gcp_hypervisor_resource_pool/import.sh rename internal/examples/resources/{citrix_daas_gcp_hypervisor_resource_pool => citrix_gcp_hypervisor_resource_pool}/resource.tf (57%) create mode 100644 internal/examples/resources/citrix_machine_catalog/import.sh rename internal/examples/resources/{citrix_daas_machine_catalog => citrix_machine_catalog}/resource.tf (85%) create mode 100644 internal/examples/resources/citrix_nutanix_hypervisor/import.sh create mode 100644 internal/examples/resources/citrix_nutanix_hypervisor/resource.tf create mode 100644 internal/examples/resources/citrix_policy_set/import.sh create mode 100644 internal/examples/resources/citrix_policy_set/resource.tf create mode 100644 internal/examples/resources/citrix_vsphere_hypervisor/import.sh create mode 100644 internal/examples/resources/citrix_vsphere_hypervisor/resource.tf create mode 100644 internal/examples/resources/citrix_xenserver_hypervisor/import.sh create mode 100644 internal/examples/resources/citrix_xenserver_hypervisor/resource.tf create mode 100644 
internal/examples/resources/citrix_xenserver_hypervisor_resource_pool/import.sh create mode 100644 internal/examples/resources/citrix_xenserver_hypervisor_resource_pool/resource.tf create mode 100644 internal/examples/resources/citrix_zone/import.sh rename internal/examples/resources/{citrix_daas_zone => citrix_zone}/resource.tf (81%) create mode 100644 internal/test/policy_set_resource_test.go diff --git a/README.md b/README.md index 6a6c0f0..04a05a3 100644 --- a/README.md +++ b/README.md @@ -21,6 +21,8 @@ Citrix has developed a custom Terraform provider for automating Citrix product d - [Create a Machine Catalog](#create-a-machine-catalog) - [Create a Delivery Group](#create-a-delivery-group) - [Frequently Asked Questions](#frequently-asked-questions) + - [Which resources are supported for different connection types?](#which-resources-are-supported-for-different-connection-types) + - [What provisioning types are supported for machine catalogs?](#what-provisioning-types-are-supported-for-machine-catalogs) - [Attributions](#attributions) - [License](#license) @@ -88,10 +90,10 @@ Below is a table to show the difference between on-premises and Cloud provider c Resources.tf can be used to configure the desired state of the resources that you want to create and manage in your Citrix Services. The example below shows how you can configure a Citrix DaaS Zone in Citrix DaaS service in resource.tf. -**`citrix_daas_zone`** +**`citrix_zone`** ```hcl -resource "citrix_daas_zone" "example-zone" { +resource "citrix_zone" "example-zone" { name = "example-zone" description = "zone example" metadata = [ @@ -103,7 +105,7 @@ resource "citrix_daas_zone" "example-zone" { } ``` -Please refer the Plugin for Terraform Provider for Citrix DaaS™ documentation such as [docs/resources/daas_zone.md](docs/resources/daas_zone.md) to find out the configurable properties of each type of resources, understand what they do, and what option values are supported. +Please refer to the Plugin for Terraform Provider for Citrix DaaS™ documentation, such as [docs/resources/zone.md](docs/resources/zone.md), to find the configurable properties of each resource type, understand what they do, and see which option values are supported. --------- @@ -127,25 +129,49 @@ To find all the Citrix DaaS resources manageable via Terraform, understand all t ### Create a Zone in Citrix DaaS as the first step -Refer the [DaaS Zone documentation](docs/resources/daas_zone.md) to configure a zone via terraform. +Refer to the [DaaS Zone documentation](docs/resources/zone.md) to configure a zone via terraform. ### Create a Hypervisor -Hypervisor is needed to use your preferred public cloud provider with Citrix DaaS. Refer the [DaaS Hypervisor documentation](docs/resources/daas_azure_hypervisor.md) to configure an Azure hypervisor in a zone via terraform. +A hypervisor is needed to use your preferred public cloud provider with Citrix DaaS. Refer to the [DaaS Hypervisor documentation](docs/resources/azure_hypervisor.md) to configure an Azure hypervisor in a zone via terraform. ### Create a Hypervisor Resource Pool -The hypervisor resource pool defines the network configuration for a hypervisor connection. Refer the [DaaS Hypervisor Resource Pool documentaion](docs/resources/daas_hypervisor_resource_pool.md) to configure a hypervisr resource pool via terraform. +The hypervisor resource pool defines the network configuration for a hypervisor connection.
Refer to the [DaaS Hypervisor Resource Pool documentation](docs/resources/hypervisor_resource_pool.md) to configure a hypervisor resource pool via terraform. ### Create a Machine Catalog -A machine catalog is a collection of machines managed as a single entity. Refer the [DaaS Machine Catalog documentation](docs/resources/daas_machine_catalog.md) to configure a machine catalog via terraform. +A machine catalog is a collection of machines managed as a single entity. Refer to the [DaaS Machine Catalog documentation](docs/resources/machine_catalog.md) to configure a machine catalog via terraform. ### Create a Delivery Group -A delivery group is a collection of machines selected from one or more machine catalogs. The delivery group can also specify which users can use those machines, plus the applications and desktops available to those users. Refer the [DaaS Delivery Group documentation](docs/resources/daas_delivery_group.md) to configure a delivery group via terraform. +A delivery group is a collection of machines selected from one or more machine catalogs. The delivery group can also specify which users can use those machines, plus the applications and desktops available to those users. Refer to the [DaaS Delivery Group documentation](docs/resources/delivery_group.md) to configure a delivery group via terraform. ## Frequently Asked Questions +#### Which resources are supported for different connection types?
+
+| Connection Type | Hypervisor | Resource Pool | Machine Catalog |
+|-----------------|------------------|------------------|---------------------|
+| AzureRM |:heavy_check_mark:|:heavy_check_mark:| MCS / Power Managed |
+| AWS EC2 |:heavy_check_mark:|:heavy_check_mark:| in progress |
+| GCP |:heavy_check_mark:|:heavy_check_mark:| MCS / Power Managed |
+| vSphere |:heavy_check_mark:| in progress | Power Managed |
+| XenServer |:heavy_check_mark:|:heavy_check_mark:| Power Managed |
+| Nutanix |:heavy_check_mark:| in progress | Power Managed |
+
+
+#### What provisioning types are supported for machine catalogs?
+- MCS provisioning
+  - Azure
+  - GCP
+- Manual Power Managed
+  - Azure
+  - GCP
+  - AWS EC2
+  - vSphere
+  - XenServer
+  - Nutanix
+- Manual / Remote PC
## Attributions The code in this repository makes use of the following packages: diff --git a/docs/data-sources/daas_admin_scope.md b/docs/data-sources/admin_scope.md similarity index 83% rename from docs/data-sources/daas_admin_scope.md rename to docs/data-sources/admin_scope.md index c1b8cd6..cee5a16 100644 --- a/docs/data-sources/daas_admin_scope.md +++ b/docs/data-sources/admin_scope.md @@ -1,12 +1,12 @@ --- # generated by https://github.com/hashicorp/terraform-plugin-docs -page_title: "citrix_daas_admin_scope Data Source - citrix" +page_title: "citrix_admin_scope Data Source - citrix" subcategory: "" description: |- Data source to get details regarding a specific Administrator scope. --- -# citrix_daas_admin_scope (Data Source) +# citrix_admin_scope (Data Source) Data source to get details regarding a specific Administrator scope. @@ -14,12 +14,12 @@ Data source to get details regarding a specific Administrator scope.
```terraform # Get Admin Scope resource by name -data "citrix_daas_admin_scope" "test_scope_by_name" { +data "citrix_admin_scope" "test_scope_by_name" { name = "All" } # Get Admin Scope resource by id -data "citrix_daas_admin_scope" "test_scope_by_id" { +data "citrix_admin_scope" "test_scope_by_id" { id = "00000000-0000-0000-0000-000000000000" } ``` diff --git a/docs/data-sources/daas_application_folder_details.md b/docs/data-sources/application_folder_details.md similarity index 93% rename from docs/data-sources/daas_application_folder_details.md rename to docs/data-sources/application_folder_details.md index 9a793d8..57d7964 100644 --- a/docs/data-sources/daas_application_folder_details.md +++ b/docs/data-sources/application_folder_details.md @@ -1,12 +1,12 @@ --- # generated by https://github.com/hashicorp/terraform-plugin-docs -page_title: "citrix_daas_application_folder_details Data Source - citrix" +page_title: "citrix_application_folder_details Data Source - citrix" subcategory: "" description: |- Data source for retrieving details of applications belonging to a specific folder. --- -# citrix_daas_application_folder_details (Data Source) +# citrix_application_folder_details (Data Source) Data source for retrieving details of applications belonging to a specific folder. diff --git a/docs/data-sources/daas_vda.md b/docs/data-sources/vda.md similarity index 88% rename from docs/data-sources/daas_vda.md rename to docs/data-sources/vda.md index 1b8892d..639b239 100644 --- a/docs/data-sources/daas_vda.md +++ b/docs/data-sources/vda.md @@ -1,12 +1,12 @@ --- # generated by https://github.com/hashicorp/terraform-plugin-docs -page_title: "citrix_daas_vda Data Source - citrix" +page_title: "citrix_vda Data Source - citrix" subcategory: "" description: |- Data source for the list of VDAs that belong to either a machine catalog or a delivery group. Machine catalog and delivery group cannot be specified at the same time. --- -# citrix_daas_vda (Data Source) +# citrix_vda (Data Source) Data source for the list of VDAs that belong to either a machine catalog or a delivery group. Machine catalog and delivery group cannot be specified at the same time. @@ -14,12 +14,12 @@ Data source for the list of VDAs that belong to either a machine catalog or a de ```terraform # Get VDA resource by machine catalog Name or Id -data "citrix_daas_vda" "vda_by_machine_catalog" { +data "citrix_vda" "vda_by_machine_catalog" { machine_catalog = "{MachineCatalog Name or Id}" } # Get VDA resource by delivery group Name or Id -data "citrix_daas_vda" "vda_by_delivery_group" { +data "citrix_vda" "vda_by_delivery_group" { delivery_group = "{DeliveryGroup Name or Id}" } ``` diff --git a/docs/resources/daas_admin_role.md b/docs/resources/admin_role.md similarity index 87% rename from docs/resources/daas_admin_role.md rename to docs/resources/admin_role.md index 62a5a28..e602c72 100644 --- a/docs/resources/daas_admin_role.md +++ b/docs/resources/admin_role.md @@ -1,25 +1,25 @@ --- # generated by https://github.com/hashicorp/terraform-plugin-docs -page_title: "citrix_daas_admin_role Resource - citrix" +page_title: "citrix_admin_role Resource - citrix" subcategory: "" description: |- Manages an Administrator role. --- -# citrix_daas_admin_role (Resource) +# citrix_admin_role (Resource) Manages an Administrator role. 
## Example Usage ```terraform -resource "citrix_daas_admin_role" "on_prem_example_role" { +resource "citrix_admin_role" "on_prem_example_role" { name = "on_prem_admin_role" description = "Example admin role for citrix onprem" permissions = ["AppGroupApplications_ChangeTags"] } -resource "citrix_daas_admin_role" "cloud_example_role" { +resource "citrix_admin_role" "cloud_example_role" { name = "cloud_admin_role" can_launch_manage = false can_launch_monitor = true @@ -57,5 +57,5 @@ Import is supported using the following syntax: ```shell # Admin Role can be imported by specifying the GUID -terraform import citrix_daas_admin_role.example-admin-role 00000000-0000-0000-0000-000000000000 +terraform import citrix_admin_role.example-admin-role 00000000-0000-0000-0000-000000000000 ``` diff --git a/docs/resources/daas_admin_scope.md b/docs/resources/admin_scope.md similarity index 86% rename from docs/resources/daas_admin_scope.md rename to docs/resources/admin_scope.md index 5c8e6bc..8a15e00 100644 --- a/docs/resources/daas_admin_scope.md +++ b/docs/resources/admin_scope.md @@ -1,19 +1,19 @@ --- # generated by https://github.com/hashicorp/terraform-plugin-docs -page_title: "citrix_daas_admin_scope Resource - citrix" +page_title: "citrix_admin_scope Resource - citrix" subcategory: "" description: |- Manages an Administrator scope. --- -# citrix_daas_admin_scope (Resource) +# citrix_admin_scope (Resource) Manages an Administrator scope. ## Example Usage ```terraform -resource "citrix_daas_admin_scope" "example-admin-scope" { +resource "citrix_admin_scope" "example-admin-scope" { name = "example-admin-scope" description = "Example admin scope for delivery group and machine catalog" scoped_objects = [ @@ -59,5 +59,5 @@ Import is supported using the following syntax: ```shell # Admin Scope can be imported by specifying the GUID -terraform import citrix_daas_admin_scope.example-admin-scope 00000000-0000-0000-0000-000000000000 +terraform import citrix_admin_scope.example-admin-scope 00000000-0000-0000-0000-000000000000 ``` diff --git a/docs/resources/daas_application.md b/docs/resources/application.md similarity index 81% rename from docs/resources/daas_application.md rename to docs/resources/application.md index d7d23c9..c8b844c 100644 --- a/docs/resources/daas_application.md +++ b/docs/resources/application.md @@ -1,29 +1,29 @@ --- # generated by https://github.com/hashicorp/terraform-plugin-docs -page_title: "citrix_daas_application Resource - citrix" +page_title: "citrix_application Resource - citrix" subcategory: "" description: |- Resource for creating and managing applications. --- -# citrix_daas_application (Resource) +# citrix_application (Resource) Resource for creating and managing applications. 
## Example Usage ```terraform -resource "citrix_daas_application" "example-application" { +resource "citrix_application" "example-application" { name = "example-name" description = "example-description" published_name = "example-published-name" - application_folder_path = citrix_daas_application_folder.example-application-folder-1.path + application_folder_path = citrix_application_folder.example-application-folder-1.path installed_app_properties = { command_line_arguments = "" command_line_executable = "" working_directory = "" } - delivery_groups = [citrix_daas_delivery_group.example-delivery-group.id] + delivery_groups = [citrix_delivery_group.example-delivery-group.id] } ``` @@ -64,5 +64,5 @@ Import is supported using the following syntax: ```shell # Application can be imported by specifying the GUID -terraform import citrix_daas_application.example-application b620d505-0d0d-43b1-8c94-5cb21c5ab40d +terraform import citrix_application.example-application b620d505-0d0d-43b1-8c94-5cb21c5ab40d ``` diff --git a/docs/resources/daas_application_folder.md b/docs/resources/application_folder.md similarity index 62% rename from docs/resources/daas_application_folder.md rename to docs/resources/application_folder.md index ddcef9b..5a166fd 100644 --- a/docs/resources/daas_application_folder.md +++ b/docs/resources/application_folder.md @@ -1,25 +1,25 @@ --- # generated by https://github.com/hashicorp/terraform-plugin-docs -page_title: "citrix_daas_application_folder Resource - citrix" +page_title: "citrix_application_folder Resource - citrix" subcategory: "" description: |- Manages an application folder. --- -# citrix_daas_application_folder (Resource) +# citrix_application_folder (Resource) Manages an application folder. ## Example Usage ```terraform -resource "citrix_daas_application_folder" "example-application-folder-1" { +resource "citrix_application_folder" "example-application-folder-1" { name = "example-application-folder-1" } -resource "citrix_daas_application_folder" "example-application-folder-2" { +resource "citrix_application_folder" "example-application-folder-2" { name = "example-application-folder-2" - parent_path = citrix_daas_application_folder.example-application-folder-1.path + parent_path = citrix_application_folder.example-application-folder-1.path } ``` @@ -45,5 +45,5 @@ Import is supported using the following syntax: ```shell # Application Folder can be imported by specifying the GUID -terraform import citrix_daas_application_folder.example-application-folder-1 cd0a00da-dda8-4ba6-a686-936f2c7a3adf +terraform import citrix_application_folder.example-application-folder-1 cd0a00da-dda8-4ba6-a686-936f2c7a3adf ``` diff --git a/docs/resources/daas_aws_hypervisor.md b/docs/resources/aws_hypervisor.md similarity index 79% rename from docs/resources/daas_aws_hypervisor.md rename to docs/resources/aws_hypervisor.md index 1f82c45..6a6e29f 100644 --- a/docs/resources/daas_aws_hypervisor.md +++ b/docs/resources/aws_hypervisor.md @@ -1,12 +1,12 @@ --- # generated by https://github.com/hashicorp/terraform-plugin-docs -page_title: "citrix_daas_aws_hypervisor Resource - citrix" +page_title: "citrix_aws_hypervisor Resource - citrix" subcategory: "" description: |- Manages an AWS hypervisor. --- -# citrix_daas_aws_hypervisor (Resource) +# citrix_aws_hypervisor (Resource) Manages an AWS hypervisor. @@ -14,7 +14,7 @@ Manages an AWS hypervisor. 
```terraform # AWS Hypervisor -resource "citrix_daas_aws_hypervisor" "example-aws-hypervisor" { +resource "citrix_aws_hypervisor" "example-aws-hypervisor" { name = "example-aws-hypervisor" zone = "" api_key = "" @@ -44,5 +44,5 @@ Import is supported using the following syntax: ```shell # AWS Hypervisor can be imported by specifying the GUID -terraform import citrix_daas_aws_hypervisor.example-aws-hypervisor b2339edf-7b00-436e-9c3a-54c987c3526e +terraform import citrix_aws_hypervisor.example-aws-hypervisor b2339edf-7b00-436e-9c3a-54c987c3526e ``` diff --git a/docs/resources/daas_aws_hypervisor_resource_pool.md b/docs/resources/aws_hypervisor_resource_pool.md similarity index 71% rename from docs/resources/daas_aws_hypervisor_resource_pool.md rename to docs/resources/aws_hypervisor_resource_pool.md index 0f0e784..778a669 100644 --- a/docs/resources/daas_aws_hypervisor_resource_pool.md +++ b/docs/resources/aws_hypervisor_resource_pool.md @@ -1,21 +1,21 @@ --- # generated by https://github.com/hashicorp/terraform-plugin-docs -page_title: "citrix_daas_aws_hypervisor_resource_pool Resource - citrix" +page_title: "citrix_aws_hypervisor_resource_pool Resource - citrix" subcategory: "" description: |- Manages a hypervisor resource pool. --- -# citrix_daas_aws_hypervisor_resource_pool (Resource) +# citrix_aws_hypervisor_resource_pool (Resource) Manages a hypervisor resource pool. ## Example Usage ```terraform -resource "citrix_daas_aws_hypervisor_resource_pool" "example-aws-hypervisor-resource-pool" { +resource "citrix_aws_hypervisor_resource_pool" "example-aws-hypervisor-resource-pool" { name = "example-aws-hypervisor-resource-pool" - hypervisor = citrix_daas_aws_hypervisor.example-aws-hypervisor.id + hypervisor = citrix_aws_hypervisor.example-aws-hypervisor.id subnets = [ "10.0.1.0/24", ] @@ -45,5 +45,5 @@ Import is supported using the following syntax: ```shell # Hypervisor Resource Pool can be imported with the format HypervisorId,HypervisorResourcePoolId -terraform import citrix_daas_aws_hypervisor_resource_pool.example-aws-hypervisor-resource-pool sbf0dc45-5c42-45a0-a15d-a3df4ff5da8c,ce571dd9-1a46-4b85-891c-484423322c53 +terraform import citrix_aws_hypervisor_resource_pool.example-aws-hypervisor-resource-pool sbf0dc45-5c42-45a0-a15d-a3df4ff5da8c,ce571dd9-1a46-4b85-891c-484423322c53 ``` diff --git a/docs/resources/daas_azure_hypervisor.md b/docs/resources/azure_hypervisor.md similarity index 83% rename from docs/resources/daas_azure_hypervisor.md rename to docs/resources/azure_hypervisor.md index 7a7b5e6..a39da7a 100644 --- a/docs/resources/daas_azure_hypervisor.md +++ b/docs/resources/azure_hypervisor.md @@ -1,12 +1,12 @@ --- # generated by https://github.com/hashicorp/terraform-plugin-docs -page_title: "citrix_daas_azure_hypervisor Resource - citrix" +page_title: "citrix_azure_hypervisor Resource - citrix" subcategory: "" description: |- Manages an Azure hypervisor. --- -# citrix_daas_azure_hypervisor (Resource) +# citrix_azure_hypervisor (Resource) Manages an Azure hypervisor. @@ -14,7 +14,7 @@ Manages an Azure hypervisor. 
```terraform # Azure Hypervisor -resource "citrix_daas_azure_hypervisor" "example-azure-hypervisor" { +resource "citrix_azure_hypervisor" "example-azure-hypervisor" { name = "example-azure-hypervisor" zone = "" active_directory_id = "" @@ -50,5 +50,5 @@ Import is supported using the following syntax: ```shell # Azure Hypervisor can be imported by specifying the GUID -terraform import citrix_daas_azure_hypervisor.example-azure-hypervisor b2339edf-7b00-436e-9c3a-54c987c3526e +terraform import citrix_azure_hypervisor.example-azure-hypervisor b2339edf-7b00-436e-9c3a-54c987c3526e ``` diff --git a/docs/resources/daas_azure_hypervisor_resource_pool.md b/docs/resources/azure_hypervisor_resource_pool.md similarity index 73% rename from docs/resources/daas_azure_hypervisor_resource_pool.md rename to docs/resources/azure_hypervisor_resource_pool.md index 153a9a0..9e52fe2 100644 --- a/docs/resources/daas_azure_hypervisor_resource_pool.md +++ b/docs/resources/azure_hypervisor_resource_pool.md @@ -1,21 +1,21 @@ --- # generated by https://github.com/hashicorp/terraform-plugin-docs -page_title: "citrix_daas_azure_hypervisor_resource_pool Resource - citrix" +page_title: "citrix_azure_hypervisor_resource_pool Resource - citrix" subcategory: "" description: |- Manages an Azure hypervisor resource pool. --- -# citrix_daas_azure_hypervisor_resource_pool (Resource) +# citrix_azure_hypervisor_resource_pool (Resource) Manages an Azure hypervisor resource pool. ## Example Usage ```terraform -resource "citrix_daas_azure_hypervisor_resource_pool" "example-azure-hypervisor-resource-pool" { +resource "citrix_azure_hypervisor_resource_pool" "example-azure-hypervisor-resource-pool" { name = "example-azure-hypervisor-resource-pool" - hypervisor = citrix_daas_azure_hypervisor.example-azure-hypervisor.id + hypervisor = citrix_azure_hypervisor.example-azure-hypervisor.id region = "East US" virtual_network_resource_group = "" virtual_network = "" @@ -48,5 +48,5 @@ Import is supported using the following syntax: ```shell # Hypervisor Resource Pool can be imported with the format HypervisorId,HypervisorResourcePoolId -terraform import citrix_daas_azure_hypervisor_resource_pool.example-azure-hypervisor-resource-pool sbf0dc45-5c42-45a0-a15d-a3df4ff5da8c,ce571dd9-1a46-4b85-891c-484423322c53 +terraform import citrix_azure_hypervisor_resource_pool.example-azure-hypervisor-resource-pool sbf0dc45-5c42-45a0-a15d-a3df4ff5da8c,ce571dd9-1a46-4b85-891c-484423322c53 ``` diff --git a/docs/resources/daas_delivery_group.md b/docs/resources/delivery_group.md similarity index 97% rename from docs/resources/daas_delivery_group.md rename to docs/resources/delivery_group.md index e3fb371..a4490bd 100644 --- a/docs/resources/daas_delivery_group.md +++ b/docs/resources/delivery_group.md @@ -1,23 +1,23 @@ --- # generated by https://github.com/hashicorp/terraform-plugin-docs -page_title: "citrix_daas_delivery_group Resource - citrix" +page_title: "citrix_delivery_group Resource - citrix" subcategory: "" description: |- Manages a delivery group. --- -# citrix_daas_delivery_group (Resource) +# citrix_delivery_group (Resource) Manages a delivery group. 
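A delivery group draws its machines from one or more machine catalogs and, as of this release, can also be associated with a Citrix policy set through the optional `policy_set_id` attribute. Below is a minimal sketch focused on that association, assuming a `citrix_policy_set` resource named `example-policy-set` and a machine catalog resource named `example-azure-mtsession` (illustrative names; the full example that follows shows the complete schema):

```terraform
resource "citrix_delivery_group" "example-minimal" {
  name = "example-minimal-delivery-group"
  associated_machine_catalogs = [
    {
      # Reference an existing machine catalog by its GUID
      machine_catalog = citrix_machine_catalog.example-azure-mtsession.id
      machine_count   = 1
    }
  ]
  # Attach the policy set; the policies in it then apply to this delivery group
  policy_set_id = citrix_policy_set.example-policy-set.id
}
```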
## Example Usage ```terraform -resource "citrix_daas_delivery_group" "example-delivery-group" { +resource "citrix_delivery_group" "example-delivery-group" { name = "example-delivery-group" associated_machine_catalogs = [ { - machine_catalog = citrix_daas_machine_catalog.example-azure-mtsession.id + machine_catalog = citrix_machine_catalog.example-azure-mtsession.id machine_count = 1 } ] @@ -114,6 +114,7 @@ resource "citrix_daas_delivery_group" "example-delivery-group" { } ] + policy_set_id = citrix_policy_set.example-policy-set.id } ``` @@ -131,6 +132,7 @@ resource "citrix_daas_delivery_group" "example-delivery-group" { - `autoscale_settings` (Attributes) The power management settings governing the machine(s) in the delivery group. (see [below for nested schema](#nestedatt--autoscale_settings)) - `description` (String) Description of the delivery group. - `desktops` (Attributes List) A list of Desktop resources to publish on the delivery group. Only 1 desktop can be added to a Remote PC Delivery Group. (see [below for nested schema](#nestedatt--desktops)) +- `policy_set_id` (String) GUID identifier of the policy set. - `reboot_schedules` (Attributes List) The reboot schedule for the delivery group. (see [below for nested schema](#nestedatt--reboot_schedules)) - `restricted_access_users` (Attributes) Restrict access to this Delivery Group by specifying users and groups in the allow and block list. If no value is specified, all authenticated users will have access to this Delivery Group. To give access to unauthenticated users, use the `allow_anonymous_access` property. (see [below for nested schema](#nestedatt--restricted_access_users)) @@ -279,5 +281,5 @@ Import is supported using the following syntax: ```shell # Delivery Group can be imported by specifying the GUID -terraform import citrix_daas_delivery_group.example-delivery-group a92ac0d6-9a0f-477a-a504-07cae8fccb81 +terraform import citrix_delivery_group.example-delivery-group a92ac0d6-9a0f-477a-a504-07cae8fccb81 ``` diff --git a/docs/resources/daas_gcp_hypervisor.md b/docs/resources/gcp_hypervisor.md similarity index 79% rename from docs/resources/daas_gcp_hypervisor.md rename to docs/resources/gcp_hypervisor.md index d20d6bf..808ff20 100644 --- a/docs/resources/daas_gcp_hypervisor.md +++ b/docs/resources/gcp_hypervisor.md @@ -1,12 +1,12 @@ --- # generated by https://github.com/hashicorp/terraform-plugin-docs -page_title: "citrix_daas_gcp_hypervisor Resource - citrix" +page_title: "citrix_gcp_hypervisor Resource - citrix" subcategory: "" description: |- Manages a GCP hypervisor. --- -# citrix_daas_gcp_hypervisor (Resource) +# citrix_gcp_hypervisor (Resource) Manages a GCP hypervisor. @@ -14,7 +14,7 @@ Manages a GCP hypervisor. 
```terraform # GCP Hypervisor -resource "citrix_daas_gcp_hypervisor" "example-gcp-hypervisor" { +resource "citrix_gcp_hypervisor" "example-gcp-hypervisor" { name = "example-gcp-hypervisor" zone = "" service_account_id = "" @@ -42,5 +42,5 @@ Import is supported using the following syntax: ```shell # Hypervisor can be imported by specifying the GUID -terraform import citrix_daas_gcp_hypervisor.example-gcp-hypervisor b2339edf-7b00-436e-9c3a-54c987c3526e +terraform import citrix_gcp_hypervisor.example-gcp-hypervisor b2339edf-7b00-436e-9c3a-54c987c3526e ``` diff --git a/docs/resources/daas_gcp_hypervisor_resource_pool.md b/docs/resources/gcp_hypervisor_resource_pool.md similarity index 73% rename from docs/resources/daas_gcp_hypervisor_resource_pool.md rename to docs/resources/gcp_hypervisor_resource_pool.md index 56cb8d4..cd8f160 100644 --- a/docs/resources/daas_gcp_hypervisor_resource_pool.md +++ b/docs/resources/gcp_hypervisor_resource_pool.md @@ -1,21 +1,21 @@ --- # generated by https://github.com/hashicorp/terraform-plugin-docs -page_title: "citrix_daas_gcp_hypervisor_resource_pool Resource - citrix" +page_title: "citrix_gcp_hypervisor_resource_pool Resource - citrix" subcategory: "" description: |- Manages a hypervisor resource pool. --- -# citrix_daas_gcp_hypervisor_resource_pool (Resource) +# citrix_gcp_hypervisor_resource_pool (Resource) Manages a hypervisor resource pool. ## Example Usage ```terraform -resource "citrix_daas_gcp_hypervisor_resource_pool" "example-gcp-hypervisor-resource-pool" { +resource "citrix_gcp_hypervisor_resource_pool" "example-gcp-hypervisor-resource-pool" { name = "example-gcp-hypervisor-resource-pool" - hypervisor = citrix_daas_gcp_hypervisor.example-gcp-hypervisor.id + hypervisor = citrix_gcp_hypervisor.example-gcp-hypervisor.id project_name = "10000-example-gcp-project" region = "us-east1" subnets = [ @@ -51,5 +51,5 @@ Import is supported using the following syntax: ```shell # Hypervisor Resource Pool can be imported with the format HypervisorId,HypervisorResourcePoolId -terraform import citrix_daas_gcp_hypervisor_resource_pool.example-gcp-hypervisor-resource-pool sbf0dc45-5c42-45a0-a15d-a3df4ff5da8c,ce571dd9-1a46-4b85-891c-484423322c53 +terraform import citrix_gcp_hypervisor_resource_pool.example-gcp-hypervisor-resource-pool sbf0dc45-5c42-45a0-a15d-a3df4ff5da8c,ce571dd9-1a46-4b85-891c-484423322c53 ``` diff --git a/docs/resources/daas_machine_catalog.md b/docs/resources/machine_catalog.md similarity index 91% rename from docs/resources/daas_machine_catalog.md rename to docs/resources/machine_catalog.md index b8a4f84..3cfd33c 100644 --- a/docs/resources/daas_machine_catalog.md +++ b/docs/resources/machine_catalog.md @@ -1,19 +1,19 @@ --- # generated by https://github.com/hashicorp/terraform-plugin-docs -page_title: "citrix_daas_machine_catalog Resource - citrix" +page_title: "citrix_machine_catalog Resource - citrix" subcategory: "" description: |- Manages a machine catalog. --- -# citrix_daas_machine_catalog (Resource) +# citrix_machine_catalog (Resource) Manages a machine catalog. 
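Machine catalogs can be provisioned through MCS or populated manually. For manual power managed catalogs, each machine entry supplies the machine's Computer AD account (`machine_account`) plus the hypervisor-specific placement attributes described in the schema below. The following is a minimal sketch of a manual power managed catalog on vSphere, assuming hypothetical domain, datacenter, and host values (the full examples that follow cover MCS and the other variations):

```terraform
resource "citrix_machine_catalog" "example-vsphere-manual" {
  name              = "example-vsphere-manual"
  description       = "Example manual power managed catalog on vSphere"
  zone              = "<zone Id>"
  allocation_type   = "Random"
  session_support   = "MultiSession"
  is_power_managed  = true
  is_remote_pc      = false
  provisioning_type = "Manual"
  machine_accounts = [
    {
      hypervisor = citrix_vsphere_hypervisor.example-vsphere-hypervisor.id
      machines = [
        {
          machine_account = "DOMAIN\\Machine1"  # Computer AD account, DOMAIN\MACHINE format
          machine_name    = "Machine1"          # required when is_power_managed = true
          datacenter      = "<datacenter name>" # vSphere placement, required when power managed
          host            = "<host FQDN>"
        }
      ]
    }
  ]
}
```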
## Example Usage ```terraform -resource "citrix_daas_machine_catalog" "example-azure-mtsession" { +resource "citrix_machine_catalog" "example-azure-mtsession" { name = "example-azure-mtsession" description = "Example multi-session catalog on Azure hypervisor" zone = "" @@ -23,8 +23,8 @@ resource "citrix_daas_machine_catalog" "example-azure-mtsession" { is_remote_pc = false provisioning_type = "MCS" provisioning_scheme = { - hypervisor = citrix_daas_azure_hypervisor.example-azure-hypervisor.id - hypervisor_resource_pool = citrix_daas_hypervisor_resource_pool.example-azure-hypervisor-resource-pool.id + hypervisor = citrix_azure_hypervisor.example-azure-hypervisor.id + hypervisor_resource_pool = citrix_azure_hypervisor_resource_pool.example-azure-hypervisor-resource-pool.id identity_type = "ActiveDirectory" machine_domain_identity = { domain = "" @@ -62,7 +62,7 @@ resource "citrix_daas_machine_catalog" "example-azure-mtsession" { } } -resource "citrix_daas_machine_catalog" "example-gcp-mtsession" { +resource "citrix_machine_catalog" "example-gcp-mtsession" { name = "example-gcp-mtsession" description = "Example multi-session catalog on GCP hypervisor" zone = "" @@ -72,8 +72,8 @@ resource "citrix_daas_machine_catalog" "example-gcp-mtsession" { is_remote_pc = false provisioning_type = "MCS" provisioning_scheme = { - hypervisor = citrix_daas_gcp_hypervisor.example-gcp-hypervisor.id - hypervisor_resource_pool = citrix_daas_hypervisor_resource_pool.example-gcp-hypervisor-resource-pool.id + hypervisor = citrix_gcp_hypervisor.example-gcp-hypervisor.id + hypervisor_resource_pool = citrix_gcp_hypervisor_resource_pool.example-gcp-hypervisor-resource-pool.id identity_type = "ActiveDirectory" machine_domain_identity = { domain = "" @@ -103,7 +103,7 @@ resource "citrix_daas_machine_catalog" "example-gcp-mtsession" { } } -resource "citrix_daas_machine_catalog" "example-manual-power-managed-mtsession" { +resource "citrix_machine_catalog" "example-manual-power-managed-mtsession" { name = "example-manual-power-managed-mtsession" description = "Example manual power managed multi-session catalog" zone = "" @@ -114,7 +114,7 @@ resource "citrix_daas_machine_catalog" "example-manual-power-managed-mtsession" provisioning_type = "Manual" machine_accounts = [ { - hypervisor = citrix_daas_azure_hypervisor.example-azure-hypervisor.id + hypervisor = citrix_azure_hypervisor.example-azure-hypervisor.id machines = [ { region = "East US" @@ -126,7 +126,7 @@ resource "citrix_daas_machine_catalog" "example-manual-power-managed-mtsession" ] } -resource "citrix_daas_machine_catalog" "example-manual-non-power-managed-mtsession" { +resource "citrix_machine_catalog" "example-manual-non-power-managed-mtsession" { name = "example-manual-non-power-managed-mtsession" description = "Example manual non power managed multi-session catalog" zone = "" @@ -149,7 +149,7 @@ resource "citrix_daas_machine_catalog" "example-manual-non-power-managed-mtsessi ] } -resource "citrix_daas_machine_catalog" "example-remote-pc" { +resource "citrix_machine_catalog" "example-remote-pc" { name = "example-remote-pc-catalog" description = "Example Remote PC catalog" zone = "" @@ -220,11 +220,15 @@ Optional: Required: -- `machine_name` (String) The name of the machine. Must be in format DOMAIN\MACHINE. +- `machine_account` (String) The Computer AD Account for the machine. Must be in the format DOMAIN\MACHINE. Optional: - `availability_zone` (String) **[AWS: Required]** The availability zone in which the machine resides.
Required only if `is_power_managed = true` +- `cluster` (String) **[vSphere: Optional]** The cluster in which the machine resides. To be used only if `is_power_managed = true` +- `datacenter` (String) **[vSphere: Required]** The datacenter in which the machine resides. Required only if `is_power_managed = true` +- `host` (String) **[vSphere: Required]** The IP address or FQDN of the host in which the machine resides. Required only if `is_power_managed = true` +- `machine_name` (String) The name of the machine. Required only if `is_power_managed = true` - `project_name` (String) **[GCP: Required]** The project name in which the machine resides. Required only if `is_power_managed = true` - `region` (String) **[Azure, GCP: Required]** The region in which the machine resides. Required only if `is_power_managed = true` - `resource_group_name` (String) **[Azure: Required]** The resource group in which the machine resides. Required only if `is_power_managed = true` @@ -238,7 +242,7 @@ Required: - `hypervisor` (String) Id of the hypervisor for creating the machines. Required only if using power managed machines. - `hypervisor_resource_pool` (String) Id of the hypervisor resource pool that will be used for provisioning operations. -- `identity_type` (String) The identity type of the machines to be created. Supported values are`ActiveDirectory` and `AzureAD`. +- `identity_type` (String) The identity type of the machines to be created. Supported values are `ActiveDirectory`, `AzureAD`, and `HybridAzureAD`. - `machine_account_creation_rules` (Attributes) Rules specifying how Active Directory machine accounts should be created when machines are provisioned. (see [below for nested schema](#nestedatt--provisioning_scheme--machine_account_creation_rules)) - `number_of_total_machines` (Number) Number of VDA machines allocated in the catalog. @@ -400,5 +404,5 @@ Import is supported using the following syntax: ```shell # Machine catalog can be imported by specifying the GUID -terraform import citrix_daas_machine_catalog.example b2339edf-7b00-436e-9c3a-54c987c3526e +terraform import citrix_machine_catalog.example b2339edf-7b00-436e-9c3a-54c987c3526e ``` diff --git a/docs/resources/nutanix_hypervisor.md b/docs/resources/nutanix_hypervisor.md new file mode 100644 index 0000000..ad19429 --- /dev/null +++ b/docs/resources/nutanix_hypervisor.md @@ -0,0 +1,57 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "citrix_nutanix_hypervisor Resource - citrix" +subcategory: "" +description: |- + Manages a Nutanix hypervisor. +--- + +# citrix_nutanix_hypervisor (Resource) + +Manages a Nutanix hypervisor. + +## Example Usage + +```terraform +# Nutanix Hypervisor +resource "citrix_nutanix_hypervisor" "example-nutanix-hypervisor" { + name = "example-nutanix-hypervisor" + zone = "" + username = "" + password = "" + password_format = "PlainText" + addresses = ["10.122.36.26"] + max_absolute_active_actions = 20 +} +``` + + +## Schema + +### Required + +- `addresses` (List of String) Hypervisor address(es). At least one is required. +- `name` (String) Name of the hypervisor. +- `password` (String) Password of the hypervisor. +- `password_format` (String) Password format of the hypervisor. Choose between Base64 and PlainText. +- `username` (String) Username of the hypervisor. +- `zone` (String) Id of the zone the hypervisor is associated with. + +### Optional + +- `max_absolute_active_actions` (Number) Maximum number of actions that can execute in parallel on the hypervisor. Default is 100.
+- `max_absolute_new_actions_per_minute` (Number) Maximum number of actions that can be started on the hypervisor per minute. Default is 10. +- `max_power_actions_percentage_of_machines` (Number) Maximum percentage of machines on the hypervisor which can have their power state changed simultaneously. Default is 20. + +### Read-Only + +- `id` (String) GUID identifier of the hypervisor. + +## Import + +Import is supported using the following syntax: + +```shell +# Nutanix Hypervisor can be imported by specifying the GUID +terraform import citrix_nutanix_hypervisor.example-nutanix-hypervisor b2339edf-7b00-436e-9c3a-54c987c3526e +``` diff --git a/docs/resources/policy_set.md b/docs/resources/policy_set.md new file mode 100644 index 0000000..ee41d87 --- /dev/null +++ b/docs/resources/policy_set.md @@ -0,0 +1,119 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "citrix_policy_set Resource - citrix" +subcategory: "" +description: |- + Manages a policy set and the policies within it. +--- + +# citrix_policy_set (Resource) + +Manages a policy set and the policies within it. + +## Example Usage + +```terraform +resource "citrix_policy_set" "example-policy-set" { + name = "example-policy-set" + description = "This is an example policy set description" + type = "DeliveryGroupPolicies" + scopes = [ "All", citrix_admin_scope.example-admin-scope.name ] + policies = [ + { + name = "test-policy-with-priority-0" + description = "Test policy in the example policy set with priority 0" + is_enabled = true + policy_settings = [ + { + name = "AdvanceWarningPeriod" + value = "13:00:00" + use_default = false + }, + ] + policy_filters = [ + { + type = "DesktopGroup" + data = jsonencode({ + "server" = "20.185.46.142" + "uuid" = citrix_delivery_group.example-delivery-group.id + }) + is_enabled = true + is_allowed = true + }, + ] + }, + { + name = "test-policy-with-priority-1" + description = "Test policy in the example policy set with priority 1" + is_enabled = false + policy_settings = [] + policy_filters = [] + } + ] +} +``` + + +## Schema + +### Required + +- `name` (String) Name of the policy set. +- `policies` (Attributes List) Ordered list of policies. (see [below for nested schema](#nestedatt--policies)) +- `scopes` (Set of String) The names of the scopes that the policy set applies to. +- `type` (String) Type of the policy set. Type can be one of `SitePolicies`, `DeliveryGroupPolicies`, `SiteTemplates`, or `CustomTemplates`. + +### Optional + +- `description` (String) Description of the policy set. + +### Read-Only + +- `id` (String) GUID identifier of the policy set. +- `is_assigned` (Boolean) Indicates whether the policy set is assigned to delivery groups. + + +### Nested Schema for `policies` + +Required: + +- `is_enabled` (Boolean) Indicates whether the policy is enabled. +- `name` (String) Name of the policy. +- `policy_filters` (Attributes Set) Set of policy filters. (see [below for nested schema](#nestedatt--policies--policy_filters)) +- `policy_settings` (Attributes Set) Set of policy settings. (see [below for nested schema](#nestedatt--policies--policy_settings)) +
+Optional: + +- `description` (String) Description of the policy. + + +### Nested Schema for `policies.policy_filters` + +Required: + +- `is_allowed` (Boolean) Indicates whether the filtered policy is allowed or denied when the filter condition is met. +- `is_enabled` (Boolean) Indicates whether the policy filter is enabled. +- `type` (String) Type of the policy filter.
Type can be one of `AccessControl`, `BranchRepeater`, `ClientIP`, `ClientName`, `DesktopGroup`, `DesktopKind`, `OU`, `User`, and `DesktopTag`. + +Optional: + +- `data` (String) Data of the policy filter. + + + +### Nested Schema for `policies.policy_settings` + +Required: + +- `name` (String) Name of the policy setting. +- `use_default` (Boolean) Indicates whether to use the default value for the policy setting. +- `value` (String) Value of the policy setting. + +## Import + +Import is supported using the following syntax: + +```shell +# Policy and Policy Set Association can be imported by specifying the Policy GUID +terraform import citrix_policy_set.example 00000000-0000-0000-0000-000000000000 +``` diff --git a/docs/resources/vsphere_hypervisor.md b/docs/resources/vsphere_hypervisor.md new file mode 100644 index 0000000..4994d3d --- /dev/null +++ b/docs/resources/vsphere_hypervisor.md @@ -0,0 +1,58 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "citrix_vsphere_hypervisor Resource - citrix" +subcategory: "" +description: |- + Manages a vSphere hypervisor. +--- + +# citrix_vsphere_hypervisor (Resource) + +Manages a vSphere hypervisor. + +## Example Usage + +```terraform +# vSphere Hypervisor +resource "citrix_vsphere_hypervisor" "example-vsphere-hypervisor" { + name = "example-vsphere-hypervisor" + zone = "" + username = "" + password = "" + password_format = "PlainText" + addresses = ["https://10.36.122.45"] + max_absolute_active_actions = 20 +} +``` + + +## Schema + +### Required + +- `addresses` (List of String) Hypervisor address(es). At least one is required. +- `name` (String) Name of the hypervisor. +- `password` (String) Password of the hypervisor. +- `password_format` (String) Password format of the hypervisor. Choose between Base64 and PlainText. +- `username` (String) Username of the hypervisor. +- `zone` (String) Id of the zone the hypervisor is associated with. + +### Optional + +- `max_absolute_active_actions` (Number) Maximum number of actions that can execute in parallel on the hypervisor. Default is 40. +- `max_absolute_new_actions_per_minute` (Number) Maximum number of actions that can be started on the hypervisor per minute. Default is 10. +- `max_power_actions_percentage_of_machines` (Number) Maximum percentage of machines on the hypervisor which can have their power state changed simultaneously. Default is 20. +- `ssl_thumbprints` (List of String) SSL certificate thumbprints to consider acceptable for this connection. If not specified, and the hypervisor uses SSL for its connection, the SSL certificate's root certification authority and any intermediate certificates must be trusted. + +### Read-Only + +- `id` (String) GUID identifier of the hypervisor. + +## Import + +Import is supported using the following syntax: + +```shell +# vSphere Hypervisor can be imported by specifying the GUID +terraform import citrix_vsphere_hypervisor.example-vsphere-hypervisor b2339edf-7b00-436e-9c3a-54c987c3526e +``` diff --git a/docs/resources/xenserver_hypervisor.md b/docs/resources/xenserver_hypervisor.md new file mode 100644 index 0000000..55aa2d0 --- /dev/null +++ b/docs/resources/xenserver_hypervisor.md @@ -0,0 +1,62 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "citrix_xenserver_hypervisor Resource - citrix" +subcategory: "" +description: |- + Manages a XenServer hypervisor. +--- + +# citrix_xenserver_hypervisor (Resource) + +Manages a XenServer hypervisor.
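Per the support matrix in the README, XenServer machine catalogs in this release are manual power managed rather than MCS provisioned, so a XenServer hypervisor is typically consumed from a catalog's `machine_accounts` list. Below is a minimal sketch under that assumption, with hypothetical machine names; it assumes XenServer power management needs only the machine account and machine name (the hypervisor itself is configured in the example that follows):

```terraform
resource "citrix_machine_catalog" "example-xenserver-manual" {
  name              = "example-xenserver-manual"
  zone              = "<zone Id>"
  allocation_type   = "Random"
  session_support   = "MultiSession"
  is_power_managed  = true
  is_remote_pc      = false
  provisioning_type = "Manual"
  machine_accounts = [
    {
      # Power management is delegated to the XenServer hypervisor connection
      hypervisor = citrix_xenserver_hypervisor.example-xenserver-hypervisor.id
      machines = [
        {
          machine_account = "DOMAIN\\Machine1" # Computer AD account, DOMAIN\MACHINE format
          machine_name    = "Machine1"         # required when is_power_managed = true
        }
      ]
    }
  ]
}
```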
+ +## Example Usage + +```terraform +# XenServer Hypervisor +resource "citrix_xenserver_hypervisor" "example-xenserver-hypervisor" { + name = "example-xenserver-hypervisor" + zone = "" + username = "" + password = "" + password_format = "PlainText" + addresses = [ + "http://" + ] + ssl_thumbprints = [ + "" + ] +} +``` + + +## Schema + +### Required + +- `addresses` (List of String) Hypervisor address(es). At least one is required. +- `name` (String) Name of the hypervisor. +- `password` (String) Password of the hypervisor. +- `password_format` (String) Password format of the hypervisor. Choose between Base64 and PlainText. +- `username` (String) Username of the hypervisor. +- `zone` (String) Id of the zone the hypervisor is associated with. + +### Optional + +- `max_absolute_active_actions` (Number) Maximum number of actions that can execute in parallel on the hypervisor. Default is 40. +- `max_absolute_new_actions_per_minute` (Number) Maximum number of actions that can be started on the hypervisor per-minute. Default is 10. +- `max_power_actions_percentage_of_machines` (Number) Maximum percentage of machines on the hypervisor which can have their power state changed simultaneously. Default is 20. +- `ssl_thumbprints` (List of String) SSL certificate thumbprints to consider acceptable for this connection. If not specified, and the hypervisor uses SSL for its connection, the SSL certificate's root certification authority and any intermediate certificates must be trusted. + +### Read-Only + +- `id` (String) GUID identifier of the hypervisor. + +## Import + +Import is supported using the following syntax: + +```shell +# XenServer Hypervisor can be imported by specifying the GUID +terraform import citrix_xenserver_hypervisor.example-xenserver-hypervisor b2339edf-7b00-436e-9c3a-54c987c3526e +``` diff --git a/docs/resources/xenserver_hypervisor_resource_pool.md b/docs/resources/xenserver_hypervisor_resource_pool.md new file mode 100644 index 0000000..3db4048 --- /dev/null +++ b/docs/resources/xenserver_hypervisor_resource_pool.md @@ -0,0 +1,59 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "citrix_xenserver_hypervisor_resource_pool Resource - citrix" +subcategory: "" +description: |- + Manages a XenServer hypervisor resource pool. +--- + +# citrix_xenserver_hypervisor_resource_pool (Resource) + +Manages a XenServer hypervisor resource pool. + +## Example Usage + +```terraform +resource "citrix_xenserver_hypervisor_resource_pool" "example-xenserver-hypervisor-resource-pool" { + name = "example-xenserver-hypervisor-resource-pool" + hypervisor = citrix_xenserver_hypervisor.example-xenserver-hypervisor.id + networks = [ + "", + "" + ] + storage = [ + "" + ] + temporary_storage = [ + "" + ] + use_local_storage_caching = false +} +``` + + +## Schema + +### Required + +- `hypervisor` (String) Id of the hypervisor for which the resource pool needs to be created. +- `name` (String) Name of the resource pool. Name should be unique across all hypervisors. +- `networks` (List of String) List of networks for allocating resources. +- `storage` (List of String) List of hypervisor storage to use for OS data. +- `temporary_storage` (List of String) List of hypervisor storage to use for temporary data. + +### Optional + +- `use_local_storage_caching` (Boolean) Indicates whether IntelliCache is enabled to reduce load on the shared storage device. This setting is only effective when shared storage is used. + +### Read-Only + +- `id` (String) GUID identifier of the resource pool.
+ +## Import + +Import is supported using the following syntax: + +```shell +# Hypervisor Resource Pool can be imported with the format HypervisorId,HypervisorResourcePoolId +terraform import citrix_xenserver_hypervisor_resource_pool.example-xenserver-hypervisor-resource-pool sbf0dc45-5c42-45a0-a15d-a3df4ff5da8c,ce571dd9-1a46-4b85-891c-484423322c53 +``` diff --git a/docs/resources/daas_zone.md b/docs/resources/zone.md similarity index 88% rename from docs/resources/daas_zone.md rename to docs/resources/zone.md index ebe7875..69c46f0 100644 --- a/docs/resources/daas_zone.md +++ b/docs/resources/zone.md @@ -1,13 +1,13 @@ --- # generated by https://github.com/hashicorp/terraform-plugin-docs -page_title: "citrix_daas_zone Resource - citrix" +page_title: "citrix_zone Resource - citrix" subcategory: "" description: |- Manages a zone. For cloud DDC, Zones and Cloud Connectors are managed only by Citrix Cloud. Ensure you have a resource location manually created and connectors deployed in it. You may then apply or import the zone using the zone Id. --- -# citrix_daas_zone (Resource) +# citrix_zone (Resource) Manages a zone. For cloud DDC, Zones and Cloud Connectors are managed only by Citrix Cloud. Ensure you have a resource location manually created and connectors deployed in it. You may then apply or import the zone using the zone Id. @@ -15,7 +15,7 @@ For cloud DDC, Zones and Cloud Connectors are managed only by Citrix Cloud. Ensu ## Example Usage ```terraform -resource "citrix_daas_zone" "example-zone" { +resource "citrix_zone" "example-zone" { name = "example-zone" description = "zone example" metadata = [ @@ -59,5 +59,5 @@ Import is supported using the following syntax: ```shell # Zone can be imported by specifying the GUID -terraform import citrix_daas_zone.example-zone 06e5981e-dbaf-48db-b134-245fca2dc672 +terraform import citrix_zone.example-zone 06e5981e-dbaf-48db-b134-245fca2dc672 ``` diff --git a/examples/daas/basic_azure_mcs_vda/delivery_group.tf b/examples/daas/basic_azure_mcs_vda/delivery_group.tf index 9942aa1..c9be1b5 100644 --- a/examples/daas/basic_azure_mcs_vda/delivery_group.tf +++ b/examples/daas/basic_azure_mcs_vda/delivery_group.tf @@ -1,8 +1,8 @@ -resource "citrix_daas_delivery_group" "example-delivery-group" { +resource "citrix_delivery_group" "example-delivery-group" { name = "example-delivery-group" associated_machine_catalogs = [ { - machine_catalog = citrix_daas_machine_catalog.example-catalog.id + machine_catalog = citrix_machine_catalog.example-catalog.id machine_count = 1 } ] diff --git a/examples/daas/basic_azure_mcs_vda/hypervisors.tf b/examples/daas/basic_azure_mcs_vda/hypervisors.tf index 9099675..175b8db 100644 --- a/examples/daas/basic_azure_mcs_vda/hypervisors.tf +++ b/examples/daas/basic_azure_mcs_vda/hypervisors.tf @@ -1,6 +1,6 @@ -resource "citrix_daas_azure_hypervisor" "example-azure-hypervisor" { +resource "citrix_azure_hypervisor" "example-azure-hypervisor" { name = "example-azure-hyperv" - zone = citrix_daas_zone.example-zone.id + zone = citrix_zone.example-zone.id application_id = "" application_secret = "" subscription_id = "" diff --git a/examples/daas/basic_azure_mcs_vda/machine_catalogs.tf b/examples/daas/basic_azure_mcs_vda/machine_catalogs.tf index 862ffbb..0572011 100644 --- a/examples/daas/basic_azure_mcs_vda/machine_catalogs.tf +++ b/examples/daas/basic_azure_mcs_vda/machine_catalogs.tf @@ -1,4 +1,4 @@ -resource "citrix_daas_machine_catalog" "example-catalog" { +resource "citrix_machine_catalog" "example-catalog" { name = "example-catalog" 
description = "description for example catalog" allocation_type = "Random" @@ -6,10 +6,10 @@ resource "citrix_daas_machine_catalog" "example-catalog" { is_power_managed = true is_remote_pc = false provisioning_type = "MCS" - zone = citrix_daas_zone.example-zone.id + zone = citrix_zone.example-zone.id provisioning_scheme = { - hypervisor = citrix_daas_azure_hypervisor.example-azure-hypervisor.id - hypervisor_resource_pool = citrix_daas_azure_hypervisor_resource_pool.example-azure-rp.id + hypervisor = citrix_azure_hypervisor.example-azure-hypervisor.id + hypervisor_resource_pool = citrix_azure_hypervisor_resource_pool.example-azure-rp.id identity_type = "ActiveDirectory" machine_domain_identity = { domain = "" diff --git a/examples/daas/basic_azure_mcs_vda/resource_pools.tf b/examples/daas/basic_azure_mcs_vda/resource_pools.tf index e2a9317..d009288 100644 --- a/examples/daas/basic_azure_mcs_vda/resource_pools.tf +++ b/examples/daas/basic_azure_mcs_vda/resource_pools.tf @@ -1,6 +1,6 @@ -resource "citrix_daas_azure_hypervisor_resource_pool" "example-azure-rp" { +resource "citrix_azure_hypervisor_resource_pool" "example-azure-rp" { name = "example-azure-rp" - hypervisor = citrix_daas_azure_hypervisor.example-azure-hypervisor.id + hypervisor = citrix_azure_hypervisor.example-azure-hypervisor.id region = "" virtual_network_resource_group = "" virtual_network = "" diff --git a/examples/daas/basic_azure_mcs_vda/zones.tf b/examples/daas/basic_azure_mcs_vda/zones.tf index 677bddd..b3f0728 100644 --- a/examples/daas/basic_azure_mcs_vda/zones.tf +++ b/examples/daas/basic_azure_mcs_vda/zones.tf @@ -1,4 +1,4 @@ -resource "citrix_daas_zone" "example-zone" { +resource "citrix_zone" "example-zone" { name = "example zone" description = "description for example zone" } diff --git a/examples/daas/basic_gcp_mcs_vda/delivery_group.tf b/examples/daas/basic_gcp_mcs_vda/delivery_group.tf index 9942aa1..c9be1b5 100644 --- a/examples/daas/basic_gcp_mcs_vda/delivery_group.tf +++ b/examples/daas/basic_gcp_mcs_vda/delivery_group.tf @@ -1,8 +1,8 @@ -resource "citrix_daas_delivery_group" "example-delivery-group" { +resource "citrix_delivery_group" "example-delivery-group" { name = "example-delivery-group" associated_machine_catalogs = [ { - machine_catalog = citrix_daas_machine_catalog.example-catalog.id + machine_catalog = citrix_machine_catalog.example-catalog.id machine_count = 1 } ] diff --git a/examples/daas/basic_gcp_mcs_vda/hypervisors.tf b/examples/daas/basic_gcp_mcs_vda/hypervisors.tf index 2fb5df0..c90741f 100644 --- a/examples/daas/basic_gcp_mcs_vda/hypervisors.tf +++ b/examples/daas/basic_gcp_mcs_vda/hypervisors.tf @@ -1,6 +1,6 @@ -resource "citrix_daas_gcp_hypervisor" "example-gcp-hypervisor" { +resource "citrix_gcp_hypervisor" "example-gcp-hypervisor" { name = "example-gcp-hyperv" - zone = citrix_daas_zone.example-zone.id + zone = citrix_zone.example-zone.id service_account_id = "{GCP service account Id}" service_account_credentials = "{GCP service account private key}" } diff --git a/examples/daas/basic_gcp_mcs_vda/machine_catalogs.tf b/examples/daas/basic_gcp_mcs_vda/machine_catalogs.tf index 62d2b98..c9bd34e 100644 --- a/examples/daas/basic_gcp_mcs_vda/machine_catalogs.tf +++ b/examples/daas/basic_gcp_mcs_vda/machine_catalogs.tf @@ -1,4 +1,4 @@ -resource "citrix_daas_machine_catalog" "example-catalog" { +resource "citrix_machine_catalog" "example-catalog" { name = "example-gcp-catalog" description = "description for example catalog" allocation_type = "Random" @@ -6,10 +6,10 @@ resource 
"citrix_daas_machine_catalog" "example-catalog" { is_power_managed = true is_remote_pc = false provisioning_type = "MCS" - zone = citrix_daas_zone.example-zone.id + zone = citrix_zone.example-zone.id provisioning_scheme = { - hypervisor = citrix_daas_gcp_hypervisor.example-gcp-hypervisor.id - hypervisor_resource_pool = citrix_daas_gcp_hypervisor_resource_pool.example-gcp-rp.id + hypervisor = citrix_gcp_hypervisor.example-gcp-hypervisor.id + hypervisor_resource_pool = citrix_gcp_hypervisor_resource_pool.example-gcp-rp.id identity_type = "ActiveDirectory" machine_domain_identity = { domain = "" diff --git a/examples/daas/basic_gcp_mcs_vda/resource_pools.tf b/examples/daas/basic_gcp_mcs_vda/resource_pools.tf index 04bacb4..4a02502 100644 --- a/examples/daas/basic_gcp_mcs_vda/resource_pools.tf +++ b/examples/daas/basic_gcp_mcs_vda/resource_pools.tf @@ -1,6 +1,6 @@ -resource "citrix_daas_gcp_hypervisor_resource_pool" "example-gcp-rp" { +resource "citrix_gcp_hypervisor_resource_pool" "example-gcp-rp" { name = "example-gcp-rp" - hypervisor = citrix_daas_gcp_hypervisor.example-gcp-hypervisor.id + hypervisor = citrix_gcp_hypervisor.example-gcp-hypervisor.id project_name = "" region = "" subnets = [ diff --git a/examples/daas/basic_gcp_mcs_vda/zones.tf b/examples/daas/basic_gcp_mcs_vda/zones.tf index 677bddd..b3f0728 100644 --- a/examples/daas/basic_gcp_mcs_vda/zones.tf +++ b/examples/daas/basic_gcp_mcs_vda/zones.tf @@ -1,4 +1,4 @@ -resource "citrix_daas_zone" "example-zone" { +resource "citrix_zone" "example-zone" { name = "example zone" description = "description for example zone" } diff --git a/go.mod b/go.mod index 63484ad..865abac 100644 --- a/go.mod +++ b/go.mod @@ -5,24 +5,24 @@ go 1.21 toolchain go1.21.4 require ( - github.com/citrix/citrix-daas-rest-go v0.2.7 + github.com/citrix/citrix-daas-rest-go v0.3.0 github.com/google/uuid v1.6.0 - github.com/hashicorp/go-azure-helpers v0.66.1 + github.com/hashicorp/go-azure-helpers v0.66.2 github.com/hashicorp/terraform-plugin-docs v0.14.1 github.com/hashicorp/terraform-plugin-framework v1.5.0 github.com/hashicorp/terraform-plugin-framework-validators v0.12.0 github.com/hashicorp/terraform-plugin-go v0.21.0 github.com/hashicorp/terraform-plugin-log v0.9.0 github.com/hashicorp/terraform-plugin-testing v1.2.0 - golang.org/x/exp v0.0.0-20240119083558-1b970713d09a - golang.org/x/mod v0.14.0 + golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 + golang.org/x/mod v0.15.0 ) require ( github.com/Masterminds/goutils v1.1.1 // indirect - github.com/Masterminds/semver/v3 v3.1.1 // indirect - github.com/Masterminds/sprig/v3 v3.2.2 // indirect - github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 // indirect + github.com/Masterminds/semver/v3 v3.2.0 // indirect + github.com/Masterminds/sprig/v3 v3.2.3 // indirect + github.com/ProtonMail/go-crypto v1.1.0-alpha.0 // indirect github.com/agext/levenshtein v1.2.3 // indirect github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect github.com/armon/go-radix v1.0.0 // indirect @@ -40,7 +40,7 @@ require ( github.com/hashicorp/go-plugin v1.6.0 // indirect github.com/hashicorp/go-uuid v1.0.3 // indirect github.com/hashicorp/go-version v1.6.0 // indirect - github.com/hashicorp/hc-install v0.6.2 // indirect + github.com/hashicorp/hc-install v0.6.3 // indirect github.com/hashicorp/hcl/v2 v2.19.1 // indirect github.com/hashicorp/logutils v1.0.0 // indirect github.com/hashicorp/terraform-exec v0.20.0 // indirect @@ -49,7 +49,7 @@ require ( github.com/hashicorp/terraform-registry-address v0.2.3 // indirect 
github.com/hashicorp/terraform-svchost v0.1.1 // indirect github.com/hashicorp/yamux v0.1.1 // indirect - github.com/huandu/xstrings v1.3.2 // indirect + github.com/huandu/xstrings v1.3.3 // indirect github.com/imdario/mergo v0.3.15 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect @@ -68,13 +68,13 @@ require ( github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect github.com/zclconf/go-cty v1.14.2 // indirect - golang.org/x/crypto v0.18.0 // indirect - golang.org/x/net v0.20.0 // indirect - golang.org/x/sys v0.16.0 // indirect + golang.org/x/crypto v0.19.0 // indirect + golang.org/x/net v0.21.0 // indirect + golang.org/x/sys v0.17.0 // indirect golang.org/x/text v0.14.0 // indirect google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240125205218-1f4bbc51befe // indirect - google.golang.org/grpc v1.61.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240221002015-b0ce06bbee7c // indirect + google.golang.org/grpc v1.62.0 // indirect google.golang.org/protobuf v1.32.0 // indirect ) diff --git a/go.sum b/go.sum index 5607f50..9286c5d 100644 --- a/go.sum +++ b/go.sum @@ -2,15 +2,16 @@ dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= -github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc= github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= +github.com/Masterminds/semver/v3 v3.2.0 h1:3MEsd0SM6jqZojhjLWWeBY+Kcjy9i6MQAeY7YgDP83g= +github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= github.com/Masterminds/sprig/v3 v3.2.1/go.mod h1:UoaO7Yp8KlPnJIYWTFkMaqPUYKTfGFPhxNuwnnxkKlk= -github.com/Masterminds/sprig/v3 v3.2.2 h1:17jRggJu518dr3QaafizSXOjKYp94wKfABxUmyxvxX8= -github.com/Masterminds/sprig/v3 v3.2.2/go.mod h1:UoaO7Yp8KlPnJIYWTFkMaqPUYKTfGFPhxNuwnnxkKlk= +github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= +github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= -github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 h1:kkhsdkhsCvIsutKu5zLMgWtgh9YxGCNAw8Ad8hjwfYg= -github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= +github.com/ProtonMail/go-crypto v1.1.0-alpha.0 h1:nHGfwXmFvJrSR9xu8qL7BkO4DqTHXE9N5vPhgY2I+j0= +github.com/ProtonMail/go-crypto v1.1.0-alpha.0/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE= github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo= github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= @@ -23,10 +24,8 @@ github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQ github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bufbuild/protocompile 
v0.4.0 h1:LbFKd2XowZvQ/kajzguUp2DC9UEIQhIq77fZZlaQsNA= github.com/bufbuild/protocompile v0.4.0/go.mod h1:3v93+mbWn/v3xzN+31nwkJfrEpAUwp+BagBSZWx+TP8= -github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= -github.com/citrix/citrix-daas-rest-go v0.2.7 h1:9X9F63VsWVUzhibsx86y0jOutjfM+CZ5KPW9lTWxCfI= -github.com/citrix/citrix-daas-rest-go v0.2.7/go.mod h1:wObnH2H4QP/nwoKR589SzQZ5dGTu3AoVi927NN/e77s= -github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= +github.com/citrix/citrix-daas-rest-go v0.3.0 h1:5uQCzNfkWBY7bvxrOXseuzaTXpmuqlRNfYkZvdCJrXo= +github.com/citrix/citrix-daas-rest-go v0.3.0/go.mod h1:wObnH2H4QP/nwoKR589SzQZ5dGTu3AoVi927NN/e77s= github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= @@ -46,8 +45,8 @@ github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66D github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU= github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow= -github.com/go-git/go-git/v5 v5.10.1 h1:tu8/D8i+TWxgKpzQ3Vc43e+kkhXqtsZCKI/egajKnxk= -github.com/go-git/go-git/v5 v5.10.1/go.mod h1:uEuHjxkHap8kAl//V5F/nNWwqIYtP/402ddd05mp0wg= +github.com/go-git/go-git/v5 v5.11.0 h1:XIZc1p+8YzypNr34itUfSvYJcv+eYdTnTvOZ2vD3cA4= +github.com/go-git/go-git/v5 v5.11.0/go.mod h1:6GFcX2P3NM7FPBfpePbpLd21XxsgdAt+lKqXmCUiUCY= github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68= github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= @@ -68,8 +67,8 @@ github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-azure-helpers v0.66.1 h1:SokAckK9hvQ9PZO2TmZY/CGru8KWJ4A7hcRUggHMEus= -github.com/hashicorp/go-azure-helpers v0.66.1/go.mod h1:kJxXrFtJKJdOEqvad8pllAe7dhP4DbN8J6sqFZe47+4= +github.com/hashicorp/go-azure-helpers v0.66.2 h1:+Pzuo7pdKl0hBXXr5ymmhs4Q40tHAo2nAvHq4WgSjx8= +github.com/hashicorp/go-azure-helpers v0.66.2/go.mod h1:kJxXrFtJKJdOEqvad8pllAe7dhP4DbN8J6sqFZe47+4= github.com/hashicorp/go-checkpoint v0.5.0 h1:MFYpPZCnQqQTE18jFwSII6eUQrD/oxMFp3mlgcqk5mU= github.com/hashicorp/go-checkpoint v0.5.0/go.mod h1:7nfLNL10NsxqO4iWuW6tWW0HjZuDrwkBuEQsVcpCOgg= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -89,8 +88,8 @@ github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/C github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/hc-install v0.6.2 h1:V1k+Vraqz4olgZ9UzKiAcbman9i9scg9GgSt/U3mw/M= -github.com/hashicorp/hc-install 
v0.6.2/go.mod h1:2JBpd+NCFKiHiu/yYCGaPyPHhZLxXTpz8oreHa/a3Ps= +github.com/hashicorp/hc-install v0.6.3 h1:yE/r1yJvWbtrJ0STwScgEnCanb0U9v7zp0Gbkmcoxqs= +github.com/hashicorp/hc-install v0.6.3/go.mod h1:KamGdbodYzlufbWh4r9NRo8y6GLHWZP2GBtdnms1Ln0= github.com/hashicorp/hcl/v2 v2.19.1 h1://i05Jqznmb2EXqa39Nsvyan2o5XyMowW5fnCKW5RPI= github.com/hashicorp/hcl/v2 v2.19.1/go.mod h1:ThLC89FV4p9MPW804KVbe/cEXoQ8NZEh+JtMeeGErHE= github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= @@ -120,8 +119,9 @@ github.com/hashicorp/terraform-svchost v0.1.1/go.mod h1:mNsjQfZyf/Jhz35v6/0LWcv2 github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE= github.com/hashicorp/yamux v0.1.1/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= -github.com/huandu/xstrings v1.3.2 h1:L18LIDzqlW6xN2rEkpdV8+oL/IXWJ1APd+vsdYy4Wdw= github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/huandu/xstrings v1.3.3 h1:/Gcsuc1x8JVbJ9/rlye4xZnVAbEkGauT8lbebqcQws4= +github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM= github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= @@ -212,29 +212,24 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= -golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc= -golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= -golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA= -golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= +golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= +golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 h1:LfspQV/FYTatPTr/3HzIcmiUFH7PGP+OQ6mgDYo3yuQ= +golang.org/x/exp v0.0.0-20240222234643-814bf88cf225/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= -golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= +golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net 
v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= -golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -248,41 +243,34 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= -golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= 
-golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc= -golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= +golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= +golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240125205218-1f4bbc51befe h1:bQnxqljG/wqi4NTXu2+DJ3n7APcEA882QZ1JvhQAq9o= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240125205218-1f4bbc51befe/go.mod h1:PAREbraiVEVGVdTZsVWjSbbTtSyGbAgIIvni8a8CD5s= -google.golang.org/grpc v1.61.0 h1:TOvOcuXn30kRao+gfcvsebNEa5iZIiLkisYEkf7R7o0= -google.golang.org/grpc v1.61.0/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240221002015-b0ce06bbee7c h1:NUsgEN92SQQqzfA+YtqYNqYmB3DMMYLlIwUZAQFVFbo= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240221002015-b0ce06bbee7c/go.mod h1:H4O17MA/PE9BsGx3w+a+W2VOLLD1Qf7oJneAoU6WktY= +google.golang.org/grpc v1.62.0 h1:HQKZ/fa1bXkX1oFOvSjmZEUL8wLSaZTjCcLAlmZRtdk= +google.golang.org/grpc v1.62.0/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= diff --git a/internal/daas/resources/admin_role/admin_role_resource.go b/internal/daas/admin_role/admin_role_resource.go similarity index 99% rename from internal/daas/resources/admin_role/admin_role_resource.go rename to internal/daas/admin_role/admin_role_resource.go index f29e806..762db7f 100644 --- a/internal/daas/resources/admin_role/admin_role_resource.go +++ b/internal/daas/admin_role/admin_role_resource.go @@ -43,7 +43,7 @@ type adminRoleResource struct { // Metadata returns the resource type name. func (r *adminRoleResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { - resp.TypeName = req.ProviderTypeName + "_daas_admin_role" + resp.TypeName = req.ProviderTypeName + "_admin_role" } // Schema defines the schema for the resource. 
diff --git a/internal/daas/resources/admin_role/admin_role_resource_model.go b/internal/daas/admin_role/admin_role_resource_model.go similarity index 100% rename from internal/daas/resources/admin_role/admin_role_resource_model.go rename to internal/daas/admin_role/admin_role_resource_model.go diff --git a/internal/daas/data_sources/admin_scope/admin_scope_data_source.go b/internal/daas/admin_scope/admin_scope_data_source.go similarity index 98% rename from internal/daas/data_sources/admin_scope/admin_scope_data_source.go rename to internal/daas/admin_scope/admin_scope_data_source.go index 6dd230a..9c0cd0b 100644 --- a/internal/daas/data_sources/admin_scope/admin_scope_data_source.go +++ b/internal/daas/admin_scope/admin_scope_data_source.go @@ -28,7 +28,7 @@ type AdminScopeDataSource struct { } func (d *AdminScopeDataSource) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { - resp.TypeName = req.ProviderTypeName + "_daas_admin_scope" + resp.TypeName = req.ProviderTypeName + "_admin_scope" } func (d *AdminScopeDataSource) Schema(_ context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { diff --git a/internal/daas/data_sources/admin_scope/admin_scope_data_source_model.go b/internal/daas/admin_scope/admin_scope_data_source_model.go similarity index 100% rename from internal/daas/data_sources/admin_scope/admin_scope_data_source_model.go rename to internal/daas/admin_scope/admin_scope_data_source_model.go diff --git a/internal/daas/resources/admin_scope/admin_scope_resource.go b/internal/daas/admin_scope/admin_scope_resource.go similarity index 99% rename from internal/daas/resources/admin_scope/admin_scope_resource.go rename to internal/daas/admin_scope/admin_scope_resource.go index 54f2940..33c4593 100644 --- a/internal/daas/resources/admin_scope/admin_scope_resource.go +++ b/internal/daas/admin_scope/admin_scope_resource.go @@ -40,7 +40,7 @@ type adminScopeResource struct { // Metadata returns the resource type name. func (r *adminScopeResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { - resp.TypeName = req.ProviderTypeName + "_daas_admin_scope" + resp.TypeName = req.ProviderTypeName + "_admin_scope" } // Schema defines the schema for the resource. diff --git a/internal/daas/resources/admin_scope/admin_scope_resource_model.go b/internal/daas/admin_scope/admin_scope_resource_model.go similarity index 100% rename from internal/daas/resources/admin_scope/admin_scope_resource_model.go rename to internal/daas/admin_scope/admin_scope_resource_model.go diff --git a/internal/daas/data_sources/application_folder_details/application_folder_details_data_source.go b/internal/daas/application/application_folder_details_data_source.go similarity index 97% rename from internal/daas/data_sources/application_folder_details/application_folder_details_data_source.go rename to internal/daas/application/application_folder_details_data_source.go index 642ca42..7e848f2 100644 --- a/internal/daas/data_sources/application_folder_details/application_folder_details_data_source.go +++ b/internal/daas/application/application_folder_details_data_source.go @@ -1,6 +1,6 @@ // Copyright © 2023. Citrix Systems, Inc. 
-package application_folder_details +package application import ( "context" @@ -26,7 +26,7 @@ type ApplicationDataSource struct { } func (d *ApplicationDataSource) Metadata(ctx context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { - resp.TypeName = req.ProviderTypeName + "_daas_application_folder_details" + resp.TypeName = req.ProviderTypeName + "_application_folder_details" } // Schema defines the data source schema. diff --git a/internal/daas/data_sources/application_folder_details/application_folder_details_data_source_model.go b/internal/daas/application/application_folder_details_data_source_model.go similarity index 67% rename from internal/daas/data_sources/application_folder_details/application_folder_details_data_source_model.go rename to internal/daas/application/application_folder_details_data_source_model.go index 45ec3af..a5ecfa1 100644 --- a/internal/daas/data_sources/application_folder_details/application_folder_details_data_source_model.go +++ b/internal/daas/application/application_folder_details_data_source_model.go @@ -1,6 +1,6 @@ // Copyright © 2023. Citrix Systems, Inc. -package application_folder_details +package application import ( "github.com/citrix/citrix-daas-rest-go/citrixorchestration" @@ -13,21 +13,6 @@ type ApplicationFolderDetailsDataSourceModel struct { ApplicationsList []ApplicationResourceModel `tfsdk:"applications_list"` } -type ApplicationResourceModel struct { - Name types.String `tfsdk:"name"` - PublishedName types.String `tfsdk:"published_name"` - Description types.String `tfsdk:"description"` - InstalledAppProperties InstalledAppModel `tfsdk:"installed_app_properties"` - DeliveryGroups []types.String `tfsdk:"delivery_groups"` - ApplicationFolderPath types.String `tfsdk:"application_folder_path"` -} - -type InstalledAppModel struct { - CommandLineArguments types.String `tfsdk:"command_line_arguments"` - CommandLineExecutable types.String `tfsdk:"command_line_executable"` - WorkingDirectory types.String `tfsdk:"working_directory"` -} - func (r ApplicationFolderDetailsDataSourceModel) RefreshPropertyValues(apps *citrixorchestration.ApplicationResponseModelCollection) ApplicationFolderDetailsDataSourceModel { var res []ApplicationResourceModel @@ -37,7 +22,7 @@ func (r ApplicationFolderDetailsDataSourceModel) RefreshPropertyValues(apps *cit PublishedName: types.StringValue(app.GetPublishedName()), Description: types.StringValue(app.GetDescription()), ApplicationFolderPath: types.StringValue(*app.GetApplicationFolder().Name.Get()), - InstalledAppProperties: r.getInstalledAppProperties(app), + InstalledAppProperties: r.getInstalledAppProperties(app), // Fix: Change the type to *InstalledAppResponseModel DeliveryGroups: r.getDeliveryGroups(app), }) } @@ -47,8 +32,8 @@ func (r ApplicationFolderDetailsDataSourceModel) RefreshPropertyValues(apps *cit return r } -func (r ApplicationFolderDetailsDataSourceModel) getInstalledAppProperties(app citrixorchestration.ApplicationResponseModel) InstalledAppModel { - return InstalledAppModel{ +func (r ApplicationFolderDetailsDataSourceModel) getInstalledAppProperties(app citrixorchestration.ApplicationResponseModel) *InstalledAppResponseModel { + return &InstalledAppResponseModel{ CommandLineArguments: types.StringValue(app.GetInstalledAppProperties().CommandLineArguments), CommandLineExecutable: types.StringValue(app.GetInstalledAppProperties().CommandLineExecutable), WorkingDirectory: types.StringValue(app.GetInstalledAppProperties().WorkingDirectory), diff --git 
a/internal/daas/resources/application_folder/application_folder_resource.go b/internal/daas/application/application_folder_resource.go similarity index 99% rename from internal/daas/resources/application_folder/application_folder_resource.go rename to internal/daas/application/application_folder_resource.go index 78f8d70..e9e34f0 100644 --- a/internal/daas/resources/application_folder/application_folder_resource.go +++ b/internal/daas/application/application_folder_resource.go @@ -1,6 +1,6 @@ // Copyright © 2023. Citrix Systems, Inc. -package application_folder +package application import ( "context" @@ -38,7 +38,7 @@ type applicationFolderResource struct { // Metadata returns the data source type name. func (r *applicationFolderResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { - resp.TypeName = req.ProviderTypeName + "_daas_application_folder" + resp.TypeName = req.ProviderTypeName + "_application_folder" } // Configure adds the provider configured client to the data source. diff --git a/internal/daas/resources/application_folder/application_folder_resource_model.go b/internal/daas/application/application_folder_resource_model.go similarity index 97% rename from internal/daas/resources/application_folder/application_folder_resource_model.go rename to internal/daas/application/application_folder_resource_model.go index dd31288..7313fd5 100644 --- a/internal/daas/resources/application_folder/application_folder_resource_model.go +++ b/internal/daas/application/application_folder_resource_model.go @@ -1,6 +1,6 @@ // Copyright © 2023. Citrix Systems, Inc. -package application_folder +package application import ( "strings" diff --git a/internal/daas/resources/application/application_resource.go b/internal/daas/application/application_resource.go similarity index 99% rename from internal/daas/resources/application/application_resource.go rename to internal/daas/application/application_resource.go index 5b333ec..548e7a0 100644 --- a/internal/daas/resources/application/application_resource.go +++ b/internal/daas/application/application_resource.go @@ -43,7 +43,7 @@ type applicationResource struct { // Metadata returns the data source type name. func (r *applicationResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { - resp.TypeName = req.ProviderTypeName + "_daas_application" + resp.TypeName = req.ProviderTypeName + "_application" } // Configure adds the provider configured client to the data source. diff --git a/internal/daas/resources/application/application_resource_model.go b/internal/daas/application/application_resource_model.go similarity index 100% rename from internal/daas/resources/application/application_resource_model.go rename to internal/daas/application/application_resource_model.go diff --git a/internal/daas/resources/delivery_group/delivery_group_resource.go b/internal/daas/delivery_group/delivery_group_resource.go similarity index 97% rename from internal/daas/resources/delivery_group/delivery_group_resource.go rename to internal/daas/delivery_group/delivery_group_resource.go index fcc6b1d..e0959ab 100644 --- a/internal/daas/resources/delivery_group/delivery_group_resource.go +++ b/internal/daas/delivery_group/delivery_group_resource.go @@ -47,7 +47,16 @@ type deliveryGroupResource struct { // Metadata returns the resource type name. 
func (r *deliveryGroupResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { - resp.TypeName = req.ProviderTypeName + "_daas_delivery_group" + resp.TypeName = req.ProviderTypeName + "_delivery_group" +} + +// Configure adds the provider configured client to the resource. +func (r *deliveryGroupResource) Configure(_ context.Context, req resource.ConfigureRequest, _ *resource.ConfigureResponse) { + if req.ProviderData == nil { + return + } + + r.client = req.ProviderData.(*citrixdaasclient.CitrixDaasClient) } // Schema defines the schema for the resource. @@ -473,19 +482,17 @@ func (r *deliveryGroupResource) Schema(_ context.Context, _ resource.SchemaReque Description: "The total number of machines in the delivery group.", Computed: true, }, + "policy_set_id": schema.StringAttribute{ + Description: "GUID identifier of the policy set.", + Optional: true, + Validators: []validator.String{ + stringvalidator.RegexMatches(regexp.MustCompile(util.GuidRegex), "must be specified with ID in GUID format"), + }, + }, }, } } -// Configure adds the provider configured client to the resource. -func (r *deliveryGroupResource) Configure(_ context.Context, req resource.ConfigureRequest, _ *resource.ConfigureResponse) { - if req.ProviderData == nil { - return - } - - r.client = req.ProviderData.(*citrixdaasclient.CitrixDaasClient) -} - func (r *deliveryGroupResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { defer util.PanicHandler(&resp.Diagnostics) @@ -596,6 +603,12 @@ func (r *deliveryGroupResource) Create(ctx context.Context, req resource.CreateR return } + if plan.PolicySetId.ValueString() != "" { + deliveryGroup.SetPolicySetGuid(plan.PolicySetId.ValueString()) + } else { + deliveryGroup.SetPolicySetGuid(types.StringNull().ValueString()) + } + plan = plan.RefreshPropertyValues(deliveryGroup, deliveryGroupDesktops, deliveryGroupPowerTimeSchemes, deliveryGroupMachines, deliveryGroupRebootSchedule) // Set state to fully populated data @@ -646,6 +659,10 @@ func (r *deliveryGroupResource) Read(ctx context.Context, req resource.ReadReque return } + if deliveryGroup.GetPolicySetGuid() == util.DefaultSitePolicySetId { + deliveryGroup.SetPolicySetGuid("") + } + state = state.RefreshPropertyValues(deliveryGroup, deliveryGroupDesktops, deliveryGroupPowerTimeSchemes, deliveryGroupMachines, deliveryGroupRebootSchedule) // Set refreshed state @@ -729,6 +746,12 @@ func (r *deliveryGroupResource) Update(ctx context.Context, req resource.UpdateR return } + if plan.PolicySetId.ValueString() != "" { + updatedDeliveryGroup.SetPolicySetGuid(plan.PolicySetId.ValueString()) + } else { + updatedDeliveryGroup.SetPolicySetGuid(types.StringNull().ValueString()) + } + plan = plan.RefreshPropertyValues(updatedDeliveryGroup, deliveryGroupDesktops, deliveryGroupPowerTimeSchemes, deliveryGroupMachines, deliveryGroupRebootSchedule) diags = resp.State.Set(ctx, plan) @@ -736,7 +759,6 @@ func (r *deliveryGroupResource) Update(ctx context.Context, req resource.UpdateR if resp.Diagnostics.HasError() { return } - } // Deletes the resource and removes the Terraform state on success. 
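The delivery group changes above plumb the new `policy_set_id` attribute through Create, Read, and Update, and the Read path maps the site default policy set back to an empty value before state is refreshed. A minimal Go sketch of that normalization, assuming `util.DefaultSitePolicySetId` names the GUID of the built-in site policy set (the helper name and the constant's value here are illustrative, not the provider's API):

```go
package main

import "fmt"

// Stand-in for util.DefaultSitePolicySetId; the real value is whatever
// GUID the site assigns to its default policy set.
const defaultSitePolicySetId = "00000000-0000-0000-0000-000000000000"

// normalizePolicySetGuid mirrors the Read-path logic: a delivery group
// without an explicit policy_set_id resolves to the site default on the
// server, so the GUID is cleared before refreshing state to avoid a
// perpetual plan diff for configurations that never set the attribute.
func normalizePolicySetGuid(current string) string {
	if current == defaultSitePolicySetId {
		return ""
	}
	return current
}

func main() {
	fmt.Println(normalizePolicySetGuid(defaultSitePolicySetId) == "") // true: cleared
	fmt.Println(normalizePolicySetGuid("b2339edf-7b00-436e-9c3a-54c987c3526e"))
}
```

The utils change further below applies the inverse mapping on Update: an empty plan value is written back as `util.DefaultSitePolicySetId`, so removing `policy_set_id` from configuration reattaches the group to the site default policy set.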
diff --git a/internal/daas/resources/delivery_group/delivery_group_resource_model.go b/internal/daas/delivery_group/delivery_group_resource_model.go similarity index 96% rename from internal/daas/resources/delivery_group/delivery_group_resource_model.go rename to internal/daas/delivery_group/delivery_group_resource_model.go index 7899f05..db2807f 100644 --- a/internal/daas/resources/delivery_group/delivery_group_resource_model.go +++ b/internal/daas/delivery_group/delivery_group_resource_model.go @@ -98,6 +98,7 @@ type DeliveryGroupResourceModel struct { AutoscaleSettings *DeliveryGroupPowerManagementSettings `tfsdk:"autoscale_settings"` RebootSchedules []DeliveryGroupRebootSchedule `tfsdk:"reboot_schedules"` TotalMachines types.Int64 `tfsdk:"total_machines"` + PolicySetId types.String `tfsdk:"policy_set_id"` } func (r DeliveryGroupResourceModel) RefreshPropertyValues(deliveryGroup *citrixorchestration.DeliveryGroupDetailResponseModel, dgDesktops *citrixorchestration.DesktopResponseModelCollection, dgPowerTimeSchemes *citrixorchestration.PowerTimeSchemeResponseModelCollection, dgMachines *citrixorchestration.MachineResponseModelCollection, dgRebootSchedule *citrixorchestration.RebootScheduleResponseModelCollection) DeliveryGroupResourceModel { @@ -114,6 +115,12 @@ func (r DeliveryGroupResourceModel) RefreshPropertyValues(deliveryGroup *citrixo r.Description = types.StringNull() } + if deliveryGroup.GetPolicySetGuid() != "" { + r.PolicySetId = types.StringValue(deliveryGroup.GetPolicySetGuid()) + } else { + r.PolicySetId = types.StringNull() + } + r = r.updatePlanWithRestrictedAccessUsers(deliveryGroup) r = r.updatePlanWithDesktops(dgDesktops) r = r.updatePlanWithAssociatedCatalogs(dgMachines) diff --git a/internal/daas/resources/delivery_group/delivery_group_utils.go b/internal/daas/delivery_group/delivery_group_utils.go similarity index 98% rename from internal/daas/resources/delivery_group/delivery_group_utils.go rename to internal/daas/delivery_group/delivery_group_utils.go index 34c44d4..ef9756d 100644 --- a/internal/daas/resources/delivery_group/delivery_group_utils.go +++ b/internal/daas/delivery_group/delivery_group_utils.go @@ -5,6 +5,7 @@ package delivery_group import ( "context" "fmt" + "net/http" "reflect" "regexp" "strconv" @@ -166,7 +167,7 @@ func validatePowerManagementSettings(plan DeliveryGroupResourceModel, sessionSup func validateAndReturnMachineCatalogSessionSupport(ctx context.Context, client citrixdaasclient.CitrixDaasClient, diagnostics *diag.Diagnostics, dgMachineCatalogs []DeliveryGroupMachineCatalogModel, addErrorIfCatalogNotFound bool) (catalogSessionSupport *citrixorchestration.SessionSupport, isPowerManagedCatalog bool, isRemotePcCatalog bool, catalogIdentityType citrixorchestration.IdentityType, err error) { var sessionSupport *citrixorchestration.SessionSupport var provisioningType *citrixorchestration.ProvisioningType - identityType := citrixorchestration.IDENTITYTYPE_UNKNOWN + var identityType citrixorchestration.IdentityType isPowerManaged := false isRemotePc := false for _, dgMachineCatalog := range dgMachineCatalogs { @@ -185,6 +186,8 @@ func validateAndReturnMachineCatalogSessionSupport(ctx context.Context, client c provisioningType = &catalog.ProvisioningType isPowerManaged = catalog.GetIsPowerManaged() isRemotePc = catalog.GetIsRemotePC() + provScheme := catalog.GetProvisioningScheme() + identityType = provScheme.GetIdentityType() } if *provisioningType != catalog.GetProvisioningType() { @@ -195,11 +198,9 @@ func 
validateAndReturnMachineCatalogSessionSupport(ctx context.Context, client c return sessionSupport, false, false, citrixorchestration.IDENTITYTYPE_UNKNOWN, err } - if identityType == citrixorchestration.IDENTITYTYPE_UNKNOWN { - identityType = *catalog.GetProvisioningScheme().IdentityType - } + provScheme := catalog.GetProvisioningScheme() - if identityType != *catalog.GetProvisioningScheme().IdentityType { + if identityType != provScheme.GetIdentityType() { err := fmt.Errorf("associated_machine_catalogs must have catalogs with the same identity type in provisioning scheme") diagnostics.AddError("Error validating associated Machine Catalogs", "Ensure all associated Machine Catalogs have the same identity type in provisioning scheme.", @@ -605,6 +606,7 @@ func getRequestModelForDeliveryGroupCreate(plan DeliveryGroupResourceModel, cata body.SetDesktops(deliveryGroupDesktopsArray) body.SetDefaultDesktopPublishedName(plan.Name.ValueString()) body.SetSimpleAccessPolicy(simpleAccessPolicy) + body.SetPolicySetGuid(plan.PolicySetId.ValueString()) if identityType == citrixorchestration.IDENTITYTYPE_AZURE_AD { body.SetMachineLogOnType(citrixorchestration.MACHINELOGONTYPE_AZURE_AD) } else { @@ -691,6 +693,12 @@ func getRequestModelForDeliveryGroupUpdate(plan DeliveryGroupResourceModel, curr editDeliveryGroupRequestBody.SetRebootSchedules(deliveryGroupRebootScheduleArray) editDeliveryGroupRequestBody.SetAdvancedAccessPolicy(advancedAccessPolicies) + if plan.PolicySetId.ValueString() != "" { + editDeliveryGroupRequestBody.SetPolicySetGuid(plan.PolicySetId.ValueString()) + } else { + editDeliveryGroupRequestBody.SetPolicySetGuid(util.DefaultSitePolicySetId) + } + if plan.AutoscaleSettings != nil { if plan.AutoscaleSettings.Timezone.ValueString() != "" { @@ -1200,3 +1208,19 @@ func preserveOrderInPoolSizeSchedule(poolSizeScheduleInPlan, poolSizeScheduleInR return poolSizeSchedules } + +func getDefaultPolicySetId(client *citrixdaasclient.CitrixDaasClient, ctx context.Context) (string, *http.Response, error) { + var policySetId string + getDefaultPolicySetRequest := client.ApiClient.GpoDAAS.GpoReadGpoPolicySets(ctx) + policySetsResponse, httpResp, err := citrixdaasclient.AddRequestData(getDefaultPolicySetRequest, client).Execute() + if err != nil { + return "", httpResp, err + } + for _, policySet := range policySetsResponse.Items { + if policySet.GetName() == util.DefaultSitePolicySetName { + policySetId = policySet.GetPolicySetGuid() + break + } + } + return policySetId, httpResp, nil +} diff --git a/internal/daas/resources/hypervisor/aws_hypervisor_resource.go b/internal/daas/hypervisor/aws_hypervisor_resource.go similarity index 99% rename from internal/daas/resources/hypervisor/aws_hypervisor_resource.go rename to internal/daas/hypervisor/aws_hypervisor_resource.go index 9eb5cfb..def8854 100644 --- a/internal/daas/resources/hypervisor/aws_hypervisor_resource.go +++ b/internal/daas/hypervisor/aws_hypervisor_resource.go @@ -39,7 +39,7 @@ type awsHypervisorResource struct { // Metadata returns the resource type name. func (r *awsHypervisorResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { - resp.TypeName = req.ProviderTypeName + "_daas_aws_hypervisor" + resp.TypeName = req.ProviderTypeName + "_aws_hypervisor" } // Schema defines the schema for the resource. 
diff --git a/internal/daas/resources/hypervisor/aws_hypervisor_resource_model.go b/internal/daas/hypervisor/aws_hypervisor_resource_model.go similarity index 100% rename from internal/daas/resources/hypervisor/aws_hypervisor_resource_model.go rename to internal/daas/hypervisor/aws_hypervisor_resource_model.go diff --git a/internal/daas/resources/hypervisor/azure_hypervisor_resource.go b/internal/daas/hypervisor/azure_hypervisor_resource.go similarity index 99% rename from internal/daas/resources/hypervisor/azure_hypervisor_resource.go rename to internal/daas/hypervisor/azure_hypervisor_resource.go index effe492..8c89c59 100644 --- a/internal/daas/resources/hypervisor/azure_hypervisor_resource.go +++ b/internal/daas/hypervisor/azure_hypervisor_resource.go @@ -41,7 +41,7 @@ type azureHypervisorResource struct { // Metadata returns the resource type name. func (r *azureHypervisorResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { - resp.TypeName = req.ProviderTypeName + "_daas_azure_hypervisor" + resp.TypeName = req.ProviderTypeName + "_azure_hypervisor" } // Schema defines the schema for the resource. diff --git a/internal/daas/resources/hypervisor/azure_hypervisor_resource_model.go b/internal/daas/hypervisor/azure_hypervisor_resource_model.go similarity index 100% rename from internal/daas/resources/hypervisor/azure_hypervisor_resource_model.go rename to internal/daas/hypervisor/azure_hypervisor_resource_model.go diff --git a/internal/daas/resources/hypervisor/gcp_hypervisor_resource.go b/internal/daas/hypervisor/gcp_hypervisor_resource.go similarity index 99% rename from internal/daas/resources/hypervisor/gcp_hypervisor_resource.go rename to internal/daas/hypervisor/gcp_hypervisor_resource.go index c4e73e9..e19eba6 100644 --- a/internal/daas/resources/hypervisor/gcp_hypervisor_resource.go +++ b/internal/daas/hypervisor/gcp_hypervisor_resource.go @@ -39,7 +39,7 @@ type gcpHypervisorResource struct { // Metadata returns the resource type name. func (r *gcpHypervisorResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { - resp.TypeName = req.ProviderTypeName + "_daas_gcp_hypervisor" + resp.TypeName = req.ProviderTypeName + "_gcp_hypervisor" } // Schema defines the schema for the resource. 
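The `hypervisor_common.go` change below makes `CreateHypervisor` poll a newly created connection until it leaves the `Initializing` fault state: up to six reads with a linearly growing delay (attempt `i` sleeps `i × 10s`), followed by a diagnostic if the connection never settles. A self-contained sketch of that polling pattern, with `fetchHypervisor` standing in for `util.GetHypervisor` and a pared-down `hypervisor` type standing in for the Orchestration response model:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// hypervisor is a pared-down stand-in for the Orchestration response model.
type hypervisor struct {
	Name       string
	FaultState string
}

// fetchHypervisor stands in for util.GetHypervisor; a real implementation
// would query the Orchestration API for the connection by name.
func fetchHypervisor(name string) (*hypervisor, error) {
	return &hypervisor{Name: name, FaultState: ""}, nil
}

// waitForHypervisor reads the hypervisor up to maxAttempts times, sleeping
// i * baseDelay between attempts while it still reports "Initializing".
func waitForHypervisor(name string) (*hypervisor, error) {
	const maxAttempts = 6
	baseDelay := 10 * time.Second

	var hv *hypervisor
	var err error
	for i := 1; i <= maxAttempts; i++ {
		hv, err = fetchHypervisor(name)
		if err != nil {
			return hv, err
		}
		if hv.FaultState != "Initializing" {
			// Initialized, or failed with a different fault the caller can surface.
			return hv, nil
		}
		if i != maxAttempts {
			time.Sleep(time.Duration(i) * baseDelay)
		}
	}
	return hv, errors.New("hypervisor " + name + " is stuck in initializing state")
}

func main() {
	hv, err := waitForHypervisor("example-hypervisor")
	fmt.Println(hv.Name, err)
}
```

Linear rather than exponential backoff keeps the worst case bounded at roughly 10+20+30+40+50 seconds, matching the fixed six-attempt budget in the change below.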
diff --git a/internal/daas/resources/hypervisor/gcp_hypervisor_resource_model.go b/internal/daas/hypervisor/gcp_hypervisor_resource_model.go similarity index 100% rename from internal/daas/resources/hypervisor/gcp_hypervisor_resource_model.go rename to internal/daas/hypervisor/gcp_hypervisor_resource_model.go diff --git a/internal/daas/resources/hypervisor/hypervisor_common.go b/internal/daas/hypervisor/hypervisor_common.go similarity index 76% rename from internal/daas/resources/hypervisor/hypervisor_common.go rename to internal/daas/hypervisor/hypervisor_common.go index 781a1c8..ba3aa6a 100644 --- a/internal/daas/resources/hypervisor/hypervisor_common.go +++ b/internal/daas/hypervisor/hypervisor_common.go @@ -4,6 +4,8 @@ package hypervisor import ( "context" + "fmt" + "time" citrixorchestration "github.com/citrix/citrix-daas-rest-go/citrixorchestration" citrixdaasclient "github.com/citrix/citrix-daas-rest-go/client" @@ -13,6 +15,14 @@ import ( "github.com/hashicorp/terraform-plugin-framework/resource" ) +type HYPERVISOR_FAULT_STATE string + +const ( + Initializing HYPERVISOR_FAULT_STATE = "Initializing" +) + +const base_delay_in_seconds = time.Duration(10) * time.Second + // Create creates the resource and sets the initial Terraform state. func CreateHypervisor(ctx context.Context, client *citrixdaasclient.CitrixDaasClient, diagnostics *diag.Diagnostics, createHypervisorRequestBody citrixorchestration.CreateHypervisorRequestModel) (*citrixorchestration.HypervisorDetailResponseModel, error) { // Create new hypervisor @@ -29,16 +39,36 @@ func CreateHypervisor(ctx context.Context, client *citrixdaasclient.CitrixDaasCl return nil, err } - err = util.ProcessAsyncJobResponse(ctx, client, httpResp, "Error creating Hypervisor", diagnostics, 10) + err = util.ProcessAsyncJobResponse(ctx, client, httpResp, "Error creating Hypervisor", diagnostics, 10, true) if err != nil { return nil, err } - hypervisor, err := util.GetHypervisor(ctx, client, diagnostics, createHypervisorRequestBody.ConnectionDetails.GetName()) - if err != nil { - return hypervisor, err + var hypervisor *citrixorchestration.HypervisorDetailResponseModel + for i := 1; i <= 6; i++ { + hypervisor, err = util.GetHypervisor(ctx, client, diagnostics, createHypervisorRequestBody.ConnectionDetails.GetName()) + + if err != nil { + return hypervisor, err + } + + fault := hypervisor.GetFault() + faultState := fault.GetState() + if faultState == string(Initializing) { + if i != 6 { + time.Sleep(time.Duration(i) * base_delay_in_seconds) + } + continue + } + + return hypervisor, nil } + diagnostics.AddError( + "Error creating Hypervisor "+createHypervisorRequestBody.ConnectionDetails.GetName(), + fmt.Sprintf("Hypervisor %s is stuck in initializing state. 
Delete the hypervisor and try again.", createHypervisorRequestBody.ConnectionDetails.GetName()), + ) + return hypervisor, nil } @@ -56,7 +86,7 @@ func UpdateHypervisor(ctx context.Context, client *citrixdaasclient.CitrixDaasCl ) } - err = util.ProcessAsyncJobResponse(ctx, client, httpResp, "Error updating Hypervisor "+hypervisor.GetName(), diagnostics, 10) + err = util.ProcessAsyncJobResponse(ctx, client, httpResp, "Error updating Hypervisor "+hypervisor.GetName(), diagnostics, 10, true) if err != nil { return nil, err } diff --git a/internal/daas/hypervisor/nutanix_hypervisor_resource.go b/internal/daas/hypervisor/nutanix_hypervisor_resource.go new file mode 100644 index 0000000..ccedb2a --- /dev/null +++ b/internal/daas/hypervisor/nutanix_hypervisor_resource.go @@ -0,0 +1,323 @@ +// Copyright © 2023. Citrix Systems, Inc. + +package hypervisor + +import ( + "context" + "net/http" + "regexp" + + citrixorchestration "github.com/citrix/citrix-daas-rest-go/citrixorchestration" + citrixdaasclient "github.com/citrix/citrix-daas-rest-go/client" + "github.com/citrix/terraform-provider-citrix/internal/util" + "github.com/hashicorp/terraform-plugin-framework-validators/int64validator" + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/int64default" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ resource.Resource = &nutanixHypervisorResource{} + _ resource.ResourceWithConfigure = &nutanixHypervisorResource{} + _ resource.ResourceWithImportState = &nutanixHypervisorResource{} +) + +// NewHypervisorResource is a helper function to simplify the provider implementation. +func NewNutanixHypervisorResource() resource.Resource { + return &nutanixHypervisorResource{} +} + +type nutanixHypervisorResource struct { + client *citrixdaasclient.CitrixDaasClient +} + +// Metadata implements resource.Resource. +func (*nutanixHypervisorResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_nutanix_hypervisor" +} + +// Configure implements resource.ResourceWithConfigure. +func (r *nutanixHypervisorResource) Configure(_ context.Context, req resource.ConfigureRequest, _ *resource.ConfigureResponse) { + if req.ProviderData == nil { + return + } + + r.client = req.ProviderData.(*citrixdaasclient.CitrixDaasClient) +} + +// Schema implements resource.Resource. 
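The CreateHypervisor change above swaps the single post-create read for a polling loop: up to six reads of the hypervisor, sleeping i × 10 seconds between attempts (10 + 20 + 30 + 40 + 50 seconds, about 2.5 minutes total) while the fault state still reports Initializing, and a diagnostic if it never clears. A self-contained sketch of the same pattern (the read callback below is a stand-in for util.GetHypervisor, and the state strings are illustrative):

package main

import (
	"errors"
	"fmt"
	"time"
)

const baseDelay = 10 * time.Second

// pollUntilReady retries a read up to maxAttempts times with linearly
// increasing sleeps while the resource reports a transient fault state.
func pollUntilReady(maxAttempts int, read func() (faultState string, err error)) error {
	for i := 1; i <= maxAttempts; i++ {
		state, err := read()
		if err != nil {
			return err
		}
		if state != "Initializing" {
			return nil // fault cleared; resource is usable
		}
		if i != maxAttempts {
			time.Sleep(time.Duration(i) * baseDelay) // linear backoff: 10s, 20s, ...
		}
	}
	return errors.New("hypervisor is stuck in initializing state")
}

func main() {
	reads := 0
	err := pollUntilReady(6, func() (string, error) {
		reads++
		if reads < 3 {
			return "Initializing", nil // first two reads: still initializing
		}
		return "None", nil
	})
	fmt.Println(err, reads) // <nil> 3
}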
+func (r *nutanixHypervisorResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) {
+	resp.Schema = schema.Schema{
+		Description: "Manages a Nutanix hypervisor.",
+		Attributes: map[string]schema.Attribute{
+			"id": schema.StringAttribute{
+				Description: "GUID identifier of the hypervisor.",
+				Computed:    true,
+				PlanModifiers: []planmodifier.String{
+					stringplanmodifier.UseStateForUnknown(),
+				},
+			},
+			"name": schema.StringAttribute{
+				Description: "Name of the hypervisor.",
+				Required:    true,
+			},
+			"zone": schema.StringAttribute{
+				Description: "Id of the zone the hypervisor is associated with.",
+				Required:    true,
+				PlanModifiers: []planmodifier.String{
+					stringplanmodifier.RequiresReplace(),
+				},
+				Validators: []validator.String{
+					stringvalidator.RegexMatches(regexp.MustCompile(util.GuidRegex), "must be specified with ID in GUID format"),
+				},
+			},
+			"username": schema.StringAttribute{
+				Description: "Username of the hypervisor.",
+				Required:    true,
+			},
+			"password": schema.StringAttribute{
+				Description: "Password of the hypervisor.",
+				Required:    true,
+			},
+			"password_format": schema.StringAttribute{
+				Description: "Password format of the hypervisor. Choose between Base64 and PlainText.",
+				Required:    true,
+				Validators: []validator.String{
+					stringvalidator.OneOf(
+						string(citrixorchestration.IDENTITYPASSWORDFORMAT_BASE64),
+						string(citrixorchestration.IDENTITYPASSWORDFORMAT_PLAIN_TEXT),
+					),
+				},
+			},
+			"addresses": schema.ListAttribute{
+				ElementType: types.StringType,
+				Description: "Hypervisor address(es). At least one is required.",
+				Required:    true,
+				Validators: []validator.List{
+					listvalidator.SizeAtLeast(1),
+					listvalidator.ValueStringsAre(
+						stringvalidator.RegexMatches(regexp.MustCompile(util.IPv4Regex), "must be a valid IPv4 address without protocol (http:// or https://) and port number."),
+					),
+				},
+			},
+			"max_absolute_active_actions": schema.Int64Attribute{
+				Description: "Maximum number of actions that can execute in parallel on the hypervisor. Default is 100.",
+				Optional:    true,
+				Computed:    true,
+				Default:     int64default.StaticInt64(100),
+				Validators: []validator.Int64{
+					int64validator.AtLeast(1),
+				},
+			},
+			"max_absolute_new_actions_per_minute": schema.Int64Attribute{
+				Description: "Maximum number of actions that can be started on the hypervisor per minute. Default is 10.",
+				Optional:    true,
+				Computed:    true,
+				Default:     int64default.StaticInt64(10),
+				Validators: []validator.Int64{
+					int64validator.AtLeast(1),
+				},
+			},
+			"max_power_actions_percentage_of_machines": schema.Int64Attribute{
+				Description: "Maximum percentage of machines on the hypervisor which can have their power state changed simultaneously. Default is 20.",
+				Optional:    true,
+				Computed:    true,
+				Default:     int64default.StaticInt64(20),
+				Validators: []validator.Int64{
+					int64validator.AtLeast(1),
+				},
+			},
+		},
+	}
+}
+
+// ImportState implements resource.ResourceWithImportState.
+func (*nutanixHypervisorResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) {
+	// Retrieve import ID and save to id attribute
+	resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp)
+}
+
+// Create implements resource.Resource.
+func (r *nutanixHypervisorResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + defer util.PanicHandler(&resp.Diagnostics) + + // Retrieve values from plan + var plan NutanixHypervisorResourceModel + diags := req.Plan.Get(ctx, &plan) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + /* Generate ConnectionDetails API request body from plan */ + var connectionDetails citrixorchestration.HypervisorConnectionDetailRequestModel + connectionDetails.SetName(plan.Name.ValueString()) + connectionDetails.SetZone(plan.Zone.ValueString()) + connectionDetails.SetConnectionType(citrixorchestration.HYPERVISORCONNECTIONTYPE_CUSTOM) + connectionDetails.SetPluginId(util.NUTANIX_PLUGIN_ID) + connectionDetails.SetUserName(plan.Username.ValueString()) + connectionDetails.SetPassword(plan.Password.ValueString()) + pwdFormat, err := citrixorchestration.NewIdentityPasswordFormatFromValue(plan.PasswordFormat.ValueString()) + if err != nil || pwdFormat == nil { + resp.Diagnostics.AddError( + "Error creating Hypervisor for Nutanix", + "Unsupported password format: "+plan.PasswordFormat.ValueString(), + ) + } + connectionDetails.SetPasswordFormat(*pwdFormat) + + addresses := util.ConvertBaseStringArrayToPrimitiveStringArray(plan.Addresses) + connectionDetails.SetAddresses(addresses) + connectionDetails.SetMaxAbsoluteActiveActions(int32(plan.MaxAbsoluteActiveActions.ValueInt64())) + connectionDetails.SetMaxAbsoluteNewActionsPerMinute(int32(plan.MaxAbsoluteNewActionsPerMinute.ValueInt64())) + connectionDetails.SetMaxPowerActionsPercentageOfMachines(int32(plan.MaxPowerActionsPercentageOfMachines.ValueInt64())) + + var body citrixorchestration.CreateHypervisorRequestModel + body.SetConnectionDetails(connectionDetails) + + hypervisor, err := CreateHypervisor(ctx, r.client, &resp.Diagnostics, body) + if err != nil { + // Directly return. Error logs have been populated in common function. + return + } + + // Map response body to schema and populate Computed attribute values + plan = plan.RefreshPropertyValues(hypervisor) + + // Set state to fully populated data + diags = resp.State.Set(ctx, plan) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } +} + +// Read implements resource.Resource. +func (r *nutanixHypervisorResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + defer util.PanicHandler(&resp.Diagnostics) + + // Get current state + var state NutanixHypervisorResourceModel + diags := req.State.Get(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + // Get refreshed hypervisor properties from Orchestration + hypervisorId := state.Id.ValueString() + hypervisor, err := readHypervisor(ctx, r.client, resp, hypervisorId) + if err != nil { + return + } + + if hypervisor.GetConnectionType() != citrixorchestration.HYPERVISORCONNECTIONTYPE_CUSTOM || hypervisor.GetPluginId() != util.NUTANIX_PLUGIN_ID { + resp.Diagnostics.AddError( + "Error reading Hypervisor", + "Hypervisor "+hypervisor.GetName()+" is not a Nutanix connection type hypervisor.", + ) + return + } + + // Overwrite hypervisor with refreshed state + state = state.RefreshPropertyValues(hypervisor) + + // Set refreshed state + diags = resp.State.Set(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } +} + +// Update implements resource.Resource. 
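Create and Update both pass the plan's addresses through util.ConvertBaseStringArrayToPrimitiveStringArray before handing them to the orchestration request model. That helper is not shown in this patch; presumably it just unwraps the framework's types.String values into plain Go strings, along these lines (an assumed sketch, not the provider's actual implementation):

package util

import "github.com/hashicorp/terraform-plugin-framework/types"

// convertBaseStringArrayToPrimitiveStringArray unwraps framework string
// values into plain strings for the generated orchestration models.
func convertBaseStringArrayToPrimitiveStringArray(in []types.String) []string {
	out := make([]string, 0, len(in))
	for _, v := range in {
		out = append(out, v.ValueString())
	}
	return out
}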
+func (r *nutanixHypervisorResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + defer util.PanicHandler(&resp.Diagnostics) + + // Retrieve values from plan + var plan NutanixHypervisorResourceModel + diags := req.Plan.Get(ctx, &plan) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + // Get refreshed hypervisor properties from Orchestration + hypervisorId := plan.Id.ValueString() + hypervisor, err := util.GetHypervisor(ctx, r.client, &resp.Diagnostics, hypervisorId) + if err != nil { + return + } + + // Construct the update model + var editHypervisorRequestBody citrixorchestration.EditHypervisorConnectionRequestModel + editHypervisorRequestBody.SetName(plan.Name.ValueString()) + editHypervisorRequestBody.SetConnectionType(citrixorchestration.HYPERVISORCONNECTIONTYPE_CUSTOM) + editHypervisorRequestBody.SetUserName(plan.Username.ValueString()) + editHypervisorRequestBody.SetPassword(plan.Password.ValueString()) + pwdFormat, err := citrixorchestration.NewIdentityPasswordFormatFromValue(plan.PasswordFormat.ValueString()) + if err != nil || pwdFormat == nil { + resp.Diagnostics.AddError( + "Error updating Hypervisor for Nutanix", + "Unsupported password format: "+plan.PasswordFormat.ValueString(), + ) + } + editHypervisorRequestBody.SetPasswordFormat(*pwdFormat) + + addresses := util.ConvertBaseStringArrayToPrimitiveStringArray(plan.Addresses) + editHypervisorRequestBody.SetAddresses(addresses) + + editHypervisorRequestBody.SetMaxAbsoluteActiveActions(int32(plan.MaxAbsoluteActiveActions.ValueInt64())) + editHypervisorRequestBody.SetMaxAbsoluteNewActionsPerMinute(int32(plan.MaxAbsoluteNewActionsPerMinute.ValueInt64())) + editHypervisorRequestBody.SetMaxPowerActionsPercentageOfMachines(int32(plan.MaxPowerActionsPercentageOfMachines.ValueInt64())) + + // Patch hypervisor + updatedHypervisor, err := UpdateHypervisor(ctx, r.client, &resp.Diagnostics, hypervisor, editHypervisorRequestBody) + if err != nil { + return + } + + // Update resource state with updated property values + plan = plan.RefreshPropertyValues(updatedHypervisor) + + diags = resp.State.Set(ctx, plan) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } +} + +// Delete implements resource.Resource. +func (r *nutanixHypervisorResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + defer util.PanicHandler(&resp.Diagnostics) + + // Retrieve values from state + var state NutanixHypervisorResourceModel + diags := req.State.Get(ctx, &state) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + // Delete existing hypervisor + hypervisorId := state.Id.ValueString() + hypervisorName := state.Name.ValueString() + deleteHypervisorRequest := r.client.ApiClient.HypervisorsAPIsDAAS.HypervisorsDeleteHypervisor(ctx, hypervisorId) + httpResp, err := citrixdaasclient.AddRequestData(deleteHypervisorRequest, r.client).Execute() + if err != nil && httpResp.StatusCode != http.StatusNotFound { + resp.Diagnostics.AddError( + "Error deleting Hypervisor "+hypervisorName, + "TransactionId: "+citrixdaasclient.GetTransactionIdFromHttpResponse(httpResp)+ + "\nError message: "+util.ReadClientError(err), + ) + return + } +} diff --git a/internal/daas/hypervisor/nutanix_hypervisor_resource_model.go b/internal/daas/hypervisor/nutanix_hypervisor_resource_model.go new file mode 100644 index 0000000..ed2e5a4 --- /dev/null +++ b/internal/daas/hypervisor/nutanix_hypervisor_resource_model.go @@ -0,0 +1,39 @@ +// Copyright © 2023. Citrix Systems, Inc. + +package hypervisor + +import ( + citrixorchestration "github.com/citrix/citrix-daas-rest-go/citrixorchestration" + "github.com/citrix/terraform-provider-citrix/internal/util" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +// HypervisorResourceModel maps the resource schema data. +type NutanixHypervisorResourceModel struct { + /**** Connection Details ****/ + Id types.String `tfsdk:"id"` + Name types.String `tfsdk:"name"` + Zone types.String `tfsdk:"zone"` + /** Nutanix Connection **/ + Username types.String `tfsdk:"username"` + Password types.String `tfsdk:"password"` + PasswordFormat types.String `tfsdk:"password_format"` + Addresses []types.String `tfsdk:"addresses"` + MaxAbsoluteActiveActions types.Int64 `tfsdk:"max_absolute_active_actions"` + MaxAbsoluteNewActionsPerMinute types.Int64 `tfsdk:"max_absolute_new_actions_per_minute"` + MaxPowerActionsPercentageOfMachines types.Int64 `tfsdk:"max_power_actions_percentage_of_machines"` +} + +func (r NutanixHypervisorResourceModel) RefreshPropertyValues(hypervisor *citrixorchestration.HypervisorDetailResponseModel) NutanixHypervisorResourceModel { + r.Id = types.StringValue(hypervisor.GetId()) + r.Name = types.StringValue(hypervisor.GetName()) + r.Username = types.StringValue(hypervisor.GetUserName()) + r.Addresses = util.RefreshList(r.Addresses, hypervisor.GetAddresses()) + r.MaxAbsoluteActiveActions = types.Int64Value(int64(hypervisor.GetMaxAbsoluteActiveActions())) + r.MaxAbsoluteNewActionsPerMinute = types.Int64Value(int64(hypervisor.GetMaxAbsoluteNewActionsPerMinute())) + r.MaxPowerActionsPercentageOfMachines = types.Int64Value(int64(hypervisor.GetMaxPowerActionsPercentageOfMachines())) + + hypZone := hypervisor.GetZone() + r.Zone = types.StringValue(hypZone.GetId()) + return r +} diff --git a/internal/daas/hypervisor/vsphere_hypervisor_resource.go b/internal/daas/hypervisor/vsphere_hypervisor_resource.go new file mode 100644 index 0000000..24dc9fe --- /dev/null +++ b/internal/daas/hypervisor/vsphere_hypervisor_resource.go @@ -0,0 +1,346 @@ +// Copyright © 2023. Citrix Systems, Inc. 
+ +package hypervisor + +import ( + "context" + "net/http" + "regexp" + + citrixorchestration "github.com/citrix/citrix-daas-rest-go/citrixorchestration" + citrixdaasclient "github.com/citrix/citrix-daas-rest-go/client" + "github.com/citrix/terraform-provider-citrix/internal/util" + "github.com/hashicorp/terraform-plugin-framework-validators/int64validator" + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/int64default" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/listplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ resource.Resource = &vsphereHypervisorResource{} + _ resource.ResourceWithConfigure = &vsphereHypervisorResource{} + _ resource.ResourceWithImportState = &vsphereHypervisorResource{} +) + +// NewHypervisorResource is a helper function to simplify the provider implementation. +func NewVsphereHypervisorResource() resource.Resource { + return &vsphereHypervisorResource{} +} + +type vsphereHypervisorResource struct { + client *citrixdaasclient.CitrixDaasClient +} + +// Metadata implements resource.Resource. +func (*vsphereHypervisorResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_vsphere_hypervisor" +} + +// Configure implements resource.ResourceWithConfigure. +func (r *vsphereHypervisorResource) Configure(_ context.Context, req resource.ConfigureRequest, _ *resource.ConfigureResponse) { + if req.ProviderData == nil { + return + } + + r.client = req.ProviderData.(*citrixdaasclient.CitrixDaasClient) +} + +// Schema implements resource.Resource. +func (r *vsphereHypervisorResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + Description: "Manages a Vsphere hypervisor.", + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Description: "GUID identifier of the hypervisor.", + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "name": schema.StringAttribute{ + Description: "Name of the hypervisor.", + Required: true, + }, + "zone": schema.StringAttribute{ + Description: "Id of the zone the hypervisor is associated with.", + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Validators: []validator.String{ + stringvalidator.RegexMatches(regexp.MustCompile(util.GuidRegex), "must be specified with ID in GUID format"), + }, + }, + "username": schema.StringAttribute{ + Description: "Username of the hypervisor.", + Required: true, + }, + "password": schema.StringAttribute{ + Description: "Password of the hypervisor.", + Required: true, + }, + "password_format": schema.StringAttribute{ + Description: "Password format of the hypervisor. 
Choose between Base64 and PlainText.",
+				Required:    true,
+				Validators: []validator.String{
+					stringvalidator.OneOf(
+						string(citrixorchestration.IDENTITYPASSWORDFORMAT_BASE64),
+						string(citrixorchestration.IDENTITYPASSWORDFORMAT_PLAIN_TEXT),
+					),
+				},
+			},
+			"addresses": schema.ListAttribute{
+				ElementType: types.StringType,
+				Description: "Hypervisor address(es). At least one is required.",
+				Required:    true,
+				Validators: []validator.List{
+					listvalidator.SizeAtLeast(1),
+					listvalidator.ValueStringsAre(
+						stringvalidator.RegexMatches(regexp.MustCompile(util.IPv4RegexWithProtocol), "must be a valid IPv4 address prefixed with protocol (http:// or https://)"),
+					),
+				},
+			},
+			"ssl_thumbprints": schema.ListAttribute{
+				ElementType: types.StringType,
+				Description: "SSL certificate thumbprints to consider acceptable for this connection. If not specified, and the hypervisor uses SSL for its connection, the SSL certificate's root certification authority and any intermediate certificates must be trusted.",
+				Optional:    true,
+				PlanModifiers: []planmodifier.List{
+					listplanmodifier.RequiresReplace(),
+				},
+				Validators: []validator.List{
+					listvalidator.SizeAtLeast(1),
+					listvalidator.ValueStringsAre(
+						stringvalidator.RegexMatches(regexp.MustCompile(util.SslThumbprintRegex), "must be specified with SSL thumbprint without colons"),
+					),
+				},
+			},
+			"max_absolute_active_actions": schema.Int64Attribute{
+				Description: "Maximum number of actions that can execute in parallel on the hypervisor. Default is 40.",
+				Optional:    true,
+				Computed:    true,
+				Default:     int64default.StaticInt64(40),
+				Validators: []validator.Int64{
+					int64validator.AtLeast(1),
+				},
+			},
+			"max_absolute_new_actions_per_minute": schema.Int64Attribute{
+				Description: "Maximum number of actions that can be started on the hypervisor per minute. Default is 10.",
+				Optional:    true,
+				Computed:    true,
+				Default:     int64default.StaticInt64(10),
+				Validators: []validator.Int64{
+					int64validator.AtLeast(1),
+				},
+			},
+			"max_power_actions_percentage_of_machines": schema.Int64Attribute{
+				Description: "Maximum percentage of machines on the hypervisor which can have their power state changed simultaneously. Default is 20.",
+				Optional:    true,
+				Computed:    true,
+				Default:     int64default.StaticInt64(20),
+				Validators: []validator.Int64{
+					int64validator.AtLeast(1),
+				},
+			},
+		},
+	}
+}
+
+// ImportState implements resource.ResourceWithImportState.
+func (*vsphereHypervisorResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) {
+	// Retrieve import ID and save to id attribute
+	resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp)
+}
+
+// Create implements resource.Resource.
+func (r *vsphereHypervisorResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) {
+	defer util.PanicHandler(&resp.Diagnostics)
+
+	// Retrieve values from plan
+	var plan VsphereHypervisorResourceModel
+	diags := req.Plan.Get(ctx, &plan)
+	resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() { + return + } + + /* Generate ConnectionDetails API request body from plan */ + var connectionDetails citrixorchestration.HypervisorConnectionDetailRequestModel + connectionDetails.SetName(plan.Name.ValueString()) + connectionDetails.SetZone(plan.Zone.ValueString()) + connectionDetails.SetConnectionType(citrixorchestration.HYPERVISORCONNECTIONTYPE_V_CENTER) + connectionDetails.SetUserName(plan.Username.ValueString()) + connectionDetails.SetPassword(plan.Password.ValueString()) + pwdFormat, err := citrixorchestration.NewIdentityPasswordFormatFromValue(plan.PasswordFormat.ValueString()) + if err != nil || pwdFormat == nil { + resp.Diagnostics.AddError( + "Error creating Hypervisor for Vsphere", + "Unsupported password format: "+plan.PasswordFormat.ValueString(), + ) + } + connectionDetails.SetPasswordFormat(*pwdFormat) + + addresses := util.ConvertBaseStringArrayToPrimitiveStringArray(plan.Addresses) + connectionDetails.SetAddresses(addresses) + + if plan.SslThumbprints != nil { + sslThumbprints := util.ConvertBaseStringArrayToPrimitiveStringArray(plan.SslThumbprints) + connectionDetails.SetSslThumbprints(sslThumbprints) + } + + connectionDetails.SetMaxAbsoluteActiveActions(int32(plan.MaxAbsoluteActiveActions.ValueInt64())) + connectionDetails.SetMaxAbsoluteNewActionsPerMinute(int32(plan.MaxAbsoluteNewActionsPerMinute.ValueInt64())) + connectionDetails.SetMaxPowerActionsPercentageOfMachines(int32(plan.MaxPowerActionsPercentageOfMachines.ValueInt64())) + + var body citrixorchestration.CreateHypervisorRequestModel + body.SetConnectionDetails(connectionDetails) + + hypervisor, err := CreateHypervisor(ctx, r.client, &resp.Diagnostics, body) + if err != nil { + // Directly return. Error logs have been populated in common function. + return + } + + // Map response body to schema and populate Computed attribute values + plan = plan.RefreshPropertyValues(hypervisor) + + // Set state to fully populated data + diags = resp.State.Set(ctx, plan) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } +} + +// Read implements resource.Resource. +func (r *vsphereHypervisorResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + defer util.PanicHandler(&resp.Diagnostics) + + // Get current state + var state VsphereHypervisorResourceModel + diags := req.State.Get(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + // Get refreshed hypervisor properties from Orchestration + hypervisorId := state.Id.ValueString() + hypervisor, err := readHypervisor(ctx, r.client, resp, hypervisorId) + if err != nil { + return + } + + if hypervisor.GetConnectionType() != citrixorchestration.HYPERVISORCONNECTIONTYPE_V_CENTER { + resp.Diagnostics.AddError( + "Error reading Hypervisor", + "Hypervisor "+hypervisor.GetName()+" is not a Vsphere connection type hypervisor.", + ) + return + } + + // Overwrite hypervisor with refreshed state + state = state.RefreshPropertyValues(hypervisor) + + // Set refreshed state + diags = resp.State.Set(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } +} + +// Update implements resource.Resource. 
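Create only attaches ssl_thumbprints to the request when the list is set, and the matching RefreshPropertyValues (later in this patch) maps an empty list from the API back to nil. Together those keep an Optional attribute the practitioner never configured from being stored as an empty list, which would otherwise surface as a permanent plan diff. The nil-vs-empty decision in isolation (helper name is illustrative, not the provider's util.RefreshList):

package main

import "fmt"

// refreshOptionalList keeps an unset Optional list attribute unset: only a
// non-empty value returned by the API is written back into state.
func refreshOptionalList(fromAPI []string) []string {
	if len(fromAPI) > 0 {
		return fromAPI
	}
	return nil
}

func main() {
	fmt.Println(refreshOptionalList([]string{}) == nil)        // true: stays unset
	fmt.Println(refreshOptionalList([]string{"AB12CD34EF56"})) // [AB12CD34EF56]
}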
+func (r *vsphereHypervisorResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + defer util.PanicHandler(&resp.Diagnostics) + + // Retrieve values from plan + var plan VsphereHypervisorResourceModel + diags := req.Plan.Get(ctx, &plan) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + // Get refreshed hypervisor properties from Orchestration + hypervisorId := plan.Id.ValueString() + hypervisor, err := util.GetHypervisor(ctx, r.client, &resp.Diagnostics, hypervisorId) + if err != nil { + return + } + + // Construct the update model + var editHypervisorRequestBody citrixorchestration.EditHypervisorConnectionRequestModel + editHypervisorRequestBody.SetName(plan.Name.ValueString()) + editHypervisorRequestBody.SetConnectionType(citrixorchestration.HYPERVISORCONNECTIONTYPE_V_CENTER) + editHypervisorRequestBody.SetUserName(plan.Username.ValueString()) + editHypervisorRequestBody.SetPassword(plan.Password.ValueString()) + pwdFormat, err := citrixorchestration.NewIdentityPasswordFormatFromValue(plan.PasswordFormat.ValueString()) + if err != nil || pwdFormat == nil { + resp.Diagnostics.AddError( + "Error updating Hypervisor for Vsphere", + "Unsupported password format: "+plan.PasswordFormat.ValueString(), + ) + } + editHypervisorRequestBody.SetPasswordFormat(*pwdFormat) + + addresses := util.ConvertBaseStringArrayToPrimitiveStringArray(plan.Addresses) + editHypervisorRequestBody.SetAddresses(addresses) + + sslThumbprints := util.ConvertBaseStringArrayToPrimitiveStringArray(plan.SslThumbprints) + editHypervisorRequestBody.SetSslThumbprints(sslThumbprints) + + editHypervisorRequestBody.SetMaxAbsoluteActiveActions(int32(plan.MaxAbsoluteActiveActions.ValueInt64())) + editHypervisorRequestBody.SetMaxAbsoluteNewActionsPerMinute(int32(plan.MaxAbsoluteNewActionsPerMinute.ValueInt64())) + editHypervisorRequestBody.SetMaxPowerActionsPercentageOfMachines(int32(plan.MaxPowerActionsPercentageOfMachines.ValueInt64())) + + // Patch hypervisor + updatedHypervisor, err := UpdateHypervisor(ctx, r.client, &resp.Diagnostics, hypervisor, editHypervisorRequestBody) + if err != nil { + return + } + + // Update resource state with updated property values + plan = plan.RefreshPropertyValues(updatedHypervisor) + + diags = resp.State.Set(ctx, plan) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } +} + +// Delete implements resource.Resource. +func (r *vsphereHypervisorResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + defer util.PanicHandler(&resp.Diagnostics) + + // Retrieve values from state + var state VsphereHypervisorResourceModel + diags := req.State.Get(ctx, &state) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + // Delete existing hypervisor + hypervisorId := state.Id.ValueString() + hypervisorName := state.Name.ValueString() + deleteHypervisorRequest := r.client.ApiClient.HypervisorsAPIsDAAS.HypervisorsDeleteHypervisor(ctx, hypervisorId) + httpResp, err := citrixdaasclient.AddRequestData(deleteHypervisorRequest, r.client).Execute() + if err != nil && httpResp.StatusCode != http.StatusNotFound { + resp.Diagnostics.AddError( + "Error deleting Hypervisor "+hypervisorName, + "TransactionId: "+citrixdaasclient.GetTransactionIdFromHttpResponse(httpResp)+ + "\nError message: "+util.ReadClientError(err), + ) + return + } +} diff --git a/internal/daas/hypervisor/vsphere_hypervisor_resource_model.go b/internal/daas/hypervisor/vsphere_hypervisor_resource_model.go new file mode 100644 index 0000000..d90286d --- /dev/null +++ b/internal/daas/hypervisor/vsphere_hypervisor_resource_model.go @@ -0,0 +1,47 @@ +// Copyright © 2023. Citrix Systems, Inc. + +package hypervisor + +import ( + citrixorchestration "github.com/citrix/citrix-daas-rest-go/citrixorchestration" + "github.com/citrix/terraform-provider-citrix/internal/util" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +// HypervisorResourceModel maps the resource schema data. +type VsphereHypervisorResourceModel struct { + /**** Connection Details ****/ + Id types.String `tfsdk:"id"` + Name types.String `tfsdk:"name"` + Zone types.String `tfsdk:"zone"` + /** Vsphere Connection **/ + Username types.String `tfsdk:"username"` + Password types.String `tfsdk:"password"` + PasswordFormat types.String `tfsdk:"password_format"` + Addresses []types.String `tfsdk:"addresses"` + SslThumbprints []types.String `tfsdk:"ssl_thumbprints"` + MaxAbsoluteActiveActions types.Int64 `tfsdk:"max_absolute_active_actions"` + MaxAbsoluteNewActionsPerMinute types.Int64 `tfsdk:"max_absolute_new_actions_per_minute"` + MaxPowerActionsPercentageOfMachines types.Int64 `tfsdk:"max_power_actions_percentage_of_machines"` +} + +func (r VsphereHypervisorResourceModel) RefreshPropertyValues(hypervisor *citrixorchestration.HypervisorDetailResponseModel) VsphereHypervisorResourceModel { + r.Id = types.StringValue(hypervisor.GetId()) + r.Name = types.StringValue(hypervisor.GetName()) + r.Username = types.StringValue(hypervisor.GetUserName()) + r.Addresses = util.RefreshList(r.Addresses, hypervisor.GetAddresses()) + r.MaxAbsoluteActiveActions = types.Int64Value(int64(hypervisor.GetMaxAbsoluteActiveActions())) + r.MaxAbsoluteNewActionsPerMinute = types.Int64Value(int64(hypervisor.GetMaxAbsoluteNewActionsPerMinute())) + r.MaxPowerActionsPercentageOfMachines = types.Int64Value(int64(hypervisor.GetMaxPowerActionsPercentageOfMachines())) + + sslThumbprints := util.RefreshList(r.SslThumbprints, hypervisor.GetSslThumbprints()) + if len(sslThumbprints) > 0 { + r.SslThumbprints = sslThumbprints + } else { + r.SslThumbprints = nil + } + + hypZone := hypervisor.GetZone() + r.Zone = types.StringValue(hypZone.GetId()) + return r +} diff --git a/internal/daas/hypervisor/xenserver_hypervisor_resource.go b/internal/daas/hypervisor/xenserver_hypervisor_resource.go new file mode 100644 index 0000000..b6928d1 --- /dev/null +++ b/internal/daas/hypervisor/xenserver_hypervisor_resource.go @@ -0,0 +1,346 @@ +// Copyright © 2023. Citrix Systems, Inc. 
+ +package hypervisor + +import ( + "context" + "net/http" + "regexp" + + citrixorchestration "github.com/citrix/citrix-daas-rest-go/citrixorchestration" + citrixdaasclient "github.com/citrix/citrix-daas-rest-go/client" + "github.com/citrix/terraform-provider-citrix/internal/util" + "github.com/hashicorp/terraform-plugin-framework-validators/int64validator" + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/int64default" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/listplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ resource.Resource = &xenserverHypervisorResource{} + _ resource.ResourceWithConfigure = &xenserverHypervisorResource{} + _ resource.ResourceWithImportState = &xenserverHypervisorResource{} +) + +// NewHypervisorResource is a helper function to simplify the provider implementation. +func NewXenserverHypervisorResource() resource.Resource { + return &xenserverHypervisorResource{} +} + +type xenserverHypervisorResource struct { + client *citrixdaasclient.CitrixDaasClient +} + +// Metadata implements resource.Resource. +func (*xenserverHypervisorResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_xenserver_hypervisor" +} + +// Configure implements resource.ResourceWithConfigure. +func (r *xenserverHypervisorResource) Configure(_ context.Context, req resource.ConfigureRequest, _ *resource.ConfigureResponse) { + if req.ProviderData == nil { + return + } + + r.client = req.ProviderData.(*citrixdaasclient.CitrixDaasClient) +} + +// Schema implements resource.Resource. +func (r *xenserverHypervisorResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + Description: "Manages a XenServer hypervisor.", + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Description: "GUID identifier of the hypervisor.", + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "name": schema.StringAttribute{ + Description: "Name of the hypervisor.", + Required: true, + }, + "zone": schema.StringAttribute{ + Description: "Id of the zone the hypervisor is associated with.", + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Validators: []validator.String{ + stringvalidator.RegexMatches(regexp.MustCompile(util.GuidRegex), "must be specified with ID in GUID format"), + }, + }, + "username": schema.StringAttribute{ + Description: "Username of the hypervisor.", + Required: true, + }, + "password": schema.StringAttribute{ + Description: "Password of the hypervisor.", + Required: true, + }, + "password_format": schema.StringAttribute{ + Description: "Password format of the hypervisor. 
Choose between Base64 and PlainText.",
+				Required:    true,
+				Validators: []validator.String{
+					stringvalidator.OneOf(
+						string(citrixorchestration.IDENTITYPASSWORDFORMAT_BASE64),
+						string(citrixorchestration.IDENTITYPASSWORDFORMAT_PLAIN_TEXT),
+					),
+				},
+			},
+			"addresses": schema.ListAttribute{
+				ElementType: types.StringType,
+				Description: "Hypervisor address(es). At least one is required.",
+				Required:    true,
+				Validators: []validator.List{
+					listvalidator.SizeAtLeast(1),
+					listvalidator.ValueStringsAre(
+						stringvalidator.RegexMatches(regexp.MustCompile(util.IPv4RegexWithProtocol), "must be a valid IPv4 address prefixed with protocol (http:// or https://)"),
+					),
+				},
+			},
+			"ssl_thumbprints": schema.ListAttribute{
+				ElementType: types.StringType,
+				Description: "SSL certificate thumbprints to consider acceptable for this connection. If not specified, and the hypervisor uses SSL for its connection, the SSL certificate's root certification authority and any intermediate certificates must be trusted.",
+				Optional:    true,
+				PlanModifiers: []planmodifier.List{
+					listplanmodifier.RequiresReplace(),
+				},
+				Validators: []validator.List{
+					listvalidator.SizeAtLeast(1),
+					listvalidator.ValueStringsAre(
+						stringvalidator.RegexMatches(regexp.MustCompile(util.SslThumbprintRegex), "must be specified with SSL thumbprint without colons"),
+					),
+				},
+			},
+			"max_absolute_active_actions": schema.Int64Attribute{
+				Description: "Maximum number of actions that can execute in parallel on the hypervisor. Default is 40.",
+				Optional:    true,
+				Computed:    true,
+				Default:     int64default.StaticInt64(40),
+				Validators: []validator.Int64{
+					int64validator.AtLeast(1),
+				},
+			},
+			"max_absolute_new_actions_per_minute": schema.Int64Attribute{
+				Description: "Maximum number of actions that can be started on the hypervisor per minute. Default is 10.",
+				Optional:    true,
+				Computed:    true,
+				Default:     int64default.StaticInt64(10),
+				Validators: []validator.Int64{
+					int64validator.AtLeast(1),
+				},
+			},
+			"max_power_actions_percentage_of_machines": schema.Int64Attribute{
+				Description: "Maximum percentage of machines on the hypervisor which can have their power state changed simultaneously. Default is 20.",
+				Optional:    true,
+				Computed:    true,
+				Default:     int64default.StaticInt64(20),
+				Validators: []validator.Int64{
+					int64validator.AtLeast(1),
+				},
+			},
+		},
+	}
+}
+
+// ImportState implements resource.ResourceWithImportState.
+func (*xenserverHypervisorResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) {
+	// Retrieve import ID and save to id attribute
+	resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp)
+}
+
+// Create implements resource.Resource.
+func (r *xenserverHypervisorResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) {
+	defer util.PanicHandler(&resp.Diagnostics)
+
+	// Retrieve values from plan
+	var plan XenserverHypervisorResourceModel
+	diags := req.Plan.Get(ctx, &plan)
+	resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() { + return + } + + /* Generate ConnectionDetails API request body from plan */ + var connectionDetails citrixorchestration.HypervisorConnectionDetailRequestModel + connectionDetails.SetName(plan.Name.ValueString()) + connectionDetails.SetZone(plan.Zone.ValueString()) + connectionDetails.SetConnectionType(citrixorchestration.HYPERVISORCONNECTIONTYPE_XEN_SERVER) + connectionDetails.SetUserName(plan.Username.ValueString()) + connectionDetails.SetPassword(plan.Password.ValueString()) + pwdFormat, err := citrixorchestration.NewIdentityPasswordFormatFromValue(plan.PasswordFormat.ValueString()) + if err != nil || pwdFormat == nil { + resp.Diagnostics.AddError( + "Error creating Hypervisor for XenServer", + "Unsupported password format: "+plan.PasswordFormat.ValueString(), + ) + } + connectionDetails.SetPasswordFormat(*pwdFormat) + + addresses := util.ConvertBaseStringArrayToPrimitiveStringArray(plan.Addresses) + connectionDetails.SetAddresses(addresses) + + if plan.SslThumbprints != nil { + sslThumbprints := util.ConvertBaseStringArrayToPrimitiveStringArray(plan.SslThumbprints) + connectionDetails.SetSslThumbprints(sslThumbprints) + } + + connectionDetails.SetMaxAbsoluteActiveActions(int32(plan.MaxAbsoluteActiveActions.ValueInt64())) + connectionDetails.SetMaxAbsoluteNewActionsPerMinute(int32(plan.MaxAbsoluteNewActionsPerMinute.ValueInt64())) + connectionDetails.SetMaxPowerActionsPercentageOfMachines(int32(plan.MaxPowerActionsPercentageOfMachines.ValueInt64())) + + var body citrixorchestration.CreateHypervisorRequestModel + body.SetConnectionDetails(connectionDetails) + + hypervisor, err := CreateHypervisor(ctx, r.client, &resp.Diagnostics, body) + if err != nil { + // Directly return. Error logs have been populated in common function. + return + } + + // Map response body to schema and populate Computed attribute values + plan = plan.RefreshPropertyValues(hypervisor) + + // Set state to fully populated data + diags = resp.State.Set(ctx, plan) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } +} + +// Read implements resource.Resource. +func (r *xenserverHypervisorResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + defer util.PanicHandler(&resp.Diagnostics) + + // Get current state + var state XenserverHypervisorResourceModel + diags := req.State.Get(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + // Get refreshed hypervisor properties from Orchestration + hypervisorId := state.Id.ValueString() + hypervisor, err := readHypervisor(ctx, r.client, resp, hypervisorId) + if err != nil { + return + } + + if hypervisor.GetConnectionType() != citrixorchestration.HYPERVISORCONNECTIONTYPE_XEN_SERVER { + resp.Diagnostics.AddError( + "Error reading Hypervisor", + "Hypervisor "+hypervisor.GetName()+" is not a XenServer connection type hypervisor.", + ) + return + } + + // Overwrite hypervisor with refreshed state + state = state.RefreshPropertyValues(hypervisor) + + // Set refreshed state + diags = resp.State.Set(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } +} + +// Update implements resource.Resource. 
+func (r *xenserverHypervisorResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + defer util.PanicHandler(&resp.Diagnostics) + + // Retrieve values from plan + var plan XenserverHypervisorResourceModel + diags := req.Plan.Get(ctx, &plan) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + // Get refreshed hypervisor properties from Orchestration + hypervisorId := plan.Id.ValueString() + hypervisor, err := util.GetHypervisor(ctx, r.client, &resp.Diagnostics, hypervisorId) + if err != nil { + return + } + + // Construct the update model + var editHypervisorRequestBody citrixorchestration.EditHypervisorConnectionRequestModel + editHypervisorRequestBody.SetName(plan.Name.ValueString()) + editHypervisorRequestBody.SetConnectionType(citrixorchestration.HYPERVISORCONNECTIONTYPE_XEN_SERVER) + editHypervisorRequestBody.SetUserName(plan.Username.ValueString()) + editHypervisorRequestBody.SetPassword(plan.Password.ValueString()) + pwdFormat, err := citrixorchestration.NewIdentityPasswordFormatFromValue(plan.PasswordFormat.ValueString()) + if err != nil || pwdFormat == nil { + resp.Diagnostics.AddError( + "Error updating Hypervisor for XenServer", + "Unsupported password format: "+plan.PasswordFormat.ValueString(), + ) + } + editHypervisorRequestBody.SetPasswordFormat(*pwdFormat) + + addresses := util.ConvertBaseStringArrayToPrimitiveStringArray(plan.Addresses) + editHypervisorRequestBody.SetAddresses(addresses) + + sslThumbprints := util.ConvertBaseStringArrayToPrimitiveStringArray(plan.SslThumbprints) + editHypervisorRequestBody.SetSslThumbprints(sslThumbprints) + + editHypervisorRequestBody.SetMaxAbsoluteActiveActions(int32(plan.MaxAbsoluteActiveActions.ValueInt64())) + editHypervisorRequestBody.SetMaxAbsoluteNewActionsPerMinute(int32(plan.MaxAbsoluteNewActionsPerMinute.ValueInt64())) + editHypervisorRequestBody.SetMaxPowerActionsPercentageOfMachines(int32(plan.MaxPowerActionsPercentageOfMachines.ValueInt64())) + + // Patch hypervisor + updatedHypervisor, err := UpdateHypervisor(ctx, r.client, &resp.Diagnostics, hypervisor, editHypervisorRequestBody) + if err != nil { + return + } + + // Update resource state with updated property values + plan = plan.RefreshPropertyValues(updatedHypervisor) + + diags = resp.State.Set(ctx, plan) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } +} + +// Delete implements resource.Resource. +func (r *xenserverHypervisorResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + defer util.PanicHandler(&resp.Diagnostics) + + // Retrieve values from state + var state XenserverHypervisorResourceModel + diags := req.State.Get(ctx, &state) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + // Delete existing hypervisor + hypervisorId := state.Id.ValueString() + hypervisorName := state.Name.ValueString() + deleteHypervisorRequest := r.client.ApiClient.HypervisorsAPIsDAAS.HypervisorsDeleteHypervisor(ctx, hypervisorId) + httpResp, err := citrixdaasclient.AddRequestData(deleteHypervisorRequest, r.client).Execute() + if err != nil && httpResp.StatusCode != http.StatusNotFound { + resp.Diagnostics.AddError( + "Error deleting Hypervisor "+hypervisorName, + "TransactionId: "+citrixdaasclient.GetTransactionIdFromHttpResponse(httpResp)+ + "\nError message: "+util.ReadClientError(err), + ) + return + } +} diff --git a/internal/daas/hypervisor/xenserver_hypervisor_resource_model.go b/internal/daas/hypervisor/xenserver_hypervisor_resource_model.go new file mode 100644 index 0000000..8de9a0b --- /dev/null +++ b/internal/daas/hypervisor/xenserver_hypervisor_resource_model.go @@ -0,0 +1,47 @@ +// Copyright © 2023. Citrix Systems, Inc. + +package hypervisor + +import ( + citrixorchestration "github.com/citrix/citrix-daas-rest-go/citrixorchestration" + "github.com/citrix/terraform-provider-citrix/internal/util" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +// HypervisorResourceModel maps the resource schema data. +type XenserverHypervisorResourceModel struct { + /**** Connection Details ****/ + Id types.String `tfsdk:"id"` + Name types.String `tfsdk:"name"` + Zone types.String `tfsdk:"zone"` + /** Xenserver Connection **/ + Username types.String `tfsdk:"username"` + Password types.String `tfsdk:"password"` + PasswordFormat types.String `tfsdk:"password_format"` + Addresses []types.String `tfsdk:"addresses"` + SslThumbprints []types.String `tfsdk:"ssl_thumbprints"` + MaxAbsoluteActiveActions types.Int64 `tfsdk:"max_absolute_active_actions"` + MaxAbsoluteNewActionsPerMinute types.Int64 `tfsdk:"max_absolute_new_actions_per_minute"` + MaxPowerActionsPercentageOfMachines types.Int64 `tfsdk:"max_power_actions_percentage_of_machines"` +} + +func (r XenserverHypervisorResourceModel) RefreshPropertyValues(hypervisor *citrixorchestration.HypervisorDetailResponseModel) XenserverHypervisorResourceModel { + r.Id = types.StringValue(hypervisor.GetId()) + r.Name = types.StringValue(hypervisor.GetName()) + r.Username = types.StringValue(hypervisor.GetUserName()) + r.Addresses = util.RefreshList(r.Addresses, hypervisor.GetAddresses()) + r.MaxAbsoluteActiveActions = types.Int64Value(int64(hypervisor.GetMaxAbsoluteActiveActions())) + r.MaxAbsoluteNewActionsPerMinute = types.Int64Value(int64(hypervisor.GetMaxAbsoluteNewActionsPerMinute())) + r.MaxPowerActionsPercentageOfMachines = types.Int64Value(int64(hypervisor.GetMaxPowerActionsPercentageOfMachines())) + + sslThumbprints := util.RefreshList(r.SslThumbprints, hypervisor.GetSslThumbprints()) + if len(sslThumbprints) > 0 { + r.SslThumbprints = sslThumbprints + } else { + r.SslThumbprints = nil + } + + hypZone := hypervisor.GetZone() + r.Zone = types.StringValue(hypZone.GetId()) + return r +} diff --git a/internal/daas/resources/hypervisor_resource_pool/aws_hypervisor_resource_pool_resource.go b/internal/daas/hypervisor_resource_pool/aws_hypervisor_resource_pool_resource.go similarity index 99% rename from internal/daas/resources/hypervisor_resource_pool/aws_hypervisor_resource_pool_resource.go rename to internal/daas/hypervisor_resource_pool/aws_hypervisor_resource_pool_resource.go index 3b73782..eda7f6d 100644 --- 
a/internal/daas/resources/hypervisor_resource_pool/aws_hypervisor_resource_pool_resource.go +++ b/internal/daas/hypervisor_resource_pool/aws_hypervisor_resource_pool_resource.go @@ -43,7 +43,7 @@ type awsHypervisorResourcePoolResource struct { // Metadata returns the resource type name. func (r *awsHypervisorResourcePoolResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { - resp.TypeName = req.ProviderTypeName + "_daas_aws_hypervisor_resource_pool" + resp.TypeName = req.ProviderTypeName + "_aws_hypervisor_resource_pool" } func (r *awsHypervisorResourcePoolResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { diff --git a/internal/daas/resources/hypervisor_resource_pool/aws_hypervisor_resource_pool_resource_model.go b/internal/daas/hypervisor_resource_pool/aws_hypervisor_resource_pool_resource_model.go similarity index 100% rename from internal/daas/resources/hypervisor_resource_pool/aws_hypervisor_resource_pool_resource_model.go rename to internal/daas/hypervisor_resource_pool/aws_hypervisor_resource_pool_resource_model.go diff --git a/internal/daas/resources/hypervisor_resource_pool/azure_hypervisor_resource_pool_resource.go b/internal/daas/hypervisor_resource_pool/azure_hypervisor_resource_pool_resource.go similarity index 97% rename from internal/daas/resources/hypervisor_resource_pool/azure_hypervisor_resource_pool_resource.go rename to internal/daas/hypervisor_resource_pool/azure_hypervisor_resource_pool_resource.go index 0ebea6c..1ec741e 100644 --- a/internal/daas/resources/hypervisor_resource_pool/azure_hypervisor_resource_pool_resource.go +++ b/internal/daas/hypervisor_resource_pool/azure_hypervisor_resource_pool_resource.go @@ -44,7 +44,7 @@ type azureHypervisorResourcePoolResource struct { // Metadata returns the resource type name. 
func (r *azureHypervisorResourcePoolResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { - resp.TypeName = req.ProviderTypeName + "_daas_azure_hypervisor_resource_pool" + resp.TypeName = req.ProviderTypeName + "_azure_hypervisor_resource_pool" } func (r *azureHypervisorResourcePoolResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { @@ -161,7 +161,7 @@ func (r *azureHypervisorResourcePoolResource) Create(ctx context.Context, req re ) return } - region, err := util.GetSingleHypervisorResource(ctx, r.client, hypervisorId, "", plan.Region.ValueString(), "", "", citrixorchestration.HYPERVISORCONNECTIONTYPE_AZURE_RM) + region, err := util.GetSingleHypervisorResource(ctx, r.client, hypervisorId, "", plan.Region.ValueString(), "Region", "", hypervisor) regionPath := region.GetXDPath() if err != nil { resp.Diagnostics.AddError( @@ -171,7 +171,7 @@ func (r *azureHypervisorResourcePoolResource) Create(ctx context.Context, req re return } resourcePoolDetails.SetRegion(regionPath) - vnet, err := util.GetSingleHypervisorResource(ctx, r.client, hypervisorId, fmt.Sprintf("%s/virtualprivatecloud.folder", regionPath), plan.VirtualNetwork.ValueString(), "VirtualPrivateCloud", plan.VirtualNetworkResourceGroup.ValueString(), citrixorchestration.HYPERVISORCONNECTIONTYPE_AZURE_RM) + vnet, err := util.GetSingleHypervisorResource(ctx, r.client, hypervisorId, fmt.Sprintf("%s/virtualprivatecloud.folder", regionPath), plan.VirtualNetwork.ValueString(), "VirtualPrivateCloud", plan.VirtualNetworkResourceGroup.ValueString(), hypervisor) vnetPath := vnet.GetXDPath() if err != nil { resp.Diagnostics.AddError( diff --git a/internal/daas/resources/hypervisor_resource_pool/azure_hypervisor_resource_pool_resource_model.go b/internal/daas/hypervisor_resource_pool/azure_hypervisor_resource_pool_resource_model.go similarity index 100% rename from internal/daas/resources/hypervisor_resource_pool/azure_hypervisor_resource_pool_resource_model.go rename to internal/daas/hypervisor_resource_pool/azure_hypervisor_resource_pool_resource_model.go diff --git a/internal/daas/resources/hypervisor_resource_pool/gcp_hypervisor_resource_pool_resource.go b/internal/daas/hypervisor_resource_pool/gcp_hypervisor_resource_pool_resource.go similarity index 99% rename from internal/daas/resources/hypervisor_resource_pool/gcp_hypervisor_resource_pool_resource.go rename to internal/daas/hypervisor_resource_pool/gcp_hypervisor_resource_pool_resource.go index f50761d..ec9e1d4 100644 --- a/internal/daas/resources/hypervisor_resource_pool/gcp_hypervisor_resource_pool_resource.go +++ b/internal/daas/hypervisor_resource_pool/gcp_hypervisor_resource_pool_resource.go @@ -45,7 +45,7 @@ type gcpHypervisorResourcePoolResource struct { // Metadata returns the resource type name. 
func (r *gcpHypervisorResourcePoolResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { - resp.TypeName = req.ProviderTypeName + "_daas_gcp_hypervisor_resource_pool" + resp.TypeName = req.ProviderTypeName + "_gcp_hypervisor_resource_pool" } func (r *gcpHypervisorResourcePoolResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { diff --git a/internal/daas/resources/hypervisor_resource_pool/gcp_hypervisor_resource_pool_resource_model.go b/internal/daas/hypervisor_resource_pool/gcp_hypervisor_resource_pool_resource_model.go similarity index 100% rename from internal/daas/resources/hypervisor_resource_pool/gcp_hypervisor_resource_pool_resource_model.go rename to internal/daas/hypervisor_resource_pool/gcp_hypervisor_resource_pool_resource_model.go diff --git a/internal/daas/resources/hypervisor_resource_pool/hypervisor_resource_pool_common.go b/internal/daas/hypervisor_resource_pool/hypervisor_resource_pool_common.go similarity index 97% rename from internal/daas/resources/hypervisor_resource_pool/hypervisor_resource_pool_common.go rename to internal/daas/hypervisor_resource_pool/hypervisor_resource_pool_common.go index 423afb6..88fc897 100644 --- a/internal/daas/resources/hypervisor_resource_pool/hypervisor_resource_pool_common.go +++ b/internal/daas/hypervisor_resource_pool/hypervisor_resource_pool_common.go @@ -28,7 +28,7 @@ func CreateHypervisorResourcePool(ctx context.Context, client *citrixdaasclient. return nil, err } - err = util.ProcessAsyncJobResponse(ctx, client, httpResp, "Error creating Resource Pool for Hypervisor "+hypervisor.GetName(), diagnostics, 10) + err = util.ProcessAsyncJobResponse(ctx, client, httpResp, "Error creating Resource Pool for Hypervisor "+hypervisor.GetName(), diagnostics, 10, true) if err != nil { return nil, err } @@ -57,7 +57,7 @@ func UpdateHypervisorResourcePool(ctx context.Context, client *citrixdaasclient. return nil, err } - err = util.ProcessAsyncJobResponse(ctx, client, httpResp, "Error updating Resource Pool "+resourcePoolId, diagnostics, 5) + err = util.ProcessAsyncJobResponse(ctx, client, httpResp, "Error updating Resource Pool "+resourcePoolId, diagnostics, 5, true) if err != nil { return nil, err } diff --git a/internal/daas/hypervisor_resource_pool/xenserver_hypervisor_resource_pool_resource.go b/internal/daas/hypervisor_resource_pool/xenserver_hypervisor_resource_pool_resource.go new file mode 100644 index 0000000..4727c21 --- /dev/null +++ b/internal/daas/hypervisor_resource_pool/xenserver_hypervisor_resource_pool_resource.go @@ -0,0 +1,362 @@ +// Copyright © 2023. Citrix Systems, Inc. 
+ +package hypervisor_resource_pool + +import ( + "context" + "fmt" + "net/http" + "regexp" + "strings" + + "github.com/citrix/citrix-daas-rest-go/citrixorchestration" + citrixdaasclient "github.com/citrix/citrix-daas-rest-go/client" + "github.com/citrix/terraform-provider-citrix/internal/util" + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/booldefault" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/boolplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ resource.Resource = &xenserverHypervisorResourcePoolResource{} + _ resource.ResourceWithConfigure = &xenserverHypervisorResourcePoolResource{} + _ resource.ResourceWithImportState = &xenserverHypervisorResourcePoolResource{} +) + +// NewHypervisorResourcePoolResource is a helper function to simplify the provider implementation. +func NewXenserverHypervisorResourcePoolResource() resource.Resource { + return &xenserverHypervisorResourcePoolResource{} +} + +// hypervisorResource is the resource implementation. +type xenserverHypervisorResourcePoolResource struct { + client *citrixdaasclient.CitrixDaasClient +} + +// Metadata returns the resource type name. +func (r *xenserverHypervisorResourcePoolResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_xenserver_hypervisor_resource_pool" +} + +func (r *xenserverHypervisorResourcePoolResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + Description: "Manages an XenServer hypervisor resource pool.", + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Description: "GUID identifier of the resource pool.", + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "name": schema.StringAttribute{ + Description: "Name of the resource pool. 
Name should be unique across all hypervisors.", + Required: true, + }, + "hypervisor": schema.StringAttribute{ + Description: "Id of the hypervisor for which the resource pool needs to be created.", + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Validators: []validator.String{ + stringvalidator.RegexMatches(regexp.MustCompile(util.GuidRegex), "must be specified with ID in GUID format"), + }, + }, + "networks": schema.ListAttribute{ + ElementType: types.StringType, + Description: "List of networks for allocating resources.", + Required: true, + Validators: []validator.List{ + listvalidator.SizeAtLeast(1), + }, + }, + "storage": schema.ListAttribute{ + ElementType: types.StringType, + Description: "List of hypervisor storage to use for OS data.", + Required: true, + Validators: []validator.List{ + listvalidator.SizeAtLeast(1), + }, + }, + "temporary_storage": schema.ListAttribute{ + ElementType: types.StringType, + Description: "List of hypervisor storage to use for temporary data.", + Required: true, + Validators: []validator.List{ + listvalidator.SizeAtLeast(1), + }, + }, + "use_local_storage_caching": schema.BoolAttribute{ + Description: "Indicates whether IntelliCache is enabled to reduce load on the shared storage device. Will only be effective when shared storage is used.", + Optional: true, + Computed: true, + Default: booldefault.StaticBool(false), + PlanModifiers: []planmodifier.Bool{ + boolplanmodifier.RequiresReplace(), + }, + }, + }, + } +} + +// Configure adds the provider configured client to the resource. +func (r *xenserverHypervisorResourcePoolResource) Configure(_ context.Context, req resource.ConfigureRequest, _ *resource.ConfigureResponse) { + if req.ProviderData == nil { + return + } + + r.client = req.ProviderData.(*citrixdaasclient.CitrixDaasClient) +} + +func (r *xenserverHypervisorResourcePoolResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + defer util.PanicHandler(&resp.Diagnostics) + + var plan XenserverHypervisorResourcePoolResourceModel + diags := req.Plan.Get(ctx, &plan) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + hypervisorId := plan.Hypervisor.ValueString() + hypervisor, err := util.GetHypervisor(ctx, r.client, &resp.Diagnostics, hypervisorId) + + if err != nil { + return + } + + var resourcePoolDetails citrixorchestration.CreateHypervisorResourcePoolRequestModel + hypervisorConnectionType := hypervisor.GetConnectionType() + if hypervisorConnectionType != citrixorchestration.HYPERVISORCONNECTIONTYPE_XEN_SERVER { + resp.Diagnostics.AddError( + "Error creating XenServer Resource Pool for Hypervisor", + "Unsupported hypervisor connection type.", + ) + return + } + + resourcePoolDetails.SetName(plan.Name.ValueString()) + resourcePoolDetails.SetConnectionType(hypervisorConnectionType) + + storages, tempStorages, networks := SetResourceList(ctx, r.client, &resp.Diagnostics, hypervisorId, hypervisorConnectionType, plan) + if storages == nil || tempStorages == nil || networks == nil { + // Error handled in helper function.
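+ // A nil slice from SetResourceList means resolution failed and diagnostics were already recorded.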
+ return + } + resourcePoolDetails.SetStorage(storages) + resourcePoolDetails.SetTemporaryStorage(tempStorages) + resourcePoolDetails.SetNetworks(networks) + + resourcePoolDetails.SetUseLocalStorageCaching(plan.UseLocalStorageCaching.ValueBool()) + + resourcePool, err := CreateHypervisorResourcePool(ctx, r.client, &resp.Diagnostics, *hypervisor, resourcePoolDetails) + if err != nil { + // Directly return. Error logs have been populated in common function + return + } + + plan = plan.RefreshPropertyValues(resourcePool) + + // Set state to fully populated data + diags = resp.State.Set(ctx, plan) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + +} + +func (r *xenserverHypervisorResourcePoolResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + defer util.PanicHandler(&resp.Diagnostics) + + var state XenserverHypervisorResourcePoolResourceModel + diags := req.State.Get(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + // Get hypervisor properties from Orchestration + hypervisorId := state.Hypervisor.ValueString() + + // Get the resource pool + resourcePool, err := ReadHypervisorResourcePool(ctx, r.client, resp, hypervisorId, state.Id.ValueString()) + if err != nil { + return + } + + // Override with refreshed state + state = state.RefreshPropertyValues(resourcePool) + + // Set refreshed state + diags = resp.State.Set(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + +} + +func (r *xenserverHypervisorResourcePoolResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + defer util.PanicHandler(&resp.Diagnostics) + + var plan XenserverHypervisorResourcePoolResourceModel + diags := req.Plan.Get(ctx, &plan) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + hypervisorId := plan.Hypervisor.ValueString() + hypervisor, err := util.GetHypervisor(ctx, r.client, &resp.Diagnostics, hypervisorId) + + if err != nil { + return + } + + hypervisorConnectionType := hypervisor.GetConnectionType() + if hypervisorConnectionType != citrixorchestration.HYPERVISORCONNECTIONTYPE_XEN_SERVER { + resp.Diagnostics.AddError( + "Error updating XenServer Resource Pool for Hypervisor", + "Unsupported hypervisor connection type.", + ) + return + } + + var state XenserverHypervisorResourcePoolResourceModel + diags = req.State.Get(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + var editHypervisorResourcePool citrixorchestration.EditHypervisorResourcePoolRequestModel + editHypervisorResourcePool.SetName(plan.Name.ValueString()) + editHypervisorResourcePool.SetConnectionType(citrixorchestration.HYPERVISORCONNECTIONTYPE_XEN_SERVER) + + storages, tempStorages, networks := SetResourceList(ctx, r.client, &resp.Diagnostics, hypervisorId, hypervisorConnectionType, plan) + if storages == nil || tempStorages == nil || networks == nil { + // Error handled in helper function.
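+ // As in Create, nil results indicate SetResourceList already reported the failure.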
+ return + } + + storageRequests := []citrixorchestration.HypervisorResourcePoolStorageRequestModel{} + for _, storage := range storages { + request := &citrixorchestration.HypervisorResourcePoolStorageRequestModel{} + request.SetStoragePath(storage) + storageRequests = append(storageRequests, *request) + } + editHypervisorResourcePool.SetStorage(storageRequests) + + tempStorageRequests := []citrixorchestration.HypervisorResourcePoolStorageRequestModel{} + for _, storage := range tempStorages { + request := &citrixorchestration.HypervisorResourcePoolStorageRequestModel{} + request.SetStoragePath(storage) + tempStorageRequests = append(tempStorageRequests, *request) + } + editHypervisorResourcePool.SetTemporaryStorage(tempStorageRequests) + + editHypervisorResourcePool.SetNetworks(networks) + + editHypervisorResourcePool.SetUseLocalStorageCaching(plan.UseLocalStorageCaching.ValueBool()) + + updatedResourcePool, err := UpdateHypervisorResourcePool(ctx, r.client, &resp.Diagnostics, plan.Hypervisor.ValueString(), plan.Id.ValueString(), editHypervisorResourcePool) + if err != nil { + return + } + + plan = plan.RefreshPropertyValues(updatedResourcePool) + + // Set state to fully populated data + diags = resp.State.Set(ctx, plan) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + +} + +func (r *xenserverHypervisorResourcePoolResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + idParts := strings.Split(req.ID, ",") + + if len(idParts) != 2 || idParts[0] == "" || idParts[1] == "" { + resp.Diagnostics.AddError( + "Unexpected Import Identifier", + fmt.Sprintf("Expected import identifier with format: hypervisorId,hypervisorResourcePoolId. Got: %q", req.ID), + ) + return + } + + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("hypervisor"), idParts[0])...) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("id"), idParts[1])...) + +} + +func (r *xenserverHypervisorResourcePoolResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + defer util.PanicHandler(&resp.Diagnostics) + + var state XenserverHypervisorResourcePoolResourceModel + diags := req.State.Get(ctx, &state) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + // Delete resource pool + hypervisorId := state.Hypervisor.ValueString() + deleteHypervisorResourcePoolRequest := r.client.ApiClient.HypervisorsAPIsDAAS.HypervisorsDeleteHypervisorResourcePool(ctx, hypervisorId, state.Id.ValueString()) + httpResp, err := citrixdaasclient.AddRequestData(deleteHypervisorResourcePoolRequest, r.client).Execute() + if err != nil && httpResp.StatusCode != http.StatusNotFound { + resp.Diagnostics.AddError( + "Error deleting Resource Pool for Hypervisor "+hypervisorId, + "TransactionId: "+citrixdaasclient.GetTransactionIdFromHttpResponse(httpResp)+ + "\nError message: "+util.ReadClientError(err), + ) + return + } +} + +func SetResourceList(ctx context.Context, client *citrixdaasclient.CitrixDaasClient, diags *diag.Diagnostics, hypervisorId string, hypervisorConnectionType citrixorchestration.HypervisorConnectionType, plan XenserverHypervisorResourcePoolResourceModel) ([]string, []string, []string) { + storageNames := util.ConvertBaseStringArrayToPrimitiveStringArray(plan.Storage) + storages, err := util.GetFilteredResourcePathList(ctx, client, hypervisorId, "", "storage", storageNames, hypervisorConnectionType) + if err != nil { + diags.AddError( + "Error creating Hypervisor Resource Pool for XenServer", + "Error message: "+util.ReadClientError(err), + ) + return nil, nil, nil + } + + tempStorageNames := util.ConvertBaseStringArrayToPrimitiveStringArray(plan.TemporaryStorage) + tempStorages, err := util.GetFilteredResourcePathList(ctx, client, hypervisorId, "", "storage", tempStorageNames, hypervisorConnectionType) + if err != nil { + diags.AddError( + "Error creating Hypervisor Resource Pool for XenServer", + "Error message: "+util.ReadClientError(err), + ) + return nil, nil, nil + } + + networkNames := util.ConvertBaseStringArrayToPrimitiveStringArray(plan.Networks) + networks, err := util.GetFilteredResourcePathList(ctx, client, hypervisorId, "", "network", networkNames, hypervisorConnectionType) + if err != nil { + diags.AddError( + "Error creating Hypervisor Resource Pool for XenServer", + "Error message: "+util.ReadClientError(err), + ) + return nil, nil, nil + } + + return storages, tempStorages, networks +} diff --git a/internal/daas/hypervisor_resource_pool/xenserver_hypervisor_resource_pool_resource_model.go b/internal/daas/hypervisor_resource_pool/xenserver_hypervisor_resource_pool_resource_model.go new file mode 100644 index 0000000..59b06db --- /dev/null +++ b/internal/daas/hypervisor_resource_pool/xenserver_hypervisor_resource_pool_resource_model.go @@ -0,0 +1,51 @@ +// Copyright © 2023. Citrix Systems, Inc. 
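+// Defines the Terraform state model for the XenServer hypervisor resource pool and its refresh-from-remote logic.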
+ +package hypervisor_resource_pool + +import ( + "github.com/citrix/citrix-daas-rest-go/citrixorchestration" + "github.com/citrix/terraform-provider-citrix/internal/util" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type XenserverHypervisorResourcePoolResourceModel struct { + Id types.String `tfsdk:"id"` + Name types.String `tfsdk:"name"` + Hypervisor types.String `tfsdk:"hypervisor"` + /**** Resource Pool Details ****/ + Networks []types.String `tfsdk:"networks"` + Storage []types.String `tfsdk:"storage"` + TemporaryStorage []types.String `tfsdk:"temporary_storage"` + UseLocalStorageCaching types.Bool `tfsdk:"use_local_storage_caching"` +} + +func (r XenserverHypervisorResourcePoolResourceModel) RefreshPropertyValues(resourcePool *citrixorchestration.HypervisorResourcePoolDetailResponseModel) XenserverHypervisorResourcePoolResourceModel { + + r.Id = types.StringValue(resourcePool.GetId()) + r.Name = types.StringValue(resourcePool.GetName()) + + hypervisorConnection := resourcePool.GetHypervisorConnection() + r.Hypervisor = types.StringValue(hypervisorConnection.GetId()) + + r.UseLocalStorageCaching = types.BoolValue(resourcePool.GetUseLocalStorageCaching()) + + remoteNetwork := []string{} + for _, network := range resourcePool.GetNetworks() { + remoteNetwork = append(remoteNetwork, network.GetName()) + } + r.Networks = util.RefreshList(r.Networks, remoteNetwork) + + remoteStorage := []string{} + for _, storage := range resourcePool.GetStorage() { + remoteStorage = append(remoteStorage, storage.GetName()) + } + r.Storage = util.RefreshList(r.Storage, remoteStorage) + + remoteTempStorage := []string{} + for _, storage := range resourcePool.GetTemporaryStorage() { + remoteTempStorage = append(remoteTempStorage, storage.GetName()) + } + r.TemporaryStorage = util.RefreshList(r.TemporaryStorage, remoteTempStorage) + + return r +} diff --git a/internal/daas/machine_catalog/machine_catalog_common_utils.go b/internal/daas/machine_catalog/machine_catalog_common_utils.go new file mode 100644 index 0000000..2d2f905 --- /dev/null +++ b/internal/daas/machine_catalog/machine_catalog_common_utils.go @@ -0,0 +1,340 @@ +// Copyright © 2023. Citrix Systems, Inc. 
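+// Shared helpers for the machine catalog resource: create/update request construction, batch API headers, catalog reads, and machine deletion used by both MCS and manual catalogs.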
+ +package machine_catalog + +import ( + "context" + "fmt" + "net/http" + "strconv" + "strings" + + "github.com/citrix/citrix-daas-rest-go/citrixorchestration" + citrixdaasclient "github.com/citrix/citrix-daas-rest-go/client" + "github.com/citrix/terraform-provider-citrix/internal/util" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func getRequestModelForCreateMachineCatalog(plan MachineCatalogResourceModel, ctx context.Context, client *citrixdaasclient.CitrixDaasClient, diagnostics *diag.Diagnostics, connectionType *citrixorchestration.HypervisorConnectionType, isOnPremises bool) (*citrixorchestration.CreateMachineCatalogRequestModel, error) { + provisioningType, err := citrixorchestration.NewProvisioningTypeFromValue(plan.ProvisioningType.ValueString()) + if err != nil { + diagnostics.AddError( + "Error creating Machine Catalog", + "Unsupported provisioning type.", + ) + + return nil, err + } + + var machinesRequest []citrixorchestration.AddMachineToMachineCatalogRequestModel + var body citrixorchestration.CreateMachineCatalogRequestModel + + isRemotePcCatalog := plan.IsRemotePc.ValueBool() + + // Generate API request body from plan + body.SetName(plan.Name.ValueString()) + body.SetDescription(plan.Description.ValueString()) + body.SetProvisioningType(*provisioningType) // Only support MCS and Manual. Block other types + body.SetMinimumFunctionalLevel(citrixorchestration.FUNCTIONALLEVEL_L7_20) // Hard-coding VDA feature level to be same as QCS + allocationType, err := citrixorchestration.NewAllocationTypeFromValue(plan.AllocationType.ValueString()) + if err != nil { + diagnostics.AddError( + "Error creating Machine Catalog", + "Unsupported allocation type.", + ) + return nil, err + } + body.SetAllocationType(*allocationType) + sessionSupport, err := citrixorchestration.NewSessionSupportFromValue(plan.SessionSupport.ValueString()) + if err != nil { + diagnostics.AddError( + "Error creating Machine Catalog", + "Unsupported session support.", + ) + return nil, err + } + body.SetSessionSupport(*sessionSupport) + persistChanges := citrixorchestration.PERSISTCHANGES_DISCARD + if *sessionSupport == citrixorchestration.SESSIONSUPPORT_SINGLE_SESSION && *allocationType == citrixorchestration.ALLOCATIONTYPE_STATIC { + persistChanges = citrixorchestration.PERSISTCHANGES_ON_LOCAL + } + body.SetPersistUserChanges(persistChanges) + body.SetZone(plan.Zone.ValueString()) + if !plan.VdaUpgradeType.IsNull() { + body.SetVdaUpgradeType(citrixorchestration.VdaUpgradeType(plan.VdaUpgradeType.ValueString())) + } else { + body.SetVdaUpgradeType(citrixorchestration.VDAUPGRADETYPE_NOT_SET) + } + + if *provisioningType == citrixorchestration.PROVISIONINGTYPE_MCS { + provisioningScheme, err := getProvSchemeForMcsCatalog(plan, ctx, client, diagnostics, isOnPremises) + if err != nil { + return nil, err + } + body.SetProvisioningScheme(*provisioningScheme) + return &body, nil + } + + // Manual type catalogs + machineType := citrixorchestration.MACHINETYPE_VIRTUAL + if !plan.IsPowerManaged.ValueBool() { + machineType = citrixorchestration.MACHINETYPE_PHYSICAL + } + + body.SetMachineType(machineType) + body.SetIsRemotePC(plan.IsRemotePc.ValueBool()) + + if isRemotePcCatalog { + remotePCEnrollmentScopes := getRemotePcEnrollmentScopes(plan, true) + body.SetRemotePCEnrollmentScopes(remotePCEnrollmentScopes) + } else { + 
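// Non-Remote PC manual catalogs: resolve each configured machine account into an AddMachine request. +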
machinesRequest, err = getMachinesForManualCatalogs(ctx, client, plan.MachineAccounts) + if err != nil { + diagnostics.AddError( + "Error creating Machine Catalog", + fmt.Sprintf("Failed to resolve machines, error: %s", err.Error()), + ) + return nil, err + } + body.SetMachines(machinesRequest) + } + + return &body, nil +} + +func getRequestModelForUpdateMachineCatalog(plan, state MachineCatalogResourceModel, catalog *citrixorchestration.MachineCatalogDetailResponseModel, ctx context.Context, client *citrixdaasclient.CitrixDaasClient, resp *resource.UpdateResponse, connectionType *citrixorchestration.HypervisorConnectionType, isOnPremises bool) (*citrixorchestration.UpdateMachineCatalogRequestModel, error) { + // Generate API request body from plan + var body citrixorchestration.UpdateMachineCatalogRequestModel + body.SetName(plan.Name.ValueString()) + body.SetDescription(plan.Description.ValueString()) + body.SetZone(plan.Zone.ValueString()) + if !plan.VdaUpgradeType.IsNull() { + body.SetVdaUpgradeType(citrixorchestration.VdaUpgradeType(plan.VdaUpgradeType.ValueString())) + } else { + body.SetVdaUpgradeType(citrixorchestration.VDAUPGRADETYPE_NOT_SET) + } + + provisioningType, err := citrixorchestration.NewProvisioningTypeFromValue(plan.ProvisioningType.ValueString()) + if err != nil { + resp.Diagnostics.AddError( + "Error updating Machine Catalog", + "Unsupported provisioning type.", + ) + + return nil, err + } + + if *provisioningType == citrixorchestration.PROVISIONINGTYPE_MANUAL { + + if plan.IsRemotePc.ValueBool() { + remotePCEnrollmentScopes := getRemotePcEnrollmentScopes(plan, false) + body.SetRemotePCEnrollmentScopes(remotePCEnrollmentScopes) + } + + return &body, nil + } + + if plan.ProvisioningScheme.IdentityType.ValueString() == string(citrixorchestration.IDENTITYTYPE_AZURE_AD) { + if isOnPremises { + resp.Diagnostics.AddAttributeError( + path.Root("identity_type"), + "Unsupported Machine Catalog Configuration", + fmt.Sprintf("Identity type %s is not supported in OnPremises environment. 
", string(citrixorchestration.IDENTITYTYPE_AZURE_AD)), + ) + + return nil, fmt.Errorf("identity type %s is not supported in OnPremises environment", string(citrixorchestration.IDENTITYTYPE_AZURE_AD)) + } + } + + body, err = setProvSchemePropertiesForUpdateCatalog(plan, body, ctx, client, &resp.Diagnostics, connectionType) + if err != nil { + return nil, err + } + + return &body, nil +} + +func generateBatchApiHeaders(client *citrixdaasclient.CitrixDaasClient, plan MachineCatalogResourceModel, generateCredentialHeader bool) ([]citrixorchestration.NameValueStringPairModel, *http.Response, error) { + headers := []citrixorchestration.NameValueStringPairModel{} + + cwsAuthToken, httpResp, err := client.SignIn() + var token string + if err != nil { + return headers, httpResp, err + } + + if cwsAuthToken != "" { + token = strings.Split(cwsAuthToken, "=")[1] + var header citrixorchestration.NameValueStringPairModel + header.SetName("Authorization") + header.SetValue("Bearer " + token) + headers = append(headers, header) + } + + if generateCredentialHeader && plan.ProvisioningScheme.MachineDomainIdentity != nil { + adminCredentialHeader := generateAdminCredentialHeader(plan) + var header citrixorchestration.NameValueStringPairModel + header.SetName("X-AdminCredential") + header.SetValue(adminCredentialHeader) + headers = append(headers, header) + } + + return headers, httpResp, err +} + +func readMachineCatalog(ctx context.Context, client *citrixdaasclient.CitrixDaasClient, resp *resource.ReadResponse, machineCatalogId string) (*citrixorchestration.MachineCatalogDetailResponseModel, *http.Response, error) { + getMachineCatalogRequest := client.ApiClient.MachineCatalogsAPIsDAAS.MachineCatalogsGetMachineCatalog(ctx, machineCatalogId).Fields("Id,Name,HypervisorConnection,ProvisioningScheme,RemotePCEnrollmentScopes") + catalog, httpResp, err := util.ReadResource[*citrixorchestration.MachineCatalogDetailResponseModel](getMachineCatalogRequest, ctx, client, resp, "Machine Catalog", machineCatalogId) + + client.ApiClient.MachineCatalogsAPIsDAAS.MachineCatalogsGetMachineCatalogMachines(ctx, machineCatalogId).Execute() + + return catalog, httpResp, err +} + +func deleteMachinesFromCatalog(ctx context.Context, client *citrixdaasclient.CitrixDaasClient, resp *resource.UpdateResponse, plan MachineCatalogResourceModel, machinesToDelete []citrixorchestration.MachineResponseModel, catalogNameOrId string, isMcsCatalog bool) error { + batchApiHeaders, httpResp, err := generateBatchApiHeaders(client, plan, false) + txId := citrixdaasclient.GetTransactionIdFromHttpResponse(httpResp) + if err != nil { + resp.Diagnostics.AddError( + "Error updating Machine Catalog "+catalogNameOrId, + "TransactionId: "+txId+ + "\nCould not put machine(s) into maintenance mode before deleting them, unexpected error: "+util.ReadClientError(err), + ) + return err + } + batchRequestItems := []citrixorchestration.BatchRequestItemModel{} + + for index, machineToDelete := range machinesToDelete { + if machineToDelete.DeliveryGroup == nil { + // if machine has no delivery group, there is no need to put it in maintenance mode + continue + } + + isMachineInMaintenanceMode := machineToDelete.GetInMaintenanceMode() + + if !isMachineInMaintenanceMode { + // machine is not in maintenance mode. 
Put machine in maintenance mode first before deleting + var updateMachineModel citrixorchestration.UpdateMachineRequestModel + updateMachineModel.SetInMaintenanceMode(true) + updateMachineStringBody, err := util.ConvertToString(updateMachineModel) + if err != nil { + resp.Diagnostics.AddError( + "Error removing Machine(s) from Machine Catalog "+catalogNameOrId, + "An unexpected error occurred: "+err.Error(), + ) + return err + } + relativeUrl := fmt.Sprintf("/Machines/%s?async=true", machineToDelete.GetId()) + + var batchRequestItem citrixorchestration.BatchRequestItemModel + batchRequestItem.SetReference(strconv.Itoa(index)) + batchRequestItem.SetMethod(http.MethodPatch) + batchRequestItem.SetRelativeUrl(client.GetBatchRequestItemRelativeUrl(relativeUrl)) + batchRequestItem.SetBody(updateMachineStringBody) + batchRequestItem.SetHeaders(batchApiHeaders) + batchRequestItems = append(batchRequestItems, batchRequestItem) + } + } + + if len(batchRequestItems) > 0 { + // If there are any machines that need to be put in maintenance mode + var batchRequestModel citrixorchestration.BatchRequestModel + batchRequestModel.SetItems(batchRequestItems) + successfulJobs, txId, err := citrixdaasclient.PerformBatchOperation(ctx, client, batchRequestModel) + if err != nil { + resp.Diagnostics.AddError( + "Error deleting machine(s) from Machine Catalog "+catalogNameOrId, + "TransactionId: "+txId+ + "\nError message: "+util.ReadClientError(err), + ) + return err + } + + if successfulJobs < len(batchRequestItems) { + errMsg := fmt.Sprintf("An error occurred while putting machine(s) into maintenance mode before deleting them. %d of %d machines were put into maintenance mode.", successfulJobs, len(batchRequestItems)) + err = fmt.Errorf(errMsg) + resp.Diagnostics.AddError( + "Error updating Machine Catalog "+catalogNameOrId, + "TransactionId: "+txId+ + "\n"+errMsg, + ) + + return err + } + } + + batchApiHeaders, httpResp, err = generateBatchApiHeaders(client, plan, isMcsCatalog) + txId = citrixdaasclient.GetTransactionIdFromHttpResponse(httpResp) + if err != nil { + resp.Diagnostics.AddError( + "Error updating Machine Catalog "+catalogNameOrId, + "TransactionId: "+txId+ + "\nCould not delete machine(s) from machine catalog, unexpected error: "+util.ReadClientError(err), + ) + return err + } + + deleteAccountOption := "Leave" + if isMcsCatalog { + deleteAccountOption = "Delete" + } + batchRequestItems = []citrixorchestration.BatchRequestItemModel{} + for index, machineToDelete := range machinesToDelete { + var batchRequestItem citrixorchestration.BatchRequestItemModel + relativeUrl := fmt.Sprintf("/Machines/%s?deleteVm=%t&purgeDBOnly=false&deleteAccount=%s&async=true", machineToDelete.GetId(), isMcsCatalog, deleteAccountOption) + batchRequestItem.SetReference(strconv.Itoa(index)) + batchRequestItem.SetMethod(http.MethodDelete) + batchRequestItem.SetHeaders(batchApiHeaders) + batchRequestItem.SetRelativeUrl(client.GetBatchRequestItemRelativeUrl(relativeUrl)) + batchRequestItems = append(batchRequestItems, batchRequestItem) + } + + batchRequestModel := citrixorchestration.BatchRequestModel{} + batchRequestModel.SetItems(batchRequestItems) + successfulJobs, txId, err := citrixdaasclient.PerformBatchOperation(ctx, client, batchRequestModel) + if err != nil { + resp.Diagnostics.AddError( + "Error deleting machine(s) from Machine Catalog "+catalogNameOrId, + "TransactionId: "+txId+ + "\nError message: "+util.ReadClientError(err), + ) + return err + } + + if successfulJobs < len(machinesToDelete) { + errMsg := 
fmt.Sprintf("An error occurred while deleting machine(s) from Machine Catalog. %d of %d machines were deleted from the Machine Catalog.", successfulJobs, len(batchRequestItems)) + err = fmt.Errorf(errMsg) + resp.Diagnostics.AddError( + "Error updating Machine Catalog "+catalogNameOrId, + "TransactionId: "+txId+ + "\n"+errMsg, + ) + + return err + } + + return nil +} + +func allocationTypeEnumToString(conn citrixorchestration.AllocationType) string { + switch conn { + case citrixorchestration.ALLOCATIONTYPE_UNKNOWN: + return "Unknown" + case citrixorchestration.ALLOCATIONTYPE_RANDOM: + return "Random" + case citrixorchestration.ALLOCATIONTYPE_STATIC: + return "Static" + default: + return "" + } +} + +func (scope RemotePcOuModel) RefreshListItem(remote citrixorchestration.RemotePCEnrollmentScopeResponseModel) RemotePcOuModel { + scope.OUName = types.StringValue(remote.GetOU()) + scope.IncludeSubFolders = types.BoolValue(remote.GetIncludeSubfolders()) + + return scope +} diff --git a/internal/daas/machine_catalog/machine_catalog_manual_utils.go b/internal/daas/machine_catalog/machine_catalog_manual_utils.go new file mode 100644 index 0000000..5b26432 --- /dev/null +++ b/internal/daas/machine_catalog/machine_catalog_manual_utils.go @@ -0,0 +1,512 @@ +// Copyright © 2023. Citrix Systems, Inc. + +package machine_catalog + +import ( + "context" + "fmt" + "net/http" + "strconv" + "strings" + + "github.com/citrix/citrix-daas-rest-go/citrixorchestration" + citrixdaasclient "github.com/citrix/citrix-daas-rest-go/client" + "github.com/citrix/terraform-provider-citrix/internal/util" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func getMachinesForManualCatalogs(ctx context.Context, client *citrixdaasclient.CitrixDaasClient, machineAccounts []MachineAccountsModel) ([]citrixorchestration.AddMachineToMachineCatalogRequestModel, error) { + if machineAccounts == nil { + return nil, nil + } + + addMachineRequestList := []citrixorchestration.AddMachineToMachineCatalogRequestModel{} + for _, machineAccount := range machineAccounts { + hypervisorId := machineAccount.Hypervisor.ValueString() + var hypervisor *citrixorchestration.HypervisorDetailResponseModel + var err error + if hypervisorId != "" { + hypervisor, err = util.GetHypervisor(ctx, client, nil, hypervisorId) + + if err != nil { + return nil, err + } + } + + for _, machine := range machineAccount.Machines { + addMachineRequest := citrixorchestration.AddMachineToMachineCatalogRequestModel{} + addMachineRequest.SetMachineName(machine.MachineAccount.ValueString()) + + if hypervisorId == "" { + // no hypervisor, non-power managed manual catalog + addMachineRequestList = append(addMachineRequestList, addMachineRequest) + continue + } + + machineName := machine.MachineName.ValueString() + var vmId string + connectionType := hypervisor.GetConnectionType() + switch connectionType { + case citrixorchestration.HYPERVISORCONNECTIONTYPE_AZURE_RM: + if machine.Region.IsNull() || machine.ResourceGroupName.IsNull() { + return nil, fmt.Errorf("region and resource_group_name are required for Azure") + } + region, err := util.GetSingleHypervisorResource(ctx, client, hypervisorId, "", machine.Region.ValueString(), "Region", "", hypervisor) + if err != nil { + return nil, err + } + regionPath := region.GetXDPath() + vm, err := util.GetSingleHypervisorResource(ctx, client, hypervisorId, fmt.Sprintf("%s\\vm.folder", regionPath), machineName, "Vm", machine.ResourceGroupName.ValueString(), 
hypervisor) + if err != nil { + return nil, err + } + vmId = vm.GetId() + case citrixorchestration.HYPERVISORCONNECTIONTYPE_AWS: + if machine.AvailabilityZone.IsNull() { + return nil, fmt.Errorf("availability_zone is required for AWS") + } + availabilityZone, err := util.GetSingleHypervisorResource(ctx, client, hypervisorId, "", machine.AvailabilityZone.ValueString(), "", "", hypervisor) + if err != nil { + return nil, err + } + availabilityZonePath := availabilityZone.GetXDPath() + vm, err := util.GetSingleHypervisorResource(ctx, client, hypervisorId, availabilityZonePath, machineName, "Vm", "", hypervisor) + if err != nil { + return nil, err + } + vmId = vm.GetId() + case citrixorchestration.HYPERVISORCONNECTIONTYPE_GOOGLE_CLOUD_PLATFORM: + if machine.Region.IsNull() || machine.ProjectName.IsNull() { + return nil, fmt.Errorf("region and project_name are required for GCP") + } + projectName, err := util.GetSingleHypervisorResource(ctx, client, hypervisorId, "", machine.ProjectName.ValueString(), "", "", hypervisor) + if err != nil { + return nil, err + } + projectNamePath := projectName.GetXDPath() + vm, err := util.GetSingleHypervisorResource(ctx, client, hypervisorId, fmt.Sprintf("%s\\%s.region", projectNamePath, machine.Region.ValueString()), machineName, "Vm", "", hypervisor) + if err != nil { + return nil, err + } + vmId = vm.GetId() + case citrixorchestration.HYPERVISORCONNECTIONTYPE_V_CENTER: + if machine.Datacenter.IsNull() || machine.Host.IsNull() { + return nil, fmt.Errorf("datacenter and host are required for Vsphere") + } + + folderPath := hypervisor.GetXDPath() + datacenter, err := util.GetSingleHypervisorResource(ctx, client, hypervisorId, folderPath, machine.Datacenter.ValueString(), "datacenter", "", hypervisor) + if err != nil { + return nil, err + } + + folderPath = datacenter.GetXDPath() + + if !machine.Cluster.IsNull() { + cluster, err := util.GetSingleHypervisorResource(ctx, client, hypervisorId, folderPath, machine.Cluster.ValueString(), "cluster", "", hypervisor) + if err != nil { + return nil, err + } + folderPath = cluster.GetXDPath() + } + + host, err := util.GetSingleHypervisorResource(ctx, client, hypervisorId, folderPath, machine.Host.ValueString(), "computeresource", "", hypervisor) + if err != nil { + return nil, err + } + hostPath := host.GetXDPath() + vm, err := util.GetSingleHypervisorResource(ctx, client, hypervisorId, hostPath, machineName, "Vm", "", hypervisor) + if err != nil { + return nil, err + } + vmId = vm.GetId() + case citrixorchestration.HYPERVISORCONNECTIONTYPE_XEN_SERVER: + vm, err := util.GetSingleHypervisorResource(ctx, client, hypervisorId, "", machineName, "Vm", "", hypervisor) + if err != nil { + return nil, err + } + vmId = vm.GetId() + case citrixorchestration.HYPERVISORCONNECTIONTYPE_CUSTOM: + if hypervisor.GetPluginId() == util.NUTANIX_PLUGIN_ID { + hypervisorXdPath := hypervisor.GetXDPath() + vm, err := util.GetSingleHypervisorResource(ctx, client, hypervisorId, fmt.Sprintf("%s\\VirtualMachines.folder", hypervisorXdPath), machineName, "Vm", "", hypervisor) + if err != nil { + return nil, err + } + vmId = vm.GetId() + } + } + + addMachineRequest.SetHostedMachineId(vmId) + addMachineRequest.SetHypervisorConnection(hypervisorId) + + addMachineRequestList = append(addMachineRequestList, addMachineRequest) + } + } + + return addMachineRequestList, nil +} + +func deleteMachinesFromManualCatalog(ctx context.Context, client *citrixdaasclient.CitrixDaasClient, resp *resource.UpdateResponse, deleteMachinesList map[string]bool, 
catalogNameOrId string, isCatalogPowerManaged bool) error { + + if len(deleteMachinesList) < 1 { + // nothing to delete + return nil + } + + getMachinesResponse, err := util.GetMachineCatalogMachines(ctx, client, &resp.Diagnostics, catalogNameOrId) + if err != nil { + return err + } + + machinesToDelete := []citrixorchestration.MachineResponseModel{} + for _, machine := range getMachinesResponse.Items { + if deleteMachinesList[strings.ToLower(machine.GetName())] { + machinesToDelete = append(machinesToDelete, machine) + } + } + + return deleteMachinesFromCatalog(ctx, client, resp, MachineCatalogResourceModel{}, machinesToDelete, catalogNameOrId, false) +} + +func addMachinesToManualCatalog(ctx context.Context, client *citrixdaasclient.CitrixDaasClient, resp *resource.UpdateResponse, addMachinesList []MachineAccountsModel, catalogIdOrName string) error { + + if len(addMachinesList) < 1 { + // no machines to add + return nil + } + + addMachinesRequest, err := getMachinesForManualCatalogs(ctx, client, addMachinesList) + if err != nil { + resp.Diagnostics.AddError( + "Error adding machine(s) to Machine Catalog "+catalogIdOrName, + fmt.Sprintf("Failed to resolve machines, error: %s", err.Error()), + ) + + return err + } + + batchApiHeaders, httpResp, err := generateBatchApiHeaders(client, MachineCatalogResourceModel{}, false) + txId := citrixdaasclient.GetTransactionIdFromHttpResponse(httpResp) + if err != nil { + resp.Diagnostics.AddError( + "Error updating Machine Catalog "+catalogIdOrName, + "TransactionId: "+txId+ + "\nCould not add machine to Machine Catalog, unexpected error: "+util.ReadClientError(err), + ) + return err + } + + batchRequestItems := []citrixorchestration.BatchRequestItemModel{} + relativeUrl := fmt.Sprintf("/MachineCatalogs/%s/Machines?async=true", catalogIdOrName) + for i := 0; i < len(addMachinesRequest); i++ { + addMachineRequestStringBody, err := util.ConvertToString(addMachinesRequest[i]) + if err != nil { + resp.Diagnostics.AddError( + "Error adding Machine to Machine Catalog "+catalogIdOrName, + "An unexpected error occurred: "+err.Error(), + ) + return err + } + var batchRequestItem citrixorchestration.BatchRequestItemModel + batchRequestItem.SetMethod(http.MethodPost) + batchRequestItem.SetReference(strconv.Itoa(i)) + batchRequestItem.SetRelativeUrl(client.GetBatchRequestItemRelativeUrl(relativeUrl)) + batchRequestItem.SetBody(addMachineRequestStringBody) + batchRequestItem.SetHeaders(batchApiHeaders) + batchRequestItems = append(batchRequestItems, batchRequestItem) + } + + var batchRequestModel citrixorchestration.BatchRequestModel + batchRequestModel.SetItems(batchRequestItems) + successfulJobs, txId, err := citrixdaasclient.PerformBatchOperation(ctx, client, batchRequestModel) + if err != nil { + resp.Diagnostics.AddError( + "Error adding machine(s) to Machine Catalog "+catalogIdOrName, + "TransactionId: "+txId+ + "\nError message: "+util.ReadClientError(err), + ) + return err + } + + if successfulJobs < len(addMachinesList) { + errMsg := fmt.Sprintf("An error occurred while adding machine(s) to the Machine Catalog. 
%d of %d machines were added to the Machine Catalog.", successfulJobs, len(addMachinesList)) + err = fmt.Errorf(errMsg) + resp.Diagnostics.AddError( + "Error updating Machine Catalog "+catalogIdOrName, + "TransactionId: "+txId+ + "\n"+errMsg, + ) + + return err + } + + return nil +} + +func createAddAndRemoveMachinesListForManualCatalogs(state, plan MachineCatalogResourceModel) ([]MachineAccountsModel, map[string]bool) { + addMachinesList := []MachineAccountsModel{} + existingMachineAccounts := map[string]map[string]bool{} + + // create map for existing machines marking all machines for deletion + if state.MachineAccounts != nil { + for _, machineAccount := range state.MachineAccounts { + for _, machine := range machineAccount.Machines { + machineMap, exists := existingMachineAccounts[machineAccount.Hypervisor.ValueString()] + if !exists { + existingMachineAccounts[machineAccount.Hypervisor.ValueString()] = map[string]bool{} + machineMap = existingMachineAccounts[machineAccount.Hypervisor.ValueString()] + } + machineMap[strings.ToLower(machine.MachineAccount.ValueString())] = true + } + } + } + + // iterate over plan and if machine already exists, mark false for deletion. If not, add it to the addMachineList + if plan.MachineAccounts != nil { + for _, machineAccount := range plan.MachineAccounts { + machineAccountMachines := []MachineCatalogMachineModel{} + for _, machine := range machineAccount.Machines { + if existingMachineAccounts[machineAccount.Hypervisor.ValueString()][strings.ToLower(machine.MachineAccount.ValueString())] { + // Machine exists. Mark false for deletion + existingMachineAccounts[machineAccount.Hypervisor.ValueString()][strings.ToLower(machine.MachineAccount.ValueString())] = false + } else { + // Machine does not exist and needs to be added + machineAccountMachines = append(machineAccountMachines, machine) + } + } + + if len(machineAccountMachines) > 0 { + var addMachineAccount MachineAccountsModel + addMachineAccount.Hypervisor = machineAccount.Hypervisor + addMachineAccount.Machines = machineAccountMachines + addMachinesList = append(addMachinesList, addMachineAccount) + } + } + } + + deleteMachinesMap := map[string]bool{} + + for _, machineMap := range existingMachineAccounts { + for machineName, canBeDeleted := range machineMap { + if canBeDeleted { + deleteMachinesMap[machineName] = true + } + } + } + + return addMachinesList, deleteMachinesMap +} + +func (r MachineCatalogResourceModel) updateCatalogWithMachines(ctx context.Context, client *citrixdaasclient.CitrixDaasClient, machines *citrixorchestration.MachineResponseModelCollection) MachineCatalogResourceModel { + if machines == nil { + r.MachineAccounts = nil + return r + } + + machineMapFromRemote := map[string]citrixorchestration.MachineResponseModel{} + for _, machine := range machines.GetItems() { + machineMapFromRemote[strings.ToLower(machine.GetName())] = machine + } + + if r.MachineAccounts != nil { + machinesNotPresetInRemote := map[string]bool{} + for _, machineAccount := range r.MachineAccounts { + for _, machineFromPlan := range machineAccount.Machines { + machineFromPlanName := machineFromPlan.MachineAccount.ValueString() + machineFromRemote, exists := machineMapFromRemote[strings.ToLower(machineFromPlanName)] + if !exists { + machinesNotPresetInRemote[strings.ToLower(machineFromPlanName)] = true + continue + } + + hosting := machineFromRemote.GetHosting() + hypervisor := hosting.GetHypervisorConnection() + hypervisorId := hypervisor.GetId() + hostingServerName := hosting.GetHostingServerName() + 
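// Hosted machine name/ID formats are hypervisor-specific; each connection type is parsed in the switch below. +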
hostedMachineName := hosting.GetHostedMachineName() + + if !strings.EqualFold(hypervisorId, machineAccount.Hypervisor.ValueString()) { + machinesNotPresetInRemote[strings.ToLower(machineFromPlanName)] = true + continue + } + + if hypervisorId == "" { + delete(machineMapFromRemote, strings.ToLower(machineFromPlanName)) + continue + } + + hyp, err := util.GetHypervisor(ctx, client, nil, hypervisorId) + if err != nil { + machinesNotPresetInRemote[strings.ToLower(machineFromPlanName)] = true + continue + } + + connectionType := hyp.GetConnectionType() + hostedMachineId := hosting.GetHostedMachineId() + switch connectionType { + case citrixorchestration.HYPERVISORCONNECTIONTYPE_AZURE_RM: + if hostedMachineId != "" { + hostedMachineIdArray := strings.Split(hostedMachineId, "/") // hosted machine id is resourcegroupname/vmname + if !strings.EqualFold(machineFromPlan.ResourceGroupName.ValueString(), hostedMachineIdArray[0]) { + machineFromPlan.ResourceGroupName = types.StringValue(hostedMachineIdArray[0]) + } + if !strings.EqualFold(machineFromPlan.MachineName.ValueString(), hostedMachineIdArray[1]) { + machineFromPlan.MachineName = types.StringValue(hostedMachineIdArray[1]) + } + } + case citrixorchestration.HYPERVISORCONNECTIONTYPE_GOOGLE_CLOUD_PLATFORM: + if hostedMachineId != "" { + machineIdArray := strings.Split(hostedMachineId, ":") // hosted machine id is projectname:region:vmname + if !strings.EqualFold(machineFromPlan.Region.ValueString(), machineIdArray[1]) { + machineFromPlan.Region = types.StringValue(machineIdArray[1]) + } + if !strings.EqualFold(machineFromPlan.MachineName.ValueString(), machineIdArray[2]) { + machineFromPlan.MachineName = types.StringValue(machineIdArray[2]) + } + } + case citrixorchestration.HYPERVISORCONNECTIONTYPE_V_CENTER: + if hostingServerName != "" { + if !strings.EqualFold(machineFromPlan.Host.ValueString(), hostingServerName) { + machineFromPlan.Host = types.StringValue(hostingServerName) + } + } + if hostedMachineName != "" { + if !strings.EqualFold(machineFromPlan.MachineName.ValueString(), hostedMachineName) { + machineFromPlan.MachineName = types.StringValue(hostedMachineName) + } + } + case citrixorchestration.HYPERVISORCONNECTIONTYPE_XEN_SERVER: + if hostedMachineName != "" { + if !strings.EqualFold(machineFromPlan.MachineName.ValueString(), hostedMachineName) { + machineFromPlan.MachineName = types.StringValue(hostedMachineName) + } + } + case citrixorchestration.HYPERVISORCONNECTIONTYPE_CUSTOM: + if hyp.GetPluginId() == util.NUTANIX_PLUGIN_ID && hostedMachineName != "" { + if !strings.EqualFold(machineFromPlan.MachineName.ValueString(), hostedMachineName) { + machineFromPlan.MachineName = types.StringValue(hostedMachineName) + } + } + // case citrixorchestration.HYPERVISORCONNECTIONTYPE_AWS: AvailabilityZone is not available from remote + } + + delete(machineMapFromRemote, strings.ToLower(machineFromPlanName)) + } + } + + machineAccounts := []MachineAccountsModel{} + for _, machineAccount := range r.MachineAccounts { + machines := []MachineCatalogMachineModel{} + for _, machine := range machineAccount.Machines { + if machinesNotPresetInRemote[strings.ToLower(machine.MachineAccount.ValueString())] { + continue + } + machines = append(machines, machine) + } + machineAccount.Machines = machines + machineAccounts = append(machineAccounts, machineAccount) + } + + r.MachineAccounts = machineAccounts + } + + // go over any machines that are in remote but were not in plan + newMachines := map[string][]MachineCatalogMachineModel{} + for machineName, 
machineFromRemote := range machineMapFromRemote { + hosting := machineFromRemote.GetHosting() + hypConnection := hosting.GetHypervisorConnection() + hypId := hypConnection.GetId() + + var machineModel MachineCatalogMachineModel + machineModel.MachineAccount = types.StringValue(machineName) + + if hypId != "" { + hyp, err := util.GetHypervisor(ctx, client, nil, hypId) + if err != nil { + continue + } + + connectionType := hyp.GetConnectionType() + hostedMachineId := hosting.GetHostedMachineId() + hostingServerName := hosting.GetHostingServerName() + hostedMachineName := hosting.GetHostedMachineName() + + switch connectionType { + case citrixorchestration.HYPERVISORCONNECTIONTYPE_AZURE_RM: + if hostedMachineId != "" { + hostedMachineIdArray := strings.Split(hostedMachineId, "/") // hosted machine id is resourcegroupname/vmname + machineModel.ResourceGroupName = types.StringValue(hostedMachineIdArray[0]) + machineModel.MachineName = types.StringValue(hostedMachineIdArray[1]) + // region is not available from remote + } + case citrixorchestration.HYPERVISORCONNECTIONTYPE_GOOGLE_CLOUD_PLATFORM: + if hostedMachineId != "" { + machineIdArray := strings.Split(hostedMachineId, ":") // hosted machine id is projectname:region:vmname + machineModel.ProjectName = types.StringValue(machineIdArray[0]) + machineModel.Region = types.StringValue(machineIdArray[1]) + machineModel.MachineName = types.StringValue(machineIdArray[2]) + } + case citrixorchestration.HYPERVISORCONNECTIONTYPE_V_CENTER: + if hostingServerName != "" { + machineModel.Host = types.StringValue(hostingServerName) + } + if hostedMachineName != "" { + machineModel.MachineName = types.StringValue(hostedMachineName) + } + case citrixorchestration.HYPERVISORCONNECTIONTYPE_XEN_SERVER: + if hostedMachineName != "" { + machineModel.MachineName = types.StringValue(hostedMachineName) + } + case citrixorchestration.HYPERVISORCONNECTIONTYPE_CUSTOM: + if hyp.GetPluginId() == util.NUTANIX_PLUGIN_ID && hostedMachineName != "" { + machineModel.MachineName = types.StringValue(hostedMachineName) + } + // case citrixorchestration.HYPERVISORCONNECTIONTYPE_AWS: AvailabilityZone is not available from remote + } + } + + _, exists := newMachines[hypId] + if !exists { + newMachines[hypId] = []MachineCatalogMachineModel{} + } + + newMachines[hypId] = append(newMachines[hypId], machineModel) + } + + if len(newMachines) > 0 && r.MachineAccounts == nil { + r.MachineAccounts = []MachineAccountsModel{} + } + + machineAccountMap := map[string]int{} + for index, machineAccount := range r.MachineAccounts { + machineAccountMap[machineAccount.Hypervisor.ValueString()] = index + } + + for hypId, machines := range newMachines { + machineAccIndex, exists := machineAccountMap[hypId] + if exists { + machAccounts := r.MachineAccounts + machineAccount := machAccounts[machineAccIndex] + if machineAccount.Machines == nil { + machineAccount.Machines = []MachineCatalogMachineModel{} + } + machineAccountMachines := machineAccount.Machines + machineAccountMachines = append(machineAccountMachines, machines...) 
+ machineAccount.Machines = machineAccountMachines + machAccounts[machineAccIndex] = machineAccount + r.MachineAccounts = machAccounts + continue + } + var machineAccount MachineAccountsModel + machineAccount.Hypervisor = types.StringValue(hypId) + machineAccount.Machines = machines + machAccounts := r.MachineAccounts + machAccounts = append(machAccounts, machineAccount) + machineAccountMap[hypId] = len(machAccounts) - 1 + r.MachineAccounts = machAccounts + } + + return r +} diff --git a/internal/daas/machine_catalog/machine_catalog_mcs_utils.go b/internal/daas/machine_catalog/machine_catalog_mcs_utils.go new file mode 100644 index 0000000..a44d7f9 --- /dev/null +++ b/internal/daas/machine_catalog/machine_catalog_mcs_utils.go @@ -0,0 +1,791 @@ +// Copyright © 2023. Citrix Systems, Inc. + +package machine_catalog + +import ( + "context" + "encoding/base64" + "fmt" + "net/http" + "reflect" + "strconv" + "strings" + + "github.com/citrix/citrix-daas-rest-go/citrixorchestration" + citrixdaasclient "github.com/citrix/citrix-daas-rest-go/client" + "github.com/citrix/terraform-provider-citrix/internal/util" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/types" + "golang.org/x/exp/slices" +) + +func getProvSchemeForMcsCatalog(plan MachineCatalogResourceModel, ctx context.Context, client *citrixdaasclient.CitrixDaasClient, diagnostics *diag.Diagnostics, isOnPremises bool) (*citrixorchestration.CreateMachineCatalogProvisioningSchemeRequestModel, error) { + if plan.ProvisioningScheme.IdentityType.ValueString() == string(citrixorchestration.IDENTITYTYPE_AZURE_AD) { + if isOnPremises { + diagnostics.AddAttributeError( + path.Root("identity_type"), + "Unsupported Machine Catalog Configuration", + fmt.Sprintf("Identity type %s is not supported in OnPremises environment. ", string(citrixorchestration.IDENTITYTYPE_AZURE_AD)), + ) + + return nil, fmt.Errorf("identity type %s is not supported in OnPremises environment. 
", string(citrixorchestration.IDENTITYTYPE_AZURE_AD)) + } + } + + hypervisor, err := util.GetHypervisor(ctx, client, diagnostics, plan.ProvisioningScheme.Hypervisor.ValueString()) + if err != nil { + return nil, err + } + + hypervisorResourcePool, err := util.GetHypervisorResourcePool(ctx, client, diagnostics, plan.ProvisioningScheme.Hypervisor.ValueString(), plan.ProvisioningScheme.HypervisorResourcePool.ValueString()) + if err != nil { + return nil, err + } + + provisioningScheme, errorMsg := buildProvSchemeForMcsCatalog(ctx, client, plan, hypervisor, hypervisorResourcePool) + if errorMsg != "" || provisioningScheme == nil { + diagnostics.AddError( + "Error creating Machine Catalog", + errorMsg, + ) + + return nil, fmt.Errorf(errorMsg) + } + + return provisioningScheme, nil +} + +func buildProvSchemeForMcsCatalog(ctx context.Context, client *citrixdaasclient.CitrixDaasClient, plan MachineCatalogResourceModel, hypervisor *citrixorchestration.HypervisorDetailResponseModel, hypervisorResourcePool *citrixorchestration.HypervisorResourcePoolDetailResponseModel) (*citrixorchestration.CreateMachineCatalogProvisioningSchemeRequestModel, string) { + + var machineAccountCreationRules citrixorchestration.MachineAccountCreationRulesRequestModel + machineAccountCreationRules.SetNamingScheme(plan.ProvisioningScheme.MachineAccountCreationRules.NamingScheme.ValueString()) + namingScheme, err := citrixorchestration.NewNamingSchemeTypeFromValue(plan.ProvisioningScheme.MachineAccountCreationRules.NamingSchemeType.ValueString()) + if err != nil { + return nil, "Unsupported machine account naming scheme type." + } + + machineAccountCreationRules.SetNamingSchemeType(*namingScheme) + if plan.ProvisioningScheme.MachineDomainIdentity != nil { + machineAccountCreationRules.SetDomain(plan.ProvisioningScheme.MachineDomainIdentity.Domain.ValueString()) + machineAccountCreationRules.SetOU(plan.ProvisioningScheme.MachineDomainIdentity.Ou.ValueString()) + } + + var provisioningScheme citrixorchestration.CreateMachineCatalogProvisioningSchemeRequestModel + provisioningScheme.SetNumTotalMachines(int32(plan.ProvisioningScheme.NumTotalMachines.ValueInt64())) + identityType := citrixorchestration.IdentityType(plan.ProvisioningScheme.IdentityType.ValueString()) + provisioningScheme.SetIdentityType(identityType) + provisioningScheme.SetWorkGroupMachines(false) // Non-Managed setup does not support non-domain joined + if identityType == citrixorchestration.IDENTITYTYPE_AZURE_AD { + provisioningScheme.SetWorkGroupMachines(true) + } + provisioningScheme.SetMachineAccountCreationRules(machineAccountCreationRules) + provisioningScheme.SetResourcePool(plan.ProvisioningScheme.HypervisorResourcePool.ValueString()) + + switch hypervisor.GetConnectionType() { + case citrixorchestration.HYPERVISORCONNECTIONTYPE_AZURE_RM: + serviceOffering := plan.ProvisioningScheme.AzureMachineConfig.ServiceOffering.ValueString() + queryPath := "serviceoffering.folder" + serviceOfferingPath, err := util.GetSingleResourcePathFromHypervisor(ctx, client, hypervisor.GetName(), hypervisorResourcePool.GetName(), queryPath, serviceOffering, "serviceoffering", "") + if err != nil { + return nil, fmt.Sprintf("Failed to resolve service offering %s on Azure, error: %s", serviceOffering, err.Error()) + } + provisioningScheme.SetServiceOfferingPath(serviceOfferingPath) + + resourceGroup := plan.ProvisioningScheme.AzureMachineConfig.ResourceGroup.ValueString() + masterImage := plan.ProvisioningScheme.AzureMachineConfig.MasterImage.ValueString() + imagePath := "" + if 
masterImage != "" { + storageAccount := plan.ProvisioningScheme.AzureMachineConfig.StorageAccount.ValueString() + container := plan.ProvisioningScheme.AzureMachineConfig.Container.ValueString() + if storageAccount != "" && container != "" { + queryPath = fmt.Sprintf( + "image.folder\\%s.resourcegroup\\%s.storageaccount\\%s.container", + resourceGroup, + storageAccount, + container) + imagePath, err = util.GetSingleResourcePathFromHypervisor(ctx, client, hypervisor.GetName(), hypervisorResourcePool.GetName(), queryPath, masterImage, "", "") + if err != nil { + return nil, fmt.Sprintf("Failed to resolve master image VHD %s in container %s of storage account %s, error: %s", masterImage, container, storageAccount, err.Error()) + } + } else { + queryPath = fmt.Sprintf( + "image.folder\\%s.resourcegroup", + resourceGroup) + imagePath, err = util.GetSingleResourcePathFromHypervisor(ctx, client, hypervisor.GetName(), hypervisorResourcePool.GetName(), queryPath, masterImage, "", "") + if err != nil { + return nil, fmt.Sprintf("Failed to resolve master image Managed Disk or Snapshot %s, error: %s", masterImage, err.Error()) + } + } + } else if plan.ProvisioningScheme.AzureMachineConfig.GalleryImage != nil { + gallery := plan.ProvisioningScheme.AzureMachineConfig.GalleryImage.Gallery.ValueString() + definition := plan.ProvisioningScheme.AzureMachineConfig.GalleryImage.Definition.ValueString() + version := plan.ProvisioningScheme.AzureMachineConfig.GalleryImage.Version.ValueString() + if gallery != "" && definition != "" { + queryPath = fmt.Sprintf( + "image.folder\\%s.resourcegroup\\%s.gallery\\%s.imagedefinition", + resourceGroup, + gallery, + definition) + imagePath, err = util.GetSingleResourcePathFromHypervisor(ctx, client, hypervisor.GetName(), hypervisorResourcePool.GetName(), queryPath, version, "", "") + if err != nil { + return nil, fmt.Sprintf("Failed to locate Azure Image Gallery image %s of version %s in gallery %s, error: %s", masterImage, version, gallery, err.Error()) + } + } + } + + provisioningScheme.SetMasterImagePath(imagePath) + + machineProfile := plan.ProvisioningScheme.AzureMachineConfig.MachineProfile + if machineProfile != nil { + machine := machineProfile.MachineProfileVmName.ValueString() + machineProfileResourceGroup := machineProfile.MachineProfileResourceGroup.ValueString() + queryPath = fmt.Sprintf("machineprofile.folder\\%s.resourcegroup", machineProfileResourceGroup) + machineProfilePath, err := util.GetSingleResourcePathFromHypervisor(ctx, client, hypervisor.GetName(), hypervisorResourcePool.GetName(), queryPath, machine, "vm", "") + if err != nil { + return nil, fmt.Sprintf("Failed to locate machine profile %s on Azure, error: %s", plan.ProvisioningScheme.AzureMachineConfig.MachineProfile.MachineProfileVmName.ValueString(), err.Error()) + } + provisioningScheme.SetMachineProfilePath(machineProfilePath) + } + + if plan.ProvisioningScheme.AzureMachineConfig.WritebackCache != nil { + provisioningScheme.SetUseWriteBackCache(true) + provisioningScheme.SetWriteBackCacheDiskSizeGB(int32(plan.ProvisioningScheme.AzureMachineConfig.WritebackCache.WriteBackCacheDiskSizeGB.ValueInt64())) + if !plan.ProvisioningScheme.AzureMachineConfig.WritebackCache.WriteBackCacheMemorySizeMB.IsNull() { + provisioningScheme.SetWriteBackCacheMemorySizeMB(int32(plan.ProvisioningScheme.AzureMachineConfig.WritebackCache.WriteBackCacheMemorySizeMB.ValueInt64())) + } + if plan.ProvisioningScheme.AzureMachineConfig.WritebackCache.PersistVm.ValueBool() && 
!plan.ProvisioningScheme.AzureMachineConfig.WritebackCache.PersistOsDisk.ValueBool() { + return nil, "Could not set persist_vm attribute, which can only be set when persist_os_disk = true" + } + case citrixorchestration.HYPERVISORCONNECTIONTYPE_AWS: + serviceOffering := plan.ProvisioningScheme.AwsMachineConfig.ServiceOffering.ValueString() + serviceOfferingPath, err := util.GetSingleResourcePathFromHypervisor(ctx, client, hypervisor.GetName(), hypervisorResourcePool.GetName(), "", serviceOffering, "serviceoffering", "") + if err != nil { + return nil, fmt.Sprintf("Failed to resolve service offering %s on AWS, error: %s", serviceOffering, err.Error()) + } + provisioningScheme.SetServiceOfferingPath(serviceOfferingPath) + + masterImage := plan.ProvisioningScheme.AwsMachineConfig.MasterImage.ValueString() + imageId := fmt.Sprintf("%s (%s)", masterImage, plan.ProvisioningScheme.AwsMachineConfig.ImageAmi.ValueString()) + imagePath, err := util.GetSingleResourcePathFromHypervisor(ctx, client, hypervisor.GetName(), hypervisorResourcePool.GetName(), "", imageId, "template", "") + if err != nil { + return nil, fmt.Sprintf("Failed to locate AWS image %s with AMI %s, error: %s", masterImage, plan.ProvisioningScheme.AwsMachineConfig.ImageAmi.ValueString(), err.Error()) + } + provisioningScheme.SetMasterImagePath(imagePath) + case citrixorchestration.HYPERVISORCONNECTIONTYPE_GOOGLE_CLOUD_PLATFORM: + imagePath := "" + snapshot := plan.ProvisioningScheme.GcpMachineConfig.MachineSnapshot.ValueString() + if snapshot != "" { + queryPath := fmt.Sprintf("%s.vm", plan.ProvisioningScheme.GcpMachineConfig.MasterImage.ValueString()) + imagePath, err = util.GetSingleResourcePathFromHypervisor(ctx, client, hypervisor.GetName(), hypervisorResourcePool.GetName(), queryPath, plan.ProvisioningScheme.GcpMachineConfig.MachineSnapshot.ValueString(), "snapshot", "") + if err != nil { + return nil, fmt.Sprintf("Failed to locate master image snapshot %s on GCP, error: %s", plan.ProvisioningScheme.GcpMachineConfig.MachineSnapshot.ValueString(), err.Error()) + } + } else { + imagePath, err = util.GetSingleResourcePathFromHypervisor(ctx, client, hypervisor.GetName(), hypervisorResourcePool.GetName(), "", plan.ProvisioningScheme.GcpMachineConfig.MasterImage.ValueString(), "vm", "") + if err != nil { + return nil, fmt.Sprintf("Failed to locate master image machine %s on GCP, error: %s", plan.ProvisioningScheme.GcpMachineConfig.MasterImage.ValueString(), err.Error()) + } + } + + provisioningScheme.SetMasterImagePath(imagePath) + + machineProfile := plan.ProvisioningScheme.GcpMachineConfig.MachineProfile.ValueString() + if machineProfile != "" { + machineProfilePath, err := util.GetSingleResourcePathFromHypervisor(ctx, client, hypervisor.GetName(), hypervisorResourcePool.GetName(), "", machineProfile, "vm", "") + if err != nil { + return nil, fmt.Sprintf("Failed to locate machine profile %s on GCP, error: %s", plan.ProvisioningScheme.GcpMachineConfig.MachineProfile.ValueString(), err.Error()) + } + provisioningScheme.SetMachineProfilePath(machineProfilePath) + } + + if plan.ProvisioningScheme.GcpMachineConfig.WritebackCache != nil { + provisioningScheme.SetUseWriteBackCache(true) + provisioningScheme.SetWriteBackCacheDiskSizeGB(int32(plan.ProvisioningScheme.GcpMachineConfig.WritebackCache.WriteBackCacheDiskSizeGB.ValueInt64())) + if !plan.ProvisioningScheme.GcpMachineConfig.WritebackCache.WriteBackCacheMemorySizeMB.IsNull() { 
provisioningScheme.SetWriteBackCacheMemorySizeMB(int32(plan.ProvisioningScheme.GcpMachineConfig.WritebackCache.WriteBackCacheMemorySizeMB.ValueInt64())) + } + if plan.ProvisioningScheme.GcpMachineConfig.WritebackCache.PersistVm.ValueBool() && !plan.ProvisioningScheme.GcpMachineConfig.WritebackCache.PersistOsDisk.ValueBool() { + return nil, "Could not set persist_vm attribute, which can only be set when persist_os_disk = true" + } + + } + } + + if plan.ProvisioningScheme.NetworkMapping != nil { + networkMapping, err := parseNetworkMappingToClientModel(*plan.ProvisioningScheme.NetworkMapping, hypervisorResourcePool) + if err != nil { + return nil, err.Error() + } + provisioningScheme.SetNetworkMapping(networkMapping) + } + + customProperties := parseCustomPropertiesToClientModel(*plan.ProvisioningScheme, hypervisor.ConnectionType) + provisioningScheme.SetCustomProperties(customProperties) + + return &provisioningScheme, "" +} + +func setProvSchemePropertiesForUpdateCatalog(plan MachineCatalogResourceModel, body citrixorchestration.UpdateMachineCatalogRequestModel, ctx context.Context, client *citrixdaasclient.CitrixDaasClient, diagnostics *diag.Diagnostics, connectionType *citrixorchestration.HypervisorConnectionType) (citrixorchestration.UpdateMachineCatalogRequestModel, error) { + hypervisor, err := util.GetHypervisor(ctx, client, diagnostics, plan.ProvisioningScheme.Hypervisor.ValueString()) + if err != nil { + return body, err + } + + hypervisorResourcePool, err := util.GetHypervisorResourcePool(ctx, client, diagnostics, plan.ProvisioningScheme.Hypervisor.ValueString(), plan.ProvisioningScheme.HypervisorResourcePool.ValueString()) + if err != nil { + return body, err + } + + // Resolve resource path for service offering and master image + switch hypervisor.GetConnectionType() { + case citrixorchestration.HYPERVISORCONNECTIONTYPE_AZURE_RM: + serviceOffering := plan.ProvisioningScheme.AzureMachineConfig.ServiceOffering.ValueString() + queryPath := "serviceoffering.folder" + serviceOfferingPath, err := util.GetSingleResourcePathFromHypervisor(ctx, client, hypervisor.GetName(), hypervisorResourcePool.GetName(), queryPath, serviceOffering, "serviceoffering", "") + if err != nil { + diagnostics.AddError( + "Error updating Machine Catalog", + fmt.Sprintf("Failed to resolve service offering %s on Azure, error: %s", serviceOffering, err.Error()), + ) + return body, err + } + body.SetServiceOfferingPath(serviceOfferingPath) + if machineProfile := plan.ProvisioningScheme.AzureMachineConfig.MachineProfile; machineProfile != nil { + machineProfileName := machineProfile.MachineProfileVmName.ValueString() + if machineProfileName != "" { + machineProfileResourceGroup := plan.ProvisioningScheme.AzureMachineConfig.MachineProfile.MachineProfileResourceGroup.ValueString() + queryPath = fmt.Sprintf("machineprofile.folder\\%s.resourcegroup", machineProfileResourceGroup) + machineProfilePath, err := util.GetSingleResourcePathFromHypervisor(ctx, client, hypervisor.GetName(), hypervisorResourcePool.GetName(), queryPath, machineProfileName, "vm", "") + if err != nil { + diagnostics.AddError( + "Error updating Machine Catalog", + fmt.Sprintf("Failed to locate machine profile %s on Azure, error: %s", plan.ProvisioningScheme.AzureMachineConfig.MachineProfile.MachineProfileVmName.ValueString(), err.Error()), + ) + return body, err + } + body.SetMachineProfilePath(machineProfilePath) + } + } + case citrixorchestration.HYPERVISORCONNECTIONTYPE_AWS: + serviceOffering := 
plan.ProvisioningScheme.AwsMachineConfig.ServiceOffering.ValueString() + serviceOfferingPath, err := util.GetSingleResourcePathFromHypervisor(ctx, client, hypervisor.GetName(), hypervisorResourcePool.GetName(), "", serviceOffering, "serviceoffering", "") + if err != nil { + diagnostics.AddError( + "Error updating Machine Catalog", + fmt.Sprintf("Failed to resolve service offering %s on AWS, error: %s", serviceOffering, err.Error()), + ) + return body, err + } + body.SetServiceOfferingPath(serviceOfferingPath) + case citrixorchestration.HYPERVISORCONNECTIONTYPE_GOOGLE_CLOUD_PLATFORM: + machineProfile := plan.ProvisioningScheme.GcpMachineConfig.MachineProfile.ValueString() + if machineProfile != "" { + machineProfilePath, err := util.GetSingleResourcePathFromHypervisor(ctx, client, hypervisor.GetName(), hypervisorResourcePool.GetName(), "", plan.ProvisioningScheme.GcpMachineConfig.MachineProfile.ValueString(), "vm", "") + if err != nil { + diagnostics.AddError( + "Error updating Machine Catalog", + fmt.Sprintf("Failed to locate machine profile %s on GCP, error: %s", plan.ProvisioningScheme.GcpMachineConfig.MachineProfile.ValueString(), err.Error()), + ) + return body, err + } + body.SetMachineProfilePath(machineProfilePath) + } + } + + if plan.ProvisioningScheme.NetworkMapping != nil { + networkMapping, err := parseNetworkMappingToClientModel(*plan.ProvisioningScheme.NetworkMapping, hypervisorResourcePool) + if err != nil { + diagnostics.AddError( + "Error updating Machine Catalog", + fmt.Sprintf("Failed to parse network mapping, error: %s", err.Error()), + ) + return body, err + } + body.SetNetworkMapping(networkMapping) + } + + customProperties := parseCustomPropertiesToClientModel(*plan.ProvisioningScheme, hypervisor.ConnectionType) + body.SetCustomProperties(customProperties) + + return body, nil +} + +func generateAdminCredentialHeader(plan MachineCatalogResourceModel) string { + credential := fmt.Sprintf("%s\\%s:%s", plan.ProvisioningScheme.MachineDomainIdentity.Domain.ValueString(), plan.ProvisioningScheme.MachineDomainIdentity.ServiceAccount.ValueString(), plan.ProvisioningScheme.MachineDomainIdentity.ServiceAccountPassword.ValueString()) + encodedData := base64.StdEncoding.EncodeToString([]byte(credential)) + header := fmt.Sprintf("Basic %s", encodedData) + + return header +} + +func deleteMachinesFromMcsCatalog(ctx context.Context, client *citrixdaasclient.CitrixDaasClient, resp *resource.UpdateResponse, catalog *citrixorchestration.MachineCatalogDetailResponseModel, plan MachineCatalogResourceModel) error { + catalogId := catalog.GetId() + catalogName := catalog.GetName() + + if catalog.GetAllocationType() != citrixorchestration.ALLOCATIONTYPE_RANDOM { + resp.Diagnostics.AddError( + "Error updating Machine Catalog "+catalogName, + "Deleting machine(s) is supported for machine catalogs with Random allocation type only.", + ) + return fmt.Errorf("deleting machine(s) is supported for machine catalogs with Random allocation type only") + } + + getMachinesResponse, err := util.GetMachineCatalogMachines(ctx, client, &resp.Diagnostics, catalogId) + if err != nil { + return err + } + + machineDeleteRequestCount := int(catalog.GetTotalCount()) - int(plan.ProvisioningScheme.NumTotalMachines.ValueInt64()) + machinesToDelete := []citrixorchestration.MachineResponseModel{} + + for _, machine := range getMachinesResponse.GetItems() { + if !machine.GetDeliveryGroup().Id.IsSet() || machine.GetSessionCount() == 0 { + machinesToDelete = append(machinesToDelete, machine) + } + + if 
len(machinesToDelete) == machineDeleteRequestCount {
+            break
+        }
+    }
+
+    machinesToDeleteCount := len(machinesToDelete)
+
+    if machineDeleteRequestCount > machinesToDeleteCount {
+        errorString := fmt.Sprintf("%d machine(s) requested to be deleted. %d machine(s) qualify for deletion.", machineDeleteRequestCount, machinesToDeleteCount)
+
+        resp.Diagnostics.AddError(
+            "Error deleting machine(s) from Machine Catalog "+catalogName,
+            errorString+" Ensure that the machines to be deleted have no active sessions.",
+        )
+
+        // Return a concrete error so the caller stops the update; err is nil at this point.
+        return fmt.Errorf(errorString)
+    }
+
+    return deleteMachinesFromCatalog(ctx, client, resp, plan, machinesToDelete, catalogName, true)
+}
+
+func addMachinesToMcsCatalog(ctx context.Context, client *citrixdaasclient.CitrixDaasClient, resp *resource.UpdateResponse, catalog *citrixorchestration.MachineCatalogDetailResponseModel, plan MachineCatalogResourceModel) error {
+    catalogId := catalog.GetId()
+    catalogName := catalog.GetName()
+
+    addMachinesCount := int32(plan.ProvisioningScheme.NumTotalMachines.ValueInt64()) - catalog.GetTotalCount()
+
+    var updateMachineAccountCreationRule citrixorchestration.UpdateMachineAccountCreationRulesRequestModel
+    updateMachineAccountCreationRule.SetNamingScheme(plan.ProvisioningScheme.MachineAccountCreationRules.NamingScheme.ValueString())
+    namingScheme, err := citrixorchestration.NewNamingSchemeTypeFromValue(plan.ProvisioningScheme.MachineAccountCreationRules.NamingSchemeType.ValueString())
+    if err != nil {
+        resp.Diagnostics.AddError(
+            "Error adding Machine to Machine Catalog "+catalogName,
+            "Unsupported machine account naming scheme type.",
+        )
+        return err
+    }
+    updateMachineAccountCreationRule.SetNamingSchemeType(*namingScheme)
+    if plan.ProvisioningScheme.MachineDomainIdentity != nil {
+        updateMachineAccountCreationRule.SetDomain(plan.ProvisioningScheme.MachineDomainIdentity.Domain.ValueString())
+        updateMachineAccountCreationRule.SetOU(plan.ProvisioningScheme.MachineDomainIdentity.Ou.ValueString())
+    }
+
+    var addMachineRequestBody citrixorchestration.AddMachineToMachineCatalogDetailRequestModel
+    addMachineRequestBody.SetMachineAccountCreationRules(updateMachineAccountCreationRule)
+
+    addMachineRequestStringBody, err := util.ConvertToString(addMachineRequestBody)
+    if err != nil {
+        resp.Diagnostics.AddError(
+            "Error adding Machine to Machine Catalog "+catalogName,
+            "An unexpected error occurred: "+err.Error(),
+        )
+        return err
+    }
+
+    batchApiHeaders, httpResp, err := generateBatchApiHeaders(client, plan, true)
+    txId := citrixdaasclient.GetTransactionIdFromHttpResponse(httpResp)
+    if err != nil {
+        resp.Diagnostics.AddError(
+            "Error updating Machine Catalog "+catalogName,
+            "TransactionId: "+txId+
+                "\nCould not add machine to Machine Catalog, unexpected error: "+util.ReadClientError(err),
+        )
+        return err
+    }
+
+    batchRequestItems := []citrixorchestration.BatchRequestItemModel{}
+    relativeUrl := fmt.Sprintf("/MachineCatalogs/%s/Machines?async=true", catalogId)
+    for i := 0; i < int(addMachinesCount); i++ {
+        var batchRequestItem citrixorchestration.BatchRequestItemModel
+        batchRequestItem.SetMethod(http.MethodPost)
+        batchRequestItem.SetReference(strconv.Itoa(i))
+        batchRequestItem.SetRelativeUrl(client.GetBatchRequestItemRelativeUrl(relativeUrl))
+        batchRequestItem.SetBody(addMachineRequestStringBody)
+        batchRequestItem.SetHeaders(batchApiHeaders)
+        batchRequestItems = append(batchRequestItems, batchRequestItem)
+    }
+
+    var batchRequestModel citrixorchestration.BatchRequestModel
+    batchRequestModel.SetItems(batchRequestItems)
+    successfulJobs,
txId, err := citrixdaasclient.PerformBatchOperation(ctx, client, batchRequestModel) + if err != nil { + resp.Diagnostics.AddError( + "Error adding machine(s) to Machine Catalog "+catalogName, + "TransactionId: "+txId+ + "\nError message: "+util.ReadClientError(err), + ) + return err + } + + if successfulJobs < int(addMachinesCount) { + errMsg := fmt.Sprintf("An error occurred while adding machine(s) to the Machine Catalog. %d of %d machines were added to the Machine Catalog.", successfulJobs, addMachinesCount) + err = fmt.Errorf(errMsg) + resp.Diagnostics.AddError( + "Error updating Machine Catalog "+catalogName, + "TransactionId: "+txId+ + "\n"+errMsg, + ) + + return err + } + + return nil +} + +func updateCatalogImage(ctx context.Context, client *citrixdaasclient.CitrixDaasClient, resp *resource.UpdateResponse, catalog *citrixorchestration.MachineCatalogDetailResponseModel, plan MachineCatalogResourceModel) error { + + catalogName := catalog.GetName() + catalogId := catalog.GetId() + + provScheme := catalog.GetProvisioningScheme() + masterImage := provScheme.GetMasterImage() + + hypervisor, errResp := util.GetHypervisor(ctx, client, &resp.Diagnostics, plan.ProvisioningScheme.Hypervisor.ValueString()) + if errResp != nil { + return errResp + } + + hypervisorResourcePool, errResp := util.GetHypervisorResourcePool(ctx, client, &resp.Diagnostics, plan.ProvisioningScheme.Hypervisor.ValueString(), plan.ProvisioningScheme.HypervisorResourcePool.ValueString()) + if errResp != nil { + return errResp + } + + // Check if XDPath has changed for the image + imagePath := "" + var err error + switch hypervisor.GetConnectionType() { + case citrixorchestration.HYPERVISORCONNECTIONTYPE_AZURE_RM: + newImage := plan.ProvisioningScheme.AzureMachineConfig.MasterImage.ValueString() + resourceGroup := plan.ProvisioningScheme.AzureMachineConfig.ResourceGroup.ValueString() + if newImage != "" { + storageAccount := plan.ProvisioningScheme.AzureMachineConfig.StorageAccount.ValueString() + container := plan.ProvisioningScheme.AzureMachineConfig.Container.ValueString() + if storageAccount != "" && container != "" { + queryPath := fmt.Sprintf( + "image.folder\\%s.resourcegroup\\%s.storageaccount\\%s.container", + resourceGroup, + storageAccount, + container) + imagePath, err = util.GetSingleResourcePathFromHypervisor(ctx, client, hypervisor.GetName(), hypervisorResourcePool.GetName(), queryPath, newImage, "", "") + if err != nil { + resp.Diagnostics.AddError( + "Error updating Machine Catalog", + fmt.Sprintf("Failed to resolve master image VHD %s in container %s of storage account %s, error: %s", newImage, container, storageAccount, err.Error()), + ) + return err + } + } else { + queryPath := fmt.Sprintf( + "image.folder\\%s.resourcegroup", + resourceGroup) + imagePath, err = util.GetSingleResourcePathFromHypervisor(ctx, client, hypervisor.GetName(), hypervisorResourcePool.GetName(), queryPath, newImage, "", "") + if err != nil { + resp.Diagnostics.AddError( + "Error updating Machine Catalog", + fmt.Sprintf("Failed to resolve master image Managed Disk or Snapshot %s, error: %s", newImage, err.Error()), + ) + return err + } + } + } else if plan.ProvisioningScheme.AzureMachineConfig.GalleryImage != nil { + gallery := plan.ProvisioningScheme.AzureMachineConfig.GalleryImage.Gallery.ValueString() + definition := plan.ProvisioningScheme.AzureMachineConfig.GalleryImage.Definition.ValueString() + version := plan.ProvisioningScheme.AzureMachineConfig.GalleryImage.Version.ValueString() + if gallery != "" && definition != "" { + 
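// The query path walks the hypervisor inventory one folder level at a time:
+            // resource group -> image gallery -> image definition; the gallery image
+            // version is then resolved as the leaf resource under that definition.
+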
queryPath := fmt.Sprintf(
+                "image.folder\\%s.resourcegroup\\%s.gallery\\%s.imagedefinition",
+                resourceGroup,
+                gallery,
+                definition)
+            imagePath, err = util.GetSingleResourcePathFromHypervisor(ctx, client, hypervisor.GetName(), hypervisorResourcePool.GetName(), queryPath, version, "", "")
+            if err != nil {
+                resp.Diagnostics.AddError(
+                    "Error updating Machine Catalog",
+                    // newImage is empty in this branch; report the gallery image definition instead.
+                    fmt.Sprintf("Failed to locate Azure Image Gallery image %s of version %s in gallery %s, error: %s", definition, version, gallery, err.Error()),
+                )
+                return err
+            }
+        }
+    }
+    case citrixorchestration.HYPERVISORCONNECTIONTYPE_AWS:
+        imageId := fmt.Sprintf("%s (%s)", plan.ProvisioningScheme.AwsMachineConfig.MasterImage.ValueString(), plan.ProvisioningScheme.AwsMachineConfig.ImageAmi.ValueString())
+        imagePath, err = util.GetSingleResourcePathFromHypervisor(ctx, client, hypervisor.GetName(), hypervisorResourcePool.GetName(), "", imageId, "template", "")
+        if err != nil {
+            resp.Diagnostics.AddError(
+                "Error updating Machine Catalog",
+                fmt.Sprintf("Failed to locate AWS image %s with AMI %s, error: %s", plan.ProvisioningScheme.AwsMachineConfig.MasterImage.ValueString(), plan.ProvisioningScheme.AwsMachineConfig.ImageAmi.ValueString(), err.Error()),
+            )
+            return err
+        }
+    case citrixorchestration.HYPERVISORCONNECTIONTYPE_GOOGLE_CLOUD_PLATFORM:
+        newImage := plan.ProvisioningScheme.GcpMachineConfig.MasterImage.ValueString()
+        snapshot := plan.ProvisioningScheme.GcpMachineConfig.MachineSnapshot.ValueString()
+        if snapshot != "" {
+            queryPath := fmt.Sprintf("%s.vm", newImage)
+            imagePath, err = util.GetSingleResourcePathFromHypervisor(ctx, client, hypervisor.GetName(), hypervisorResourcePool.GetName(), queryPath, snapshot, "snapshot", "")
+            if err != nil {
+                resp.Diagnostics.AddError(
+                    "Error updating Machine Catalog",
+                    // Report the snapshot that failed to resolve, not the machine profile.
+                    fmt.Sprintf("Failed to locate master image snapshot %s on GCP, error: %s", snapshot, err.Error()),
+                )
+                return err
+            }
+        } else {
+            imagePath, err = util.GetSingleResourcePathFromHypervisor(ctx, client, hypervisor.GetName(), hypervisorResourcePool.GetName(), "", newImage, "vm", "")
+            if err != nil {
+                resp.Diagnostics.AddError(
+                    "Error updating Machine Catalog",
+                    // Report the master image that failed to resolve, not the machine profile.
+                    fmt.Sprintf("Failed to locate master image machine %s on GCP, error: %s", newImage, err.Error()),
+                )
+                return err
+            }
+        }
+    }
+
+    if masterImage.GetXDPath() == imagePath {
+        return nil
+    }
+
+    // Update Master Image for Machine Catalog
+    var updateProvisioningSchemeModel citrixorchestration.UpdateMachineCatalogProvisioningSchemeRequestModel
+    var rebootOption citrixorchestration.RebootMachinesRequestModel
+
+    // Update the image immediately
+    rebootOption.SetRebootDuration(60)
+    rebootOption.SetWarningDuration(15)
+    rebootOption.SetWarningMessage("Warning: An important update is about to be installed. To ensure that no loss of data occurs, save any outstanding work and close all applications.")
+    updateProvisioningSchemeModel.SetRebootOptions(rebootOption)
+    updateProvisioningSchemeModel.SetMasterImagePath(imagePath)
+    updateProvisioningSchemeModel.SetStoreOldImage(true)
+    updateProvisioningSchemeModel.SetMinimumFunctionalLevel("L7_20")
+    updateMasterImageRequest := client.ApiClient.MachineCatalogsAPIsDAAS.MachineCatalogsUpdateMachineCatalogProvisioningScheme(ctx, catalogId)
+    updateMasterImageRequest = updateMasterImageRequest.UpdateMachineCatalogProvisioningSchemeRequestModel(updateProvisioningSchemeModel)
+    _, httpResp, err := citrixdaasclient.AddRequestData(updateMasterImageRequest, client).Async(true).Execute()
+    if err != nil {
+        resp.Diagnostics.AddError(
+            "Error updating Image for Machine Catalog "+catalogName,
+            "TransactionId: "+citrixdaasclient.GetTransactionIdFromHttpResponse(httpResp)+
+                "\nError message: "+util.ReadClientError(err),
+        )
+        // Stop here; otherwise the async polling below would run for a request that already failed.
+        return err
+    }
+
+    err = util.ProcessAsyncJobResponse(ctx, client, httpResp, "Error updating Image for Machine Catalog "+catalogName, &resp.Diagnostics, 60, false)
+    if err != nil {
+        return err
+    }
+
+    return nil
+}
+
+func (r MachineCatalogResourceModel) updateCatalogWithProvScheme(catalog *citrixorchestration.MachineCatalogDetailResponseModel, connectionType *citrixorchestration.HypervisorConnectionType) MachineCatalogResourceModel {
+    if r.ProvisioningScheme == nil {
+        r.ProvisioningScheme = &ProvisioningSchemeModel{}
+    }
+
+    provScheme := catalog.GetProvisioningScheme()
+    resourcePool := provScheme.GetResourcePool()
+    hypervisor := resourcePool.GetHypervisor()
+    machineAccountCreateRules := provScheme.GetMachineAccountCreationRules()
+    domain := machineAccountCreateRules.GetDomain()
+    customProperties := provScheme.GetCustomProperties()
+
+    // Refresh Hypervisor and Resource Pool
+    r.ProvisioningScheme.Hypervisor = types.StringValue(hypervisor.GetId())
+    r.ProvisioningScheme.HypervisorResourcePool = types.StringValue(resourcePool.GetId())
+
+    switch *connectionType {
+    case citrixorchestration.HYPERVISORCONNECTIONTYPE_AZURE_RM:
+        if r.ProvisioningScheme.AzureMachineConfig == nil {
+            r.ProvisioningScheme.AzureMachineConfig = &AzureMachineConfigModel{}
+        }
+
+        r.ProvisioningScheme.AzureMachineConfig.RefreshProperties(*catalog)
+
+        for _, stringPair := range customProperties {
+            if stringPair.GetName() == "Zones" && !r.ProvisioningScheme.AvailabilityZones.IsNull() {
+                r.ProvisioningScheme.AvailabilityZones = types.StringValue(stringPair.GetValue())
+            }
+        }
+    case citrixorchestration.HYPERVISORCONNECTIONTYPE_AWS:
+        if r.ProvisioningScheme.AwsMachineConfig == nil {
+            r.ProvisioningScheme.AwsMachineConfig = &AwsMachineConfigModel{}
+        }
+        r.ProvisioningScheme.AwsMachineConfig.RefreshProperties(*catalog)
+
+        for _, stringPair := range customProperties {
+            if stringPair.GetName() == "Zones" {
+                r.ProvisioningScheme.AvailabilityZones = types.StringValue(stringPair.GetValue())
+            }
+        }
+    case citrixorchestration.HYPERVISORCONNECTIONTYPE_GOOGLE_CLOUD_PLATFORM:
+        if r.ProvisioningScheme.GcpMachineConfig == nil {
+            r.ProvisioningScheme.GcpMachineConfig = &GcpMachineConfigModel{}
+        }
+
+        r.ProvisioningScheme.GcpMachineConfig.RefreshProperties(*catalog)
+
+        for _, stringPair := range customProperties {
+            if stringPair.GetName() == "CatalogZones" && !r.ProvisioningScheme.AvailabilityZones.IsNull() {
+                r.ProvisioningScheme.AvailabilityZones = types.StringValue(stringPair.GetValue())
+            }
+        }
+    }
+
+    // Refresh Total Machine Count
+    r.ProvisioningScheme.NumTotalMachines = types.Int64Value(int64(provScheme.GetMachineCount()))
+
+    // Refresh Identity Type
+    if identityType := types.StringValue(reflect.ValueOf(provScheme.GetIdentityType()).String()); identityType.ValueString() != "" {
+        r.ProvisioningScheme.IdentityType = identityType
+    } else {
+        r.ProvisioningScheme.IdentityType = types.StringNull()
+    }
+
+    // Refresh Network Mapping
+    networkMaps := provScheme.GetNetworkMaps()
+
+    if len(networkMaps) > 0 && r.ProvisioningScheme.NetworkMapping != nil {
+        r.ProvisioningScheme.NetworkMapping = &NetworkMappingModel{}
+        r.ProvisioningScheme.NetworkMapping.NetworkDevice = types.StringValue(networkMaps[0].GetDeviceId())
+        network := networkMaps[0].GetNetwork()
+        segments := strings.Split(network.GetXDPath(), "\\")
+        lastIndex := len(segments)
+        r.ProvisioningScheme.NetworkMapping.Network = types.StringValue(strings.Split((strings.Split(segments[lastIndex-1], "."))[0], " ")[0])
+    } else {
+        r.ProvisioningScheme.NetworkMapping = nil
+    }
+
+    // Identity Pool Properties
+    if r.ProvisioningScheme.MachineAccountCreationRules == nil {
+        r.ProvisioningScheme.MachineAccountCreationRules = &MachineAccountCreationRulesModel{}
+    }
+    r.ProvisioningScheme.MachineAccountCreationRules.NamingScheme = types.StringValue(machineAccountCreateRules.GetNamingScheme())
+    namingSchemeType := machineAccountCreateRules.GetNamingSchemeType()
+    r.ProvisioningScheme.MachineAccountCreationRules.NamingSchemeType = types.StringValue(reflect.ValueOf(namingSchemeType).String())
+
+    // Domain Identity Properties
+    if r.ProvisioningScheme.MachineDomainIdentity == nil {
+        r.ProvisioningScheme.MachineDomainIdentity = &MachineDomainIdentityModel{}
+    }
+
+    if domain.GetName() != "" {
+        r.ProvisioningScheme.MachineDomainIdentity.Domain = types.StringValue(domain.GetName())
+    }
+    if machineAccountCreateRules.GetOU() != "" {
+        r.ProvisioningScheme.MachineDomainIdentity.Ou = types.StringValue(machineAccountCreateRules.GetOU())
+    }
+
+    return r
+}
+
+func parseCustomPropertiesToClientModel(provisioningScheme ProvisioningSchemeModel, connectionType citrixorchestration.HypervisorConnectionType) []citrixorchestration.NameValueStringPairModel {
+    var res = &[]citrixorchestration.NameValueStringPairModel{}
+    switch connectionType {
+    case citrixorchestration.HYPERVISORCONNECTIONTYPE_AZURE_RM:
+        if !provisioningScheme.AvailabilityZones.IsNull() {
+            util.AppendNameValueStringPair(res, "Zones", provisioningScheme.AvailabilityZones.ValueString())
+        } else {
+            util.AppendNameValueStringPair(res, "Zones", "")
+        }
+        if !provisioningScheme.AzureMachineConfig.StorageType.IsNull() {
+            util.AppendNameValueStringPair(res, "StorageType", provisioningScheme.AzureMachineConfig.StorageType.ValueString())
+        }
+        if !provisioningScheme.AzureMachineConfig.VdaResourceGroup.IsNull() {
+            util.AppendNameValueStringPair(res, "ResourceGroups", provisioningScheme.AzureMachineConfig.VdaResourceGroup.ValueString())
+        }
+        if !provisioningScheme.AzureMachineConfig.UseManagedDisks.IsNull() {
+            if provisioningScheme.AzureMachineConfig.UseManagedDisks.ValueBool() {
+                util.AppendNameValueStringPair(res, "UseManagedDisks", "true")
+            } else {
+                util.AppendNameValueStringPair(res, "UseManagedDisks", "false")
+            }
+        }
+        if provisioningScheme.AzureMachineConfig.WritebackCache != nil {
+            if !provisioningScheme.AzureMachineConfig.WritebackCache.WBCDiskStorageType.IsNull() {
+                util.AppendNameValueStringPair(res, "WBCDiskStorageType",
provisioningScheme.AzureMachineConfig.WritebackCache.WBCDiskStorageType.ValueString()) + } + if provisioningScheme.AzureMachineConfig.WritebackCache.PersistWBC.ValueBool() { + util.AppendNameValueStringPair(res, "PersistWBC", "true") + if provisioningScheme.AzureMachineConfig.WritebackCache.StorageCostSaving.ValueBool() { + util.AppendNameValueStringPair(res, "StorageTypeAtShutdown", "Standard_LRS") + } + } + if provisioningScheme.AzureMachineConfig.WritebackCache.PersistOsDisk.ValueBool() { + util.AppendNameValueStringPair(res, "PersistOsDisk", "true") + if provisioningScheme.AzureMachineConfig.WritebackCache.PersistVm.ValueBool() { + util.AppendNameValueStringPair(res, "PersistVm", "true") + } + } + } + case citrixorchestration.HYPERVISORCONNECTIONTYPE_AWS: + if !provisioningScheme.AvailabilityZones.IsNull() { + util.AppendNameValueStringPair(res, "Zones", provisioningScheme.AvailabilityZones.ValueString()) + } + case citrixorchestration.HYPERVISORCONNECTIONTYPE_GOOGLE_CLOUD_PLATFORM: + if !provisioningScheme.AvailabilityZones.IsNull() { + util.AppendNameValueStringPair(res, "CatalogZones", provisioningScheme.AvailabilityZones.ValueString()) + } + if !provisioningScheme.GcpMachineConfig.StorageType.IsNull() { + util.AppendNameValueStringPair(res, "StorageType", provisioningScheme.GcpMachineConfig.StorageType.ValueString()) + } + if provisioningScheme.GcpMachineConfig.WritebackCache != nil { + if !provisioningScheme.GcpMachineConfig.WritebackCache.WBCDiskStorageType.IsNull() { + util.AppendNameValueStringPair(res, "WBCDiskStorageType", provisioningScheme.GcpMachineConfig.WritebackCache.WBCDiskStorageType.ValueString()) + } + if provisioningScheme.GcpMachineConfig.WritebackCache.PersistWBC.ValueBool() { + util.AppendNameValueStringPair(res, "PersistWBC", "true") + } + if provisioningScheme.GcpMachineConfig.WritebackCache.PersistOsDisk.ValueBool() { + util.AppendNameValueStringPair(res, "PersistOsDisk", "true") + } + } + } + + return *res +} + +func parseNetworkMappingToClientModel(networkMapping NetworkMappingModel, resourcePool *citrixorchestration.HypervisorResourcePoolDetailResponseModel) ([]citrixorchestration.NetworkMapRequestModel, error) { + var networks []citrixorchestration.HypervisorResourceRefResponseModel + if resourcePool.ConnectionType == citrixorchestration.HYPERVISORCONNECTIONTYPE_AZURE_RM { + networks = resourcePool.Subnets + } else if resourcePool.ConnectionType == citrixorchestration.HYPERVISORCONNECTIONTYPE_AWS || resourcePool.ConnectionType == citrixorchestration.HYPERVISORCONNECTIONTYPE_GOOGLE_CLOUD_PLATFORM { + networks = resourcePool.Networks + } + + var res = []citrixorchestration.NetworkMapRequestModel{} + var networkName string + if resourcePool.ConnectionType == citrixorchestration.HYPERVISORCONNECTIONTYPE_AZURE_RM || resourcePool.ConnectionType == citrixorchestration.HYPERVISORCONNECTIONTYPE_GOOGLE_CLOUD_PLATFORM { + networkName = networkMapping.Network.ValueString() + } else if resourcePool.ConnectionType == citrixorchestration.HYPERVISORCONNECTIONTYPE_AWS { + networkName = fmt.Sprintf("%s (%s)", networkMapping.Network.ValueString(), resourcePool.GetResourcePoolRootId()) + } + network := slices.IndexFunc(networks, func(c citrixorchestration.HypervisorResourceRefResponseModel) bool { return c.GetName() == networkName }) + if network == -1 { + return res, fmt.Errorf("network %s not found", networkName) + } + + res = append(res, citrixorchestration.NetworkMapRequestModel{ + NetworkDeviceNameOrId: 
*citrixorchestration.NewNullableString(networkMapping.NetworkDevice.ValueStringPointer()), + NetworkPath: networks[network].GetXDPath(), + }) + return res, nil +} diff --git a/internal/daas/machine_catalog/machine_catalog_remote_pc_utils.go b/internal/daas/machine_catalog/machine_catalog_remote_pc_utils.go new file mode 100644 index 0000000..d111dc9 --- /dev/null +++ b/internal/daas/machine_catalog/machine_catalog_remote_pc_utils.go @@ -0,0 +1,45 @@ +// Copyright © 2023. Citrix Systems, Inc. + +package machine_catalog + +import ( + "github.com/citrix/citrix-daas-rest-go/citrixorchestration" + "github.com/citrix/terraform-provider-citrix/internal/util" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func getRemotePcEnrollmentScopes(plan MachineCatalogResourceModel, includeMachines bool) []citrixorchestration.RemotePCEnrollmentScopeRequestModel { + remotePCEnrollmentScopes := []citrixorchestration.RemotePCEnrollmentScopeRequestModel{} + if plan.RemotePcOus != nil { + for _, ou := range plan.RemotePcOus { + var remotePCEnrollmentScope citrixorchestration.RemotePCEnrollmentScopeRequestModel + remotePCEnrollmentScope.SetIncludeSubfolders(ou.IncludeSubFolders.ValueBool()) + remotePCEnrollmentScope.SetOU(ou.OUName.ValueString()) + remotePCEnrollmentScope.SetIsOrganizationalUnit(true) + remotePCEnrollmentScopes = append(remotePCEnrollmentScopes, remotePCEnrollmentScope) + } + } + + if includeMachines && plan.MachineAccounts != nil { + for _, machineAccount := range plan.MachineAccounts { + for _, machine := range machineAccount.Machines { + var remotePCEnrollmentScope citrixorchestration.RemotePCEnrollmentScopeRequestModel + remotePCEnrollmentScope.SetIncludeSubfolders(false) + remotePCEnrollmentScope.SetOU(machine.MachineAccount.ValueString()) + remotePCEnrollmentScope.SetIsOrganizationalUnit(false) + remotePCEnrollmentScopes = append(remotePCEnrollmentScopes, remotePCEnrollmentScope) + } + } + } + + return remotePCEnrollmentScopes +} + +func (r MachineCatalogResourceModel) updateCatalogWithRemotePcConfig(catalog *citrixorchestration.MachineCatalogDetailResponseModel) MachineCatalogResourceModel { + if catalog.GetProvisioningType() == citrixorchestration.PROVISIONINGTYPE_MANUAL || !r.IsRemotePc.IsNull() { + r.IsRemotePc = types.BoolValue(catalog.GetIsRemotePC()) + } + rpcOUs := util.RefreshListProperties[RemotePcOuModel, citrixorchestration.RemotePCEnrollmentScopeResponseModel](r.RemotePcOus, "OUName", catalog.GetRemotePCEnrollmentScopes(), "OU", "RefreshListItem") + r.RemotePcOus = rpcOUs + return r +} diff --git a/internal/daas/machine_catalog/machine_catalog_resource.go b/internal/daas/machine_catalog/machine_catalog_resource.go new file mode 100644 index 0000000..e6eeeb9 --- /dev/null +++ b/internal/daas/machine_catalog/machine_catalog_resource.go @@ -0,0 +1,500 @@ +// Copyright © 2023. Citrix Systems, Inc. + +package machine_catalog + +import ( + "context" + "fmt" + "net/http" + + citrixorchestration "github.com/citrix/citrix-daas-rest-go/citrixorchestration" + citrixdaasclient "github.com/citrix/citrix-daas-rest-go/client" + "github.com/citrix/terraform-provider-citrix/internal/util" + + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +// Ensure the implementation satisfies the expected interfaces. 
+var ( + _ resource.Resource = &machineCatalogResource{} + _ resource.ResourceWithConfigure = &machineCatalogResource{} + _ resource.ResourceWithImportState = &machineCatalogResource{} + _ resource.ResourceWithValidateConfig = &machineCatalogResource{} +) + +// NewMachineCatalogResource is a helper function to simplify the provider implementation. +func NewMachineCatalogResource() resource.Resource { + return &machineCatalogResource{} +} + +// machineCatalogResource is the resource implementation. +type machineCatalogResource struct { + client *citrixdaasclient.CitrixDaasClient +} + +// Metadata returns the resource type name. +func (r *machineCatalogResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_machine_catalog" +} + +// Schema defines the schema for the resource. +func (r *machineCatalogResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = getSchemaForMachineCatalogResource() +} + +// Configure adds the provider configured client to the resource. +func (r *machineCatalogResource) Configure(_ context.Context, req resource.ConfigureRequest, _ *resource.ConfigureResponse) { + if req.ProviderData == nil { + return + } + + r.client = req.ProviderData.(*citrixdaasclient.CitrixDaasClient) +} + +// Create creates the resource and sets the initial Terraform state. +func (r *machineCatalogResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + defer util.PanicHandler(&resp.Diagnostics) + + // Retrieve values from plan + var plan MachineCatalogResourceModel + diags := req.Plan.Get(ctx, &plan) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + var connectionType citrixorchestration.HypervisorConnectionType + + body, err := getRequestModelForCreateMachineCatalog(plan, ctx, r.client, &resp.Diagnostics, &connectionType, r.client.AuthConfig.OnPremises) + + if err != nil { + return + } + + createMachineCatalogRequest := r.client.ApiClient.MachineCatalogsAPIsDAAS.MachineCatalogsCreateMachineCatalog(ctx) + + // Add domain credential header + if plan.ProvisioningType.ValueString() == string(citrixorchestration.PROVISIONINGTYPE_MCS) && plan.ProvisioningScheme.MachineDomainIdentity != nil { + header := generateAdminCredentialHeader(plan) + createMachineCatalogRequest = createMachineCatalogRequest.XAdminCredential(header) + } + + // Add request body + createMachineCatalogRequest = createMachineCatalogRequest.CreateMachineCatalogRequestModel(*body) + + // Make request async + createMachineCatalogRequest = createMachineCatalogRequest.Async(true) + + // Create new machine catalog + _, httpResp, err := citrixdaasclient.AddRequestData(createMachineCatalogRequest, r.client).Execute() + if err != nil { + resp.Diagnostics.AddError( + "Error creating Machine Catalog", + "TransactionId: "+citrixdaasclient.GetTransactionIdFromHttpResponse(httpResp)+ + "\nError message: "+util.ReadClientError(err), + ) + return + } + + err = util.ProcessAsyncJobResponse(ctx, r.client, httpResp, "Error creating Machine Catalog", &resp.Diagnostics, 120, false) + if err != nil { + return + } + + // Get the new catalog + catalog, err := util.GetMachineCatalog(ctx, r.client, &resp.Diagnostics, plan.Name.ValueString(), true) + + if err != nil { + return + } + + machines, err := util.GetMachineCatalogMachines(ctx, r.client, &resp.Diagnostics, catalog.GetId()) + + if err != nil { + return + } + + // Map response 
body to schema and populate Computed attribute values + plan = plan.RefreshPropertyValues(ctx, r.client, catalog, &connectionType, machines) + + // Set state to fully populated data + diags = resp.State.Set(ctx, plan) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } +} + +// Read refreshes the Terraform state with the latest data. +func (r *machineCatalogResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + defer util.PanicHandler(&resp.Diagnostics) + + // Get current state + var state MachineCatalogResourceModel + diags := req.State.Get(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + // Get refreshed machine catalog state from Orchestration + catalogId := state.Id.ValueString() + + catalog, _, err := readMachineCatalog(ctx, r.client, resp, catalogId) + if err != nil { + return + } + + machineCatalogMachines, err := util.GetMachineCatalogMachines(ctx, r.client, &resp.Diagnostics, catalogId) + if err != nil { + return + } + + // Resolve resource path for service offering and master image + provScheme := catalog.GetProvisioningScheme() + resourcePool := provScheme.GetResourcePool() + hypervisor := resourcePool.GetHypervisor() + hypervisorName := hypervisor.GetName() + + var connectionType *citrixorchestration.HypervisorConnectionType + + if hypervisorName != "" { + hypervisor, err := util.GetHypervisor(ctx, r.client, &resp.Diagnostics, hypervisorName) + if err != nil { + return + } + connectionType = hypervisor.GetConnectionType().Ptr() + } + // Overwrite items with refreshed state + state = state.RefreshPropertyValues(ctx, r.client, catalog, connectionType, machineCatalogMachines) + + // Set refreshed state + diags = resp.State.Set(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } +} + +// Update updates the resource and sets the updated Terraform state on success. +func (r *machineCatalogResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + defer util.PanicHandler(&resp.Diagnostics) + + // Retrieve values from plan + var plan MachineCatalogResourceModel + var state MachineCatalogResourceModel + diags := req.Plan.Get(ctx, &plan) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + diags = req.State.Get(ctx, &state) + resp.Diagnostics.Append(diags...) 
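+    // Update sequence, as implemented below: push the catalog-level changes first;
+    // then, for MCS catalogs, update the master image and reconcile the machine count
+    // by adding or deleting machines. Manual catalogs instead diff the
+    // machine_accounts lists between state and plan.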
+    if resp.Diagnostics.HasError() {
+        return
+    }
+
+    // Get the refreshed machine catalog from Orchestration
+    catalogId := plan.Id.ValueString()
+    catalogName := plan.Name.ValueString()
+    catalog, err := util.GetMachineCatalog(ctx, r.client, &resp.Diagnostics, catalogId, true)
+
+    if err != nil {
+        return
+    }
+
+    var connectionType citrixorchestration.HypervisorConnectionType
+
+    body, err := getRequestModelForUpdateMachineCatalog(plan, state, catalog, ctx, r.client, resp, &connectionType, r.client.AuthConfig.OnPremises)
+    if err != nil {
+        return
+    }
+
+    updateMachineCatalogRequest := r.client.ApiClient.MachineCatalogsAPIsDAAS.MachineCatalogsUpdateMachineCatalog(ctx, catalogId)
+    updateMachineCatalogRequest = updateMachineCatalogRequest.UpdateMachineCatalogRequestModel(*body)
+    _, httpResp, err := citrixdaasclient.AddRequestData(updateMachineCatalogRequest, r.client).Execute()
+    if err != nil {
+        resp.Diagnostics.AddError(
+            "Error updating Machine Catalog "+catalogName,
+            "TransactionId: "+citrixdaasclient.GetTransactionIdFromHttpResponse(httpResp)+
+                "\nError message: "+util.ReadClientError(err),
+        )
+        return
+    }
+
+    provisioningType, err := citrixorchestration.NewProvisioningTypeFromValue(plan.ProvisioningType.ValueString())
+    if err != nil {
+        resp.Diagnostics.AddError(
+            "Error updating Machine Catalog "+catalogName,
+            "Unsupported provisioning type.",
+        )
+
+        return
+    }
+
+    if *provisioningType == citrixorchestration.PROVISIONINGTYPE_MANUAL {
+        // For manual, compare state and plan to find machines to add and delete
+        addMachinesList, deleteMachinesMap := createAddAndRemoveMachinesListForManualCatalogs(state, plan)
+
+        addMachinesToManualCatalog(ctx, r.client, resp, addMachinesList, catalogId)
+        deleteMachinesFromManualCatalog(ctx, r.client, resp, deleteMachinesMap, catalogId, catalog.GetIsPowerManaged())
+    } else {
+        err = updateCatalogImage(ctx, r.client, resp, catalog, plan)
+
+        if err != nil {
+            return
+        }
+
+        if catalog.GetTotalCount() > int32(plan.ProvisioningScheme.NumTotalMachines.ValueInt64()) {
+            // delete machines from machine catalog
+            err = deleteMachinesFromMcsCatalog(ctx, r.client, resp, catalog, plan)
+            if err != nil {
+                return
+            }
+        }
+
+        if catalog.GetTotalCount() < int32(plan.ProvisioningScheme.NumTotalMachines.ValueInt64()) {
+            // add machines to machine catalog
+            err = addMachinesToMcsCatalog(ctx, r.client, resp, catalog, plan)
+            if err != nil {
+                return
+            }
+        }
+    }
+
+    // Fetch updated machine catalog from GetMachineCatalog.
+    catalog, err = util.GetMachineCatalog(ctx, r.client, &resp.Diagnostics, catalogId, true)
+    if err != nil {
+        return
+    }
+
+    machines, err := util.GetMachineCatalogMachines(ctx, r.client, &resp.Diagnostics, catalog.GetId())
+    if err != nil {
+        return
+    }
+
+    // Update resource state with updated items and timestamp
+    plan = plan.RefreshPropertyValues(ctx, r.client, catalog, &connectionType, machines)
+
+    diags = resp.State.Set(ctx, plan)
+    resp.Diagnostics.Append(diags...)
+    if resp.Diagnostics.HasError() {
+        return
+    }
+}
+
+// Delete deletes the resource and removes the Terraform state on success.
+func (r *machineCatalogResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) {
+    defer util.PanicHandler(&resp.Diagnostics)
+
+    // Retrieve values from state
+    var state MachineCatalogResourceModel
+    diags := req.State.Get(ctx, &state)
+    resp.Diagnostics.Append(diags...)
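+    // For MCS-provisioned catalogs the delete below also removes the provisioned VMs
+    // and their AD machine accounts (authorized via the domain credential from state);
+    // manually provisioned catalogs only have the catalog object itself removed.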
+    if resp.Diagnostics.HasError() {
+        return
+    }
+
+    catalogId := state.Id.ValueString()
+
+    catalog, httpResp, err := readMachineCatalog(ctx, r.client, nil, catalogId)
+
+    if err != nil {
+        if httpResp.StatusCode == http.StatusNotFound {
+            return
+        }
+
+        resp.Diagnostics.AddError(
+            "Error reading Machine Catalog "+catalogId,
+            "TransactionId: "+citrixdaasclient.GetTransactionIdFromHttpResponse(httpResp)+
+                "\nError message: "+util.ReadClientError(err),
+        )
+
+        return
+    }
+
+    // Delete the existing machine catalog
+    catalogName := state.Name.ValueString()
+    deleteMachineCatalogRequest := r.client.ApiClient.MachineCatalogsAPIsDAAS.MachineCatalogsDeleteMachineCatalog(ctx, catalogId)
+    deleteAccountOption := citrixorchestration.MACHINEACCOUNTDELETEOPTION_NONE
+    deleteVmOption := false
+    if catalog.ProvisioningType == citrixorchestration.PROVISIONINGTYPE_MCS {
+        // If there is no provisioning scheme in state, there will not be any machines created by MCS.
+        // Therefore we just omit the credential for removing machine accounts.
+        if catalog.ProvisioningScheme != nil {
+            // Add domain credential header
+            header := generateAdminCredentialHeader(state)
+            deleteMachineCatalogRequest = deleteMachineCatalogRequest.XAdminCredential(header)
+        }
+
+        deleteAccountOption = citrixorchestration.MACHINEACCOUNTDELETEOPTION_DELETE
+        deleteVmOption = true
+    }
+
+    deleteMachineCatalogRequest = deleteMachineCatalogRequest.DeleteVm(deleteVmOption).DeleteAccount(deleteAccountOption).Async(true)
+    httpResp, err = citrixdaasclient.AddRequestData(deleteMachineCatalogRequest, r.client).Execute()
+    if err != nil && httpResp.StatusCode != http.StatusNotFound {
+        resp.Diagnostics.AddError(
+            "Error deleting Machine Catalog "+catalogName,
+            "TransactionId: "+citrixdaasclient.GetTransactionIdFromHttpResponse(httpResp)+
+                "\nError message: "+util.ReadClientError(err),
+        )
+        return
+    }
+
+    err = util.ProcessAsyncJobResponse(ctx, r.client, httpResp, "Error deleting Machine Catalog "+catalogName, &resp.Diagnostics, 60, false)
+    if err != nil {
+        return
+    }
+}
+
+func (r *machineCatalogResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) {
+    // Retrieve import ID and save to id attribute
+    resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp)
+}
+
+func (r *machineCatalogResource) ValidateConfig(ctx context.Context, req resource.ValidateConfigRequest, resp *resource.ValidateConfigResponse) {
+    var data MachineCatalogResourceModel
+    diags := req.Config.Get(ctx, &data)
+    resp.Diagnostics.Append(diags...)
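+    // Cross-attribute rules enforced below: MCS catalogs require a provisioning_scheme,
+    // forbid machine_accounts and Remote PC, and are always power managed; manual
+    // catalogs require explicit is_power_managed and is_remote_pc and forbid
+    // provisioning_scheme; Remote PC catalogs must be single-session and cannot be
+    // power managed.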
+    if resp.Diagnostics.HasError() {
+        return
+    }
+
+    provisioningTypeMcs := string(citrixorchestration.PROVISIONINGTYPE_MCS)
+    provisioningTypeManual := string(citrixorchestration.PROVISIONINGTYPE_MANUAL)
+
+    if data.ProvisioningType.ValueString() == provisioningTypeMcs {
+        if data.ProvisioningScheme == nil {
+            resp.Diagnostics.AddAttributeError(
+                path.Root("provisioning_scheme"),
+                "Missing Attribute Configuration",
+                fmt.Sprintf("Expected provisioning_scheme to be configured when value of provisioning_type is %s.", provisioningTypeMcs),
+            )
+        }
+
+        if data.ProvisioningScheme != nil && data.ProvisioningScheme.AzureMachineConfig != nil && data.ProvisioningScheme.AzureMachineConfig.WritebackCache != nil {
+            wbc := data.ProvisioningScheme.AzureMachineConfig.WritebackCache
+            if !wbc.PersistOsDisk.ValueBool() && wbc.PersistVm.ValueBool() {
+                resp.Diagnostics.AddAttributeError(
+                    path.Root("persist_vm"),
+                    "Incorrect Attribute Configuration",
+                    "persist_os_disk must be enabled to enable persist_vm.",
+                )
+            }
+
+            if !wbc.PersistWBC.ValueBool() && wbc.StorageCostSaving.ValueBool() {
+                resp.Diagnostics.AddAttributeError(
+                    path.Root("storage_cost_saving"),
+                    "Incorrect Attribute Configuration",
+                    "persist_wbc must be enabled to enable storage_cost_saving.",
+                )
+            }
+        }
+
+        if data.MachineAccounts != nil {
+            resp.Diagnostics.AddAttributeError(
+                path.Root("machine_accounts"),
+                "Incorrect Attribute Configuration",
+                fmt.Sprintf("machine_accounts cannot be configured when provisioning_type is %s.", provisioningTypeMcs),
+            )
+        }
+
+        if data.IsRemotePc.ValueBool() {
+            resp.Diagnostics.AddAttributeError(
+                path.Root("is_remote_pc"),
+                "Incorrect Attribute Configuration",
+                fmt.Sprintf("Remote PC access catalog cannot be created when provisioning_type is %s.", provisioningTypeMcs),
+            )
+        }
+
+        if !data.IsPowerManaged.IsNull() && !data.IsPowerManaged.ValueBool() {
+            resp.Diagnostics.AddAttributeError(
+                path.Root("is_power_managed"),
+                "Incorrect Attribute Configuration",
+                fmt.Sprintf("Machines have to be power managed when provisioning_type is %s.", provisioningTypeMcs),
+            )
+        }
+
+        data.IsPowerManaged = types.BoolValue(true) // set power managed to true for MCS catalog
+    } else {
+        // Manual provisioning type
+        if data.IsPowerManaged.IsNull() {
+            resp.Diagnostics.AddAttributeError(
+                path.Root("is_power_managed"),
+                "Missing Attribute Configuration",
+                fmt.Sprintf("Expected is_power_managed to be defined when provisioning_type is %s.", provisioningTypeManual),
+            )
+        }
+
+        if data.IsRemotePc.IsNull() {
+            resp.Diagnostics.AddAttributeError(
+                path.Root("is_remote_pc"),
+                "Missing Attribute Configuration",
+                fmt.Sprintf("Expected is_remote_pc to be defined when provisioning_type is %s.", provisioningTypeManual),
+            )
+        }
+
+        if data.ProvisioningScheme != nil {
+            resp.Diagnostics.AddAttributeError(
+                path.Root("provisioning_scheme"),
+                "Incorrect Attribute Configuration",
+                fmt.Sprintf("provisioning_scheme cannot be configured when provisioning_type is not %s.", provisioningTypeMcs),
+            )
+        }
+
+        if data.IsPowerManaged.ValueBool() {
+            if data.MachineAccounts != nil {
+                for _, machineAccount := range data.MachineAccounts {
+                    if machineAccount.Hypervisor.IsNull() {
+                        resp.Diagnostics.AddAttributeError(
+                            path.Root("machine_accounts"),
+                            "Missing Attribute Configuration",
+                            "Expected hypervisor to be configured when machines are power managed.",
+                        )
+                    }
+
+                    for _, machine := range machineAccount.Machines {
+                        if machine.MachineName.IsNull() {
+                            resp.Diagnostics.AddAttributeError(
path.Root("machine_accounts"), + "Missing Attribute Configuration", + "Expected machine_name to be configured when machines are power managed.", + ) + } + } + } + } + + if data.IsRemotePc.ValueBool() { + resp.Diagnostics.AddAttributeError( + path.Root("is_remote_pc"), + "Incorrect Attribute Configuration", + "Remote PC Access catalog cannot be power managed.", + ) + } + } + } + + if data.IsRemotePc.ValueBool() { + sessionSupport, err := citrixorchestration.NewSessionSupportFromValue(data.SessionSupport.ValueString()) + if err != nil { + resp.Diagnostics.AddAttributeError( + path.Root("session_support"), + "Incorrect Attribute Configuration", + "Unsupported session support.", + ) + return + } + if sessionSupport != nil && *sessionSupport != citrixorchestration.SESSIONSUPPORT_SINGLE_SESSION { + resp.Diagnostics.AddAttributeError( + path.Root("session_support"), + "Incorrect Attribute Configuration", + "Only Single Session is supported for Remote PC Access catalog.", + ) + } + } +} diff --git a/internal/daas/machine_catalog/machine_catalog_resource_model.go b/internal/daas/machine_catalog/machine_catalog_resource_model.go new file mode 100644 index 0000000..5dd7a38 --- /dev/null +++ b/internal/daas/machine_catalog/machine_catalog_resource_model.go @@ -0,0 +1,157 @@ +// Copyright © 2023. Citrix Systems, Inc. + +package machine_catalog + +import ( + "context" + "reflect" + + citrixorchestration "github.com/citrix/citrix-daas-rest-go/citrixorchestration" + citrixclient "github.com/citrix/citrix-daas-rest-go/client" + + "github.com/hashicorp/terraform-plugin-framework/types" +) + +// MachineCatalogResourceModel maps the resource schema data. +type MachineCatalogResourceModel struct { + Id types.String `tfsdk:"id"` + Name types.String `tfsdk:"name"` + Description types.String `tfsdk:"description"` + IsPowerManaged types.Bool `tfsdk:"is_power_managed"` + IsRemotePc types.Bool `tfsdk:"is_remote_pc"` + AllocationType types.String `tfsdk:"allocation_type"` + SessionSupport types.String `tfsdk:"session_support"` + Zone types.String `tfsdk:"zone"` + VdaUpgradeType types.String `tfsdk:"vda_upgrade_type"` + ProvisioningType types.String `tfsdk:"provisioning_type"` + ProvisioningScheme *ProvisioningSchemeModel `tfsdk:"provisioning_scheme"` + MachineAccounts []MachineAccountsModel `tfsdk:"machine_accounts"` + RemotePcOus []RemotePcOuModel `tfsdk:"remote_pc_ous"` +} + +type MachineAccountsModel struct { + Hypervisor types.String `tfsdk:"hypervisor"` + Machines []MachineCatalogMachineModel `tfsdk:"machines"` +} + +type MachineCatalogMachineModel struct { + MachineAccount types.String `tfsdk:"machine_account"` + MachineName types.String `tfsdk:"machine_name"` + Region types.String `tfsdk:"region"` + ResourceGroupName types.String `tfsdk:"resource_group_name"` + ProjectName types.String `tfsdk:"project_name"` + AvailabilityZone types.String `tfsdk:"availability_zone"` + Datacenter types.String `tfsdk:"datacenter"` + Cluster types.String `tfsdk:"cluster"` + Host types.String `tfsdk:"host"` +} + +// ProvisioningSchemeModel maps the nested provisioning scheme resource schema data. 
+type ProvisioningSchemeModel struct {
+    Hypervisor                  types.String                      `tfsdk:"hypervisor"`
+    HypervisorResourcePool      types.String                      `tfsdk:"hypervisor_resource_pool"`
+    AzureMachineConfig          *AzureMachineConfigModel          `tfsdk:"azure_machine_config"`
+    AwsMachineConfig            *AwsMachineConfigModel            `tfsdk:"aws_machine_config"`
+    GcpMachineConfig            *GcpMachineConfigModel            `tfsdk:"gcp_machine_config"`
+    NumTotalMachines            types.Int64                       `tfsdk:"number_of_total_machines"`
+    NetworkMapping              *NetworkMappingModel              `tfsdk:"network_mapping"`
+    AvailabilityZones           types.String                      `tfsdk:"availability_zones"`
+    IdentityType                types.String                      `tfsdk:"identity_type"`
+    MachineDomainIdentity       *MachineDomainIdentityModel       `tfsdk:"machine_domain_identity"`
+    MachineAccountCreationRules *MachineAccountCreationRulesModel `tfsdk:"machine_account_creation_rules"`
+}
+
+type MachineProfileModel struct {
+    MachineProfileVmName        types.String `tfsdk:"machine_profile_vm_name"`
+    MachineProfileResourceGroup types.String `tfsdk:"machine_profile_resource_group"`
+}
+
+type MachineDomainIdentityModel struct {
+    Domain                 types.String `tfsdk:"domain"`
+    Ou                     types.String `tfsdk:"domain_ou"`
+    ServiceAccount         types.String `tfsdk:"service_account"`
+    ServiceAccountPassword types.String `tfsdk:"service_account_password"`
+}
+
+type GalleryImageModel struct {
+    Gallery    types.String `tfsdk:"gallery"`
+    Definition types.String `tfsdk:"definition"`
+    Version    types.String `tfsdk:"version"`
+}
+
+// WritebackCacheModel maps the write back cache configuration schema data.
+type WritebackCacheModel struct {
+    PersistWBC                 types.Bool   `tfsdk:"persist_wbc"`
+    WBCDiskStorageType         types.String `tfsdk:"wbc_disk_storage_type"`
+    PersistOsDisk              types.Bool   `tfsdk:"persist_os_disk"`
+    PersistVm                  types.Bool   `tfsdk:"persist_vm"`
+    StorageCostSaving          types.Bool   `tfsdk:"storage_cost_saving"`
+    WriteBackCacheDiskSizeGB   types.Int64  `tfsdk:"writeback_cache_disk_size_gb"`
+    WriteBackCacheMemorySizeMB types.Int64  `tfsdk:"writeback_cache_memory_size_mb"`
+}
+
+// MachineAccountCreationRulesModel maps the nested machine account creation rules resource schema data.
+type MachineAccountCreationRulesModel struct {
+    NamingScheme     types.String `tfsdk:"naming_scheme"`
+    NamingSchemeType types.String `tfsdk:"naming_scheme_type"`
+}
+
+// NetworkMappingModel maps the nested network mapping resource schema data.
+type NetworkMappingModel struct { + NetworkDevice types.String `tfsdk:"network_device"` + Network types.String `tfsdk:"network"` +} + +type RemotePcOuModel struct { + IncludeSubFolders types.Bool `tfsdk:"include_subfolders"` + OUName types.String `tfsdk:"ou_name"` +} + +func (r MachineCatalogResourceModel) RefreshPropertyValues(ctx context.Context, client *citrixclient.CitrixDaasClient, catalog *citrixorchestration.MachineCatalogDetailResponseModel, connectionType *citrixorchestration.HypervisorConnectionType, machines *citrixorchestration.MachineResponseModelCollection) MachineCatalogResourceModel { + // Machine Catalog Properties + r.Id = types.StringValue(catalog.GetId()) + r.Name = types.StringValue(catalog.GetName()) + if catalog.GetDescription() != "" { + r.Description = types.StringValue(catalog.GetDescription()) + } else { + r.Description = types.StringNull() + } + allocationType := catalog.GetAllocationType() + r.AllocationType = types.StringValue(allocationTypeEnumToString(allocationType)) + sessionSupport := catalog.GetSessionSupport() + r.SessionSupport = types.StringValue(reflect.ValueOf(sessionSupport).String()) + + catalogZone := catalog.GetZone() + r.Zone = types.StringValue(catalogZone.GetId()) + + if catalog.UpgradeInfo != nil { + if *catalog.UpgradeInfo.UpgradeType != citrixorchestration.VDAUPGRADETYPE_NOT_SET || !r.VdaUpgradeType.IsNull() { + r.VdaUpgradeType = types.StringValue(string(*catalog.UpgradeInfo.UpgradeType)) + } + } else { + r.VdaUpgradeType = types.StringNull() + } + + provtype := catalog.GetProvisioningType() + r.ProvisioningType = types.StringValue(string(provtype)) + if provtype == citrixorchestration.PROVISIONINGTYPE_MANUAL || !r.IsPowerManaged.IsNull() { + r.IsPowerManaged = types.BoolValue(catalog.GetIsPowerManaged()) + } + + if catalog.ProvisioningType == citrixorchestration.PROVISIONINGTYPE_MANUAL { + // Handle machines + r = r.updateCatalogWithMachines(ctx, client, machines) + } + + r = r.updateCatalogWithRemotePcConfig(catalog) + + if catalog.ProvisioningScheme == nil { + r.ProvisioningScheme = nil + return r + } + + // Provisioning Scheme Properties + r = r.updateCatalogWithProvScheme(catalog, connectionType) + + return r +} diff --git a/internal/daas/machine_catalog/machine_catalog_schema_utils.go b/internal/daas/machine_catalog/machine_catalog_schema_utils.go new file mode 100644 index 0000000..3a8f7ba --- /dev/null +++ b/internal/daas/machine_catalog/machine_catalog_schema_utils.go @@ -0,0 +1,620 @@ +// Copyright © 2023. Citrix Systems, Inc. 
+ +package machine_catalog + +import ( + "regexp" + + citrixorchestration "github.com/citrix/citrix-daas-rest-go/citrixorchestration" + "github.com/citrix/terraform-provider-citrix/internal/util" + "github.com/citrix/terraform-provider-citrix/internal/validators" + "github.com/hashicorp/terraform-plugin-framework-validators/int64validator" + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + "github.com/hashicorp/terraform-plugin-framework-validators/objectvalidator" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/boolplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/int64planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" +) + +func getSchemaForMachineCatalogResource() schema.Schema { + return schema.Schema{ + Description: "Manages a machine catalog.", + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Description: "GUID identifier of the machine catalog.", + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "name": schema.StringAttribute{ + Description: "Name of the machine catalog.", + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + "description": schema.StringAttribute{ + Description: "Description of the machine catalog.", + Optional: true, + }, + "is_power_managed": schema.BoolAttribute{ + Description: "Specify if the machines in the machine catalog will be power managed.", + Optional: true, + PlanModifiers: []planmodifier.Bool{ + boolplanmodifier.RequiresReplace(), + }, + }, + "is_remote_pc": schema.BoolAttribute{ + Description: "Specify if this catalog is for Remote PC access.", + Optional: true, + PlanModifiers: []planmodifier.Bool{ + boolplanmodifier.RequiresReplace(), + }, + }, + "allocation_type": schema.StringAttribute{ + Description: "Denotes how the machines in the catalog are allocated to a user. Choose between `Static` and `Random`.", + Required: true, + Validators: []validator.String{ + util.GetValidatorFromEnum(citrixorchestration.AllowedAllocationTypeEnumValues), + }, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + "session_support": schema.StringAttribute{ + Description: "Session support type. Choose between `SingleSession` and `MultiSession`. Session support should be SingleSession when `is_remote_pc = true`", + Required: true, + Validators: []validator.String{ + util.GetValidatorFromEnum(citrixorchestration.AllowedSessionSupportEnumValues), + }, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + "zone": schema.StringAttribute{ + Description: "Id of the zone the machine catalog is associated with.", + Required: true, + }, + "vda_upgrade_type": schema.StringAttribute{ + Description: "Type of Vda Upgrade. Choose between LTSR and CR. 
When omitted, Vda Upgrade is disabled.", + Optional: true, + Validators: []validator.String{ + stringvalidator.OneOf( + "LTSR", + "CR", + ), + }, + }, + "provisioning_type": schema.StringAttribute{ + Description: "Specifies how the machines are provisioned in the catalog.", + Required: true, + Validators: []validator.String{ + stringvalidator.OneOf( + string(citrixorchestration.PROVISIONINGTYPE_MCS), + string(citrixorchestration.PROVISIONINGTYPE_MANUAL), + ), + }, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + "machine_accounts": schema.ListNestedAttribute{ + Description: "List of machine accounts to add to the catalog. Only to be used when using `provisioning_type = MANUAL`", + Optional: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "hypervisor": schema.StringAttribute{ + Description: "The Id of the hypervisor in which the machines reside. Required only if `is_power_managed = true`", + Optional: true, + }, + "machines": schema.ListNestedAttribute{ + Description: "List of machines", + Required: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "machine_account": schema.StringAttribute{ + Description: "The Computer AD Account for the machine. Must be in the format DOMAIN\\MACHINE.", + Required: true, + Validators: []validator.String{ + stringvalidator.RegexMatches(regexp.MustCompile(util.SamRegex), "must be in the format DOMAIN\\MACHINE"), + }, + }, + "machine_name": schema.StringAttribute{ + Description: "The name of the machine. Required only if `is_power_managed = true`", + Optional: true, + }, + "region": schema.StringAttribute{ + Description: "**[Azure, GCP: Required]** The region in which the machine resides. Required only if `is_power_managed = true`", + Optional: true, + }, + "resource_group_name": schema.StringAttribute{ + Description: "**[Azure: Required]** The resource group in which the machine resides. Required only if `is_power_managed = true`", + Optional: true, + }, + "project_name": schema.StringAttribute{ + Description: "**[GCP: Required]** The project name in which the machine resides. Required only if `is_power_managed = true`", + Optional: true, + }, + "availability_zone": schema.StringAttribute{ + Description: "**[AWS: Required]** The availability zone in which the machine resides. Required only if `is_power_managed = true`", + Optional: true, + }, + "datacenter": schema.StringAttribute{ + Description: "**[VSphere: Required]** The datacenter in which the machine resides. Required only if `is_power_managed = true`", + Optional: true, + }, + "cluster": schema.StringAttribute{ + Description: "**[VSphere: Optional]** The cluster in which the machine resides. To be used only if `is_power_managed = true`", + Optional: true, + }, + "host": schema.StringAttribute{ + Description: "**[VSphere: Required]** The IP address or FQDN of the host in which the machine resides. Required only if `is_power_managed = true`", + Optional: true, + }, + }, + }, + Validators: []validator.List{ + listvalidator.SizeAtLeast(1), + }, + }, + }, + }, + Validators: []validator.List{ + listvalidator.SizeAtLeast(1), + }, + }, + "remote_pc_ous": schema.ListNestedAttribute{ + Description: "Organizational Units to be included in the Remote PC machine catalog. Only to be used when `is_remote_pc = true`. 
For adding machines, use `machine_accounts`.", + Optional: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "include_subfolders": schema.BoolAttribute{ + Description: "Specify if subfolders should be included.", + Required: true, + }, + "ou_name": schema.StringAttribute{ + Description: "Name of the OU.", + Required: true, + }, + }, + }, + Validators: []validator.List{ + listvalidator.SizeAtLeast(1), + }, + }, + "provisioning_scheme": schema.SingleNestedAttribute{ + Description: "Machine catalog provisioning scheme. Required when `provisioning_type = MCS`", + Optional: true, + Attributes: map[string]schema.Attribute{ + "hypervisor": schema.StringAttribute{ + Description: "Id of the hypervisor for creating the machines. Required only if using power managed machines.", + Required: true, + Validators: []validator.String{ + stringvalidator.RegexMatches(regexp.MustCompile(util.GuidRegex), "must be specified with ID in GUID format"), + }, + }, + "hypervisor_resource_pool": schema.StringAttribute{ + Description: "Id of the hypervisor resource pool that will be used for provisioning operations.", + Required: true, + Validators: []validator.String{ + stringvalidator.RegexMatches(regexp.MustCompile(util.GuidRegex), "must be specified with ID in GUID format"), + }, + }, + "azure_machine_config": schema.SingleNestedAttribute{ + Description: "Machine Configuration For Azure MCS catalog.", + Optional: true, + Attributes: map[string]schema.Attribute{ + "service_offering": schema.StringAttribute{ + Description: "The Azure VM Sku to use when creating machines.", + Required: true, + }, + "resource_group": schema.StringAttribute{ + Description: "The Azure Resource Group where the image VHD / managed disk / snapshot for creating machines is located.", + Required: true, + }, + "master_image": schema.StringAttribute{ + Description: "The name of the virtual machine snapshot or VM template that will be used. This identifies the hard disk to be used and the default values for the memory and processors. Omit this field if you want to use gallery_image.", + Optional: true, + }, + "storage_account": schema.StringAttribute{ + Description: "The Azure Storage Account where the image VHD for creating machines is located. Only applicable to Azure VHD image blob.", + Optional: true, + Validators: []validator.String{ + stringvalidator.AlsoRequires(path.Expressions{ + path.MatchRelative().AtParent().AtName("container"), + }...), + stringvalidator.AlsoRequires(path.Expressions{ + path.MatchRelative().AtParent().AtName("resource_group"), + }...), + }, + }, + "container": schema.StringAttribute{ + Description: "The Azure Storage Account Container where the image VHD for creating machines is located. Only applicable to Azure VHD image blob.", + Optional: true, + Validators: []validator.String{ + stringvalidator.AlsoRequires(path.Expressions{ + path.MatchRelative().AtParent().AtName("storage_account"), + }...), + stringvalidator.AlsoRequires(path.Expressions{ + path.MatchRelative().AtParent().AtName("resource_group"), + }...), + }, + }, + "gallery_image": schema.SingleNestedAttribute{ + Description: "Details of the Azure Image Gallery image to use for creating machines. Only Applicable to Azure Image Gallery image.", + Optional: true, + Attributes: map[string]schema.Attribute{ + "gallery": schema.StringAttribute{ + Description: "The Azure Image Gallery where the image for creating machines is located. 
Only applicable to Azure Image Gallery image.",
+									Required: true,
+								},
+								"definition": schema.StringAttribute{
+									Description: "The image definition for the image to be used in the Azure Image Gallery. Only applicable to Azure Image Gallery image.",
+									Required: true,
+								},
+								"version": schema.StringAttribute{
+									Description: "The image version for the image to be used in the Azure Image Gallery. Only applicable to Azure Image Gallery image.",
+									Required: true,
+								},
+							},
+							Validators: []validator.Object{
+								objectvalidator.AlsoRequires(path.Expressions{
+									path.MatchRelative().AtParent().AtName("resource_group"),
+								}...),
+								objectvalidator.ConflictsWith(path.Expressions{
+									path.MatchRelative().AtParent().AtName("storage_account"),
+								}...),
+								objectvalidator.ConflictsWith(path.Expressions{
+									path.MatchRelative().AtParent().AtName("container"),
+								}...),
+								objectvalidator.ConflictsWith(path.Expressions{
+									path.MatchRelative().AtParent().AtName("master_image"),
+								}...),
+							},
+						},
+						"storage_type": schema.StringAttribute{
+							Description: "Storage account type used for provisioned virtual machine disks on Azure. Storage types include: `Standard_LRS`, `StandardSSD_LRS` and `Premium_LRS`.",
+							Required: true,
+							Validators: []validator.String{
+								stringvalidator.OneOf(
+									"Standard_LRS",
+									"StandardSSD_LRS",
+									"Premium_LRS",
+								),
+							},
+						},
+						"vda_resource_group": schema.StringAttribute{
+							Description: "Designated resource group where the VDA VMs will be located on Azure.",
+							Optional: true,
+							PlanModifiers: []planmodifier.String{
+								stringplanmodifier.RequiresReplace(),
+							},
+						},
+						"use_managed_disks": schema.BoolAttribute{
+							Description: "Indicate whether to use Azure managed disks for the provisioned virtual machine.",
+							Optional: true,
+							PlanModifiers: []planmodifier.Bool{
+								boolplanmodifier.RequiresReplace(),
+							},
+						},
+						"machine_profile": schema.SingleNestedAttribute{
+							Description: "The name of the virtual machine template that will be used to identify the default value for the tags, virtual machine size, boot diagnostics, host cache property of OS disk, accelerated networking and availability zone." + "<br />" +
+								"Required when identity_type is set to `AzureAD`",
+							Optional: true,
+							Attributes: map[string]schema.Attribute{
+								"machine_profile_vm_name": schema.StringAttribute{
+									Description: "The name of the machine profile virtual machine.",
+									Required: true,
+								},
+								"machine_profile_resource_group": schema.StringAttribute{
+									Description: "The resource group name where machine profile VM is located in.",
+									Required: true,
+								},
+							},
+						},
+						"writeback_cache": schema.SingleNestedAttribute{
+							Description: "Write-back Cache config. Leave this empty to disable Write-back Cache. Write-back Cache requires Machine image with Write-back Cache plugin installed.",
+							Optional: true,
+							Attributes: map[string]schema.Attribute{
+								"persist_wbc": schema.BoolAttribute{
+									Description: "Persist Write-back Cache",
+									Required: true,
+								},
+								"wbc_disk_storage_type": schema.StringAttribute{
+									Description: "Storage type used for the Write-back Cache disk. Choose between `StandardSSD_LRS`, `Standard_LRS` and `Premium_LRS`.",
+									Required: true,
+									Validators: []validator.String{
+										stringvalidator.OneOf(
+											"StandardSSD_LRS",
+											"Standard_LRS",
+											"Premium_LRS",
+										),
+									},
+									PlanModifiers: []planmodifier.String{
+										stringplanmodifier.RequiresReplace(),
+									},
+								},
+								"persist_os_disk": schema.BoolAttribute{
+									Description: "Persist the OS disk when power cycling the non-persistent provisioned virtual machine.",
+									Required: true,
+									PlanModifiers: []planmodifier.Bool{
+										boolplanmodifier.RequiresReplace(),
+									},
+								},
+								"persist_vm": schema.BoolAttribute{
+									Description: "Persist the non-persistent provisioned virtual machine in Azure environments when power cycling. This property only applies when the PersistOsDisk property is set to True.",
+									Required: true,
+									PlanModifiers: []planmodifier.Bool{
+										boolplanmodifier.RequiresReplace(),
+									},
+								},
+								"storage_cost_saving": schema.BoolAttribute{
+									Description: "Save storage cost by downgrading the storage type of the disk to Standard HDD when VM shut down.",
+									Required: true,
+									PlanModifiers: []planmodifier.Bool{
+										boolplanmodifier.RequiresReplace(),
+									},
+								},
+								"writeback_cache_disk_size_gb": schema.Int64Attribute{
+									Description: "The size in GB of any temporary storage disk used by the write back cache.",
+									Required: true,
+									Validators: []validator.Int64{
+										int64validator.AtLeast(0),
+									},
+									PlanModifiers: []planmodifier.Int64{
+										int64planmodifier.RequiresReplace(),
+									},
+								},
+								"writeback_cache_memory_size_mb": schema.Int64Attribute{
+									Description: "The size of the in-memory write back cache in MB.",
+									Optional: true,
+									Validators: []validator.Int64{
+										int64validator.AtLeast(0),
+									},
+									PlanModifiers: []planmodifier.Int64{ // TO DO - Allow updating master image
+										int64planmodifier.RequiresReplace(),
+									},
+								},
+							},
+						},
+					},
+				},
+				"aws_machine_config": schema.SingleNestedAttribute{
+					Description: "Machine Configuration For AWS EC2 MCS catalog.",
+					Optional: true,
+					Attributes: map[string]schema.Attribute{
+						"service_offering": schema.StringAttribute{
+							Description: "The AWS VM Sku to use when creating machines.",
+							Required: true,
+						},
+						"master_image": schema.StringAttribute{
+							Description: "The name of the virtual machine image that will be used.",
+							Required: true,
+						},
+						"image_ami": schema.StringAttribute{
+							Description: "AMI of the AWS image to be used as the template image for the machine catalog.",
+							Required: true,
+						},
+					},
+				},
+				"gcp_machine_config": schema.SingleNestedAttribute{
+					Description: "Machine Configuration For GCP MCS catalog.",
+					Optional: true,
+					Attributes: map[string]schema.Attribute{
+						"master_image": schema.StringAttribute{
+							Description: "The name of the virtual machine snapshot or VM template that will be used. This identifies the hard disk to be used and the default values for the memory and processors.",
+							Required: true,
+						},
+						"machine_profile": schema.StringAttribute{
+							Description: "The name of the virtual machine template that will be used to identify the default value for the tags, virtual machine size, boot diagnostics, host cache property of OS disk, accelerated networking and availability zone. If not specified, the VM specified in master_image will be used as template.",
+							Optional: true,
+						},
+						"machine_snapshot": schema.StringAttribute{
+							Description: "The name of the virtual machine snapshot of a GCP VM that will be used as master image.",
+							Optional: true,
+						},
+						"storage_type": schema.StringAttribute{
+							Description: "Storage type used for provisioned virtual machine disks on GCP. Storage types include: `pd-standard`, `pd-balanced`, `pd-ssd` and `pd-extreme`.",
+							Required: true,
+							Validators: []validator.String{
+								stringvalidator.OneOf(
+									"pd-standard",
+									"pd-balanced",
+									"pd-ssd",
+									"pd-extreme",
+								),
+							},
+						},
+						"writeback_cache": schema.SingleNestedAttribute{
+							Description: "Write-back Cache config. Leave this empty to disable Write-back Cache.",
+							Optional: true,
+							Attributes: map[string]schema.Attribute{
+								"persist_wbc": schema.BoolAttribute{
+									Description: "Persist Write-back Cache",
+									Required: true,
+								},
+								"wbc_disk_storage_type": schema.StringAttribute{
+									Description: "Storage type used for the Write-back Cache disk. Choose between `pd-standard`, `pd-balanced` and `pd-ssd`.",
+									Required: true,
+									Validators: []validator.String{
+										stringvalidator.OneOf(
+											"pd-standard",
+											"pd-balanced",
+											"pd-ssd",
+										),
+									},
+									PlanModifiers: []planmodifier.String{
+										stringplanmodifier.RequiresReplace(),
+									},
+								},
+								"persist_os_disk": schema.BoolAttribute{
+									Description: "Persist the OS disk when power cycling the non-persistent provisioned virtual machine.",
+									Required: true,
+									PlanModifiers: []planmodifier.Bool{
+										boolplanmodifier.RequiresReplace(),
+									},
+								},
+								"writeback_cache_disk_size_gb": schema.Int64Attribute{
+									Description: "The size in GB of any temporary storage disk used by the write back cache.",
+									Required: true,
+									Validators: []validator.Int64{
+										int64validator.AtLeast(0),
+									},
+									PlanModifiers: []planmodifier.Int64{
+										int64planmodifier.RequiresReplace(),
+									},
+								},
+								"writeback_cache_memory_size_mb": schema.Int64Attribute{
+									Description: "The size of the in-memory write back cache in MB.",
+									Optional: true,
+									Validators: []validator.Int64{
+										int64validator.AtLeast(0),
+									},
+									PlanModifiers: []planmodifier.Int64{ // TO DO - Allow updating master image
+										int64planmodifier.RequiresReplace(),
+									},
+								},
+								"persist_vm": schema.BoolAttribute{
+									Description: "Not supported for GCP.",
+									Computed: true,
+									PlanModifiers: []planmodifier.Bool{
+										boolplanmodifier.RequiresReplace(),
+									},
+								},
+								"storage_cost_saving": schema.BoolAttribute{
+									Description: "Not supported for GCP.",
+									Computed: true,
+									PlanModifiers: []planmodifier.Bool{
+										boolplanmodifier.RequiresReplace(),
+									},
+								},
+							},
+						},
+					},
+				},
+				"machine_domain_identity": schema.SingleNestedAttribute{
+					Description: "The domain identity for machines in the machine catalog." + "<br />" +
+						"Required when identity_type is set to `ActiveDirectory`",
+					Optional: true,
+					Attributes: map[string]schema.Attribute{
+						"domain": schema.StringAttribute{
+							Description: "The AD domain name for the pool. Specify this in FQDN format; for example, MyDomain.com.",
+							Required: true,
+							Validators: []validator.String{
+								stringvalidator.RegexMatches(regexp.MustCompile(util.DomainFqdnRegex), "must be in FQDN format"),
+							},
+						},
+						"domain_ou": schema.StringAttribute{
+							Description: "The organization unit that computer accounts will be created into.",
+							Optional: true,
+						},
+						"service_account": schema.StringAttribute{
+							Description: "Service account for the domain. Only the username is required; do not include the domain name.",
+							Required: true,
+						},
+						"service_account_password": schema.StringAttribute{
+							Description: "Service account password for the domain.",
+							Required: true,
+							Sensitive: true,
+						},
+					},
+				},
+				"number_of_total_machines": schema.Int64Attribute{
+					Description: "Number of VDA machines allocated in the catalog.",
+					Required: true,
+					Validators: []validator.Int64{
+						int64validator.AtLeast(1),
+					},
+				},
+				"network_mapping": schema.SingleNestedAttribute{
+					Description: "Specifies how the attached NICs are mapped to networks. If this parameter is omitted, provisioned VMs are created with a single NIC, which is mapped to the default network in the hypervisor resource pool. If this parameter is supplied, machines are created with the number of NICs specified in the map, and each NIC is attached to the specified network." + "<br />" +
+						"Required when `provisioning_scheme.identity_type` is `AzureAD`.",
+					Optional: true,
+					Attributes: map[string]schema.Attribute{
+						"network_device": schema.StringAttribute{
+							Description: "Name or Id of the network device.",
+							Required: true,
+							Validators: []validator.String{
+								stringvalidator.AlsoRequires(path.Expressions{
+									path.MatchRelative().AtParent().AtName("network"),
+								}...),
+							},
+						},
+						"network": schema.StringAttribute{
+							Description: "The name of the virtual network that the device should be attached to. This must be a subnet within a Virtual Private Cloud item in the resource pool to which the Machine Catalog is associated." + "<br />" +
+								"For AWS, please specify the network mask of the network you want to use within the VPC.",
+							Required: true,
+							Validators: []validator.String{
+								stringvalidator.AlsoRequires(path.Expressions{
+									path.MatchRelative().AtParent().AtName("network_device"),
+								}...),
+							},
+						},
+					},
+				},
+				"availability_zones": schema.StringAttribute{
+					Description: "The Availability Zones for provisioning virtual machines. Use a comma as a delimiter for multiple availability_zones.",
+					Optional: true,
+				},
+				"identity_type": schema.StringAttribute{
+					Description: "The identity type of the machines to be created. Supported values are `ActiveDirectory`, `AzureAD`, and `HybridAzureAD`.",
+					Required: true,
+					Validators: []validator.String{
+						stringvalidator.OneOf(
+							string(citrixorchestration.IDENTITYTYPE_ACTIVE_DIRECTORY),
+							string(citrixorchestration.IDENTITYTYPE_AZURE_AD),
+							string(citrixorchestration.IDENTITYTYPE_HYBRID_AZURE_AD),
+						),
+						validators.AlsoRequiresOnValues(
+							[]string{
+								string(citrixorchestration.IDENTITYTYPE_ACTIVE_DIRECTORY),
+							},
+							path.MatchRelative().AtParent().AtName("machine_domain_identity"),
+						),
+						validators.AlsoRequiresOnValues(
+							[]string{
+								string(citrixorchestration.IDENTITYTYPE_HYBRID_AZURE_AD),
+							},
+							path.MatchRelative().AtParent().AtName("machine_domain_identity"),
+						),
+						validators.AlsoRequiresOnValues(
+							[]string{
+								string(citrixorchestration.IDENTITYTYPE_AZURE_AD),
+							},
+							path.MatchRelative().AtParent().AtName("azure_machine_config"),
+							path.MatchRelative().AtParent().AtName("azure_machine_config").AtName("machine_profile"),
+							path.MatchRelative().AtParent().AtName("network_mapping"),
+						),
+					},
+					PlanModifiers: []planmodifier.String{
+						stringplanmodifier.RequiresReplace(),
+					},
+				},
+				"machine_account_creation_rules": schema.SingleNestedAttribute{
+					Description: "Rules specifying how Active Directory machine accounts should be created when machines are provisioned.",
+					Required: true,
+					Attributes: map[string]schema.Attribute{
+						"naming_scheme": schema.StringAttribute{
+							Description: "Defines the template name for AD accounts created in the identity pool.",
+							Required: true,
+						},
+						"naming_scheme_type": schema.StringAttribute{
+							Description: "Type of naming scheme. This defines the format of the variable part of the AD account names that will be created. Choose between `Numeric`, `Alphabetic` and `Unicode`.",
+							Required: true,
+							Validators: []validator.String{
+								util.GetValidatorFromEnum(citrixorchestration.AllowedAccountNamingSchemeTypeEnumValues),
+							},
+						},
+					},
+				},
+			},
+		},
+	},
+	}
+}
diff --git a/internal/daas/resources/machine_catalog/machine_config.go b/internal/daas/machine_catalog/machine_config.go
similarity index 100%
rename from internal/daas/resources/machine_catalog/machine_config.go
rename to internal/daas/machine_catalog/machine_config.go
diff --git a/internal/daas/policies/policy_set_resource.go b/internal/daas/policies/policy_set_resource.go
new file mode 100644
index 0000000..f760925
--- /dev/null
+++ b/internal/daas/policies/policy_set_resource.go
@@ -0,0 +1,835 @@
+// Copyright © 2023. Citrix Systems, Inc.
+
+package policies
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"strconv"
+	"strings"
+
+	citrixorchestration "github.com/citrix/citrix-daas-rest-go/citrixorchestration"
+	citrixdaasclient "github.com/citrix/citrix-daas-rest-go/client"
+	"github.com/citrix/terraform-provider-citrix/internal/util"
+
+	"github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator"
+	"github.com/hashicorp/terraform-plugin-framework/diag"
+	"github.com/hashicorp/terraform-plugin-framework/path"
+	"github.com/hashicorp/terraform-plugin-framework/resource"
+	"github.com/hashicorp/terraform-plugin-framework/resource/schema"
+	"github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier"
+	"github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier"
+	"github.com/hashicorp/terraform-plugin-framework/schema/validator"
+	"github.com/hashicorp/terraform-plugin-framework/types"
+	"github.com/hashicorp/terraform-plugin-go/tftypes"
+)
+
+// Ensure the implementation satisfies the expected interfaces.
+var (
+	_ resource.Resource                = &policySetResource{}
+	_ resource.ResourceWithConfigure   = &policySetResource{}
+	_ resource.ResourceWithImportState = &policySetResource{}
+)
+
+// NewPolicySetResource is a helper function to simplify the provider implementation.
+func NewPolicySetResource() resource.Resource {
+	return &policySetResource{}
+}
+
+// policySetResource is the resource implementation.
+type policySetResource struct {
+	client *citrixdaasclient.CitrixDaasClient
+}
+
+// Metadata returns the resource type name.
+func (r *policySetResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) {
+	resp.TypeName = req.ProviderTypeName + "_policy_set"
+}
+
+// Configure implements resource.ResourceWithConfigure.
+func (r *policySetResource) Configure(_ context.Context, req resource.ConfigureRequest, _ *resource.ConfigureResponse) {
+	if req.ProviderData == nil {
+		return
+	}
+
+	r.client = req.ProviderData.(*citrixdaasclient.CitrixDaasClient)
+}
+
+// Schema implements resource.Resource.
+func (*policySetResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) {
+	resp.Schema = schema.Schema{
+		Description: "Manages a policy set and the policies within it.",
+		Attributes: map[string]schema.Attribute{
+			"id": schema.StringAttribute{
+				Description: "GUID identifier of the policy set.",
+				Computed: true,
+				PlanModifiers: []planmodifier.String{
+					stringplanmodifier.UseStateForUnknown(),
+				},
+			},
+			"name": schema.StringAttribute{
+				Description: "Name of the policy set.",
+				Required: true,
+			},
+			"type": schema.StringAttribute{
+				Description: "Type of the policy set. Type can be one of `SitePolicies`, `DeliveryGroupPolicies`, `SiteTemplates`, or `CustomTemplates`.",
+				Required: true,
+				Validators: []validator.String{
+					stringvalidator.OneOf([]string{
+						"SitePolicies",
+						"DeliveryGroupPolicies",
+						"SiteTemplates",
+						"CustomTemplates"}...),
+				},
+				PlanModifiers: []planmodifier.String{
+					stringplanmodifier.RequiresReplace(),
+				},
+			},
+			"description": schema.StringAttribute{
+				Description: "Description of the policy set.",
+				Optional: true,
+			},
+			"scopes": schema.SetAttribute{
+				ElementType: types.StringType,
+				Description: "The names of the scopes for the policy set to apply on.",
+				Required: true,
+			},
+			"policies": schema.ListNestedAttribute{
+				Description: "Ordered list of policies.",
+				Required: true,
+				NestedObject: schema.NestedAttributeObject{
+					Attributes: map[string]schema.Attribute{
+						"name": schema.StringAttribute{
+							Description: "Name of the policy.",
+							Required: true,
+						},
+						"description": schema.StringAttribute{
+							Description: "Description of the policy.",
+							Optional: true,
+						},
+						"is_enabled": schema.BoolAttribute{
+							Description: "Indicate whether the policy is enabled.",
+							Required: true,
+						},
+						"policy_settings": schema.SetNestedAttribute{
+							Description: "Set of policy settings.",
+							Required: true,
+							NestedObject: schema.NestedAttributeObject{
+								Attributes: map[string]schema.Attribute{
+									"name": schema.StringAttribute{
+										Description: "Name of the policy setting.",
+										Required: true,
+									},
+									"use_default": schema.BoolAttribute{
+										Description: "Indicate whether to use the default value for the policy setting.",
+										Required: true,
+									},
+									"value": schema.StringAttribute{
+										Description: "Value of the policy setting.",
+										Required: true,
+									},
+								},
+							},
+						},
+						"policy_filters": schema.SetNestedAttribute{
+							Description: "Set of policy filters.",
+							Required: true,
+							NestedObject: schema.NestedAttributeObject{
+								Attributes: map[string]schema.Attribute{
+									"type": schema.StringAttribute{
+										Description: "Type of the policy filter. Type can be one of `AccessControl`, `BranchRepeater`, `ClientIP`, `ClientName`, `DesktopGroup`, `DesktopKind`, `OU`, `User`, and `DesktopTag`.",
+										Required: true,
+										Validators: []validator.String{
+											stringvalidator.OneOf([]string{
+												"AccessControl",
+												"BranchRepeater",
+												"ClientIP",
+												"ClientName",
+												"DesktopGroup",
+												"DesktopKind",
+												"OU",
+												"User",
+												"DesktopTag"}...),
+										},
+									},
+									"data": schema.StringAttribute{
+										Description: "Data of the policy filter.",
+										Optional: true,
+									},
+									"is_enabled": schema.BoolAttribute{
+										Description: "Indicate whether the policy filter is enabled.",
+										Required: true,
+									},
+									"is_allowed": schema.BoolAttribute{
+										Description: "Indicate whether the filtered policy is allowed or denied if the filter condition is met.",
+										Required: true,
+									},
+								},
+							},
+						},
+					},
+				},
+			},
+			"is_assigned": schema.BoolAttribute{
+				Description: "Indicate whether the policy set is being assigned to delivery groups.",
+				Computed: true,
+			},
+		},
+	}
+}
+
+// Create implements resource.Resource.
+func (r *policySetResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) {
+	defer util.PanicHandler(&resp.Diagnostics)
+
+	// Retrieve values from plan
+	var plan PolicySetResourceModel
+	diags := req.Plan.Get(ctx, &plan)
+	resp.Diagnostics.Append(diags...)
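+
+	// Creation is a multi-step flow: create the empty policy set first, then
+	// batch-create its policies, and finally re-rank them so their priorities
+	// match the order of the policies list in the plan.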
+ if resp.Diagnostics.HasError() { + return + } + + policySets, err := getPolicySets(ctx, r.client, &resp.Diagnostics) + if err != nil { + return + } + + for _, policySet := range policySets { + if strings.EqualFold(policySet.GetName(), plan.Name.ValueString()) { + resp.Diagnostics.AddError( + "Error Creating Policy Set", + "Policy Set with name "+plan.Name.ValueString()+" already exists", + ) + return + } + } + + var createPolicySetRequestBody = &citrixorchestration.PolicySetRequest{} + createPolicySetRequestBody.SetName(plan.Name.ValueString()) + createPolicySetRequestBody.SetDescription(plan.Description.ValueString()) + createPolicySetRequestBody.SetPolicySetType(plan.Type.ValueString()) + + createPolicySetRequestBody.SetScopes(util.ConvertBaseStringArrayToPrimitiveStringArray(plan.Scopes)) + + createPolicySetRequest := r.client.ApiClient.GpoDAAS.GpoCreateGpoPolicySet(ctx) + createPolicySetRequest = createPolicySetRequest.PolicySetRequest(*createPolicySetRequestBody) + + // Create new Policy Set + policySetResponse, httpResp, err := citrixdaasclient.AddRequestData(createPolicySetRequest, r.client).Execute() + if err != nil { + resp.Diagnostics.AddError( + "Error Creating Policy Set", + "TransactionId: "+citrixdaasclient.GetTransactionIdFromHttpResponse(httpResp)+ + "\nError message: "+util.ReadClientError(err), + ) + return + } + + // Create new policies + batchRequestModel, err := constructCreatePolicyBatchRequestModel(plan.Policies, policySetResponse.GetPolicySetGuid(), policySetResponse.GetName(), r.client, resp.Diagnostics) + if err != nil { + return + } + + successfulJobs, txId, err := citrixdaasclient.PerformBatchOperation(ctx, r.client, batchRequestModel) + if err != nil { + resp.Diagnostics.AddError( + "Error adding Policies to Policy Set "+policySetResponse.GetName(), + "TransactionId: "+txId+ + "\nError message: "+util.ReadClientError(err), + ) + } + + if successfulJobs < len(plan.Policies) { + errMsg := fmt.Sprintf("An error occurred while adding policies to the Policy Set. 
%d of %d policies were added to the Policy Set.", successfulJobs, len(plan.Policies)) + resp.Diagnostics.AddError( + "Error adding Policies to Policy Set "+policySetResponse.GetName(), + "TransactionId: "+txId+ + "\n"+errMsg, + ) + } + + // Try getting the new policy set with policy set GUID + policySet, err := getPolicySet(ctx, r.client, &resp.Diagnostics, policySetResponse.GetPolicySetGuid()) + if err != nil { + return + } + + if len(policySet.Policies) > 0 { + // Update Policy Priority + policyPriorityRequest := constructPolicyPriorityRequest(ctx, r.client, policySet, plan.Policies) + // Update policy priorities in the Policy Set + policyPriorityResponse, httpResp, err := citrixdaasclient.AddRequestData(policyPriorityRequest, r.client).Execute() + if err != nil || !policyPriorityResponse { + resp.Diagnostics.AddError( + "Error Changing Policy Priorities in Policy Set "+policySet.GetPolicySetGuid(), + "TransactionId: "+citrixdaasclient.GetTransactionIdFromHttpResponse(httpResp)+ + "\nError message: "+util.ReadClientError(err), + ) + } + } + + // Try getting the new policy set with policy set GUID + policySet, err = getPolicySet(ctx, r.client, &resp.Diagnostics, policySetResponse.GetPolicySetGuid()) + if err != nil { + return + } + + policies, err := getPolicies(ctx, r.client, &resp.Diagnostics, policySetResponse.GetPolicySetGuid()) + if err != nil { + return + } + + util.RefreshList(plan.Scopes, policySet.Scopes) + + // Map response body to schema and populate Computed attribute values + plan = plan.RefreshPropertyValues(policySet, policies) + + // Set state to fully populated data + diags = resp.State.Set(ctx, plan) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } +} + +// Read implements resource.Resource. +func (r *policySetResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + defer util.PanicHandler(&resp.Diagnostics) + + var state PolicySetResourceModel + diags := req.State.Get(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + // Get refreshed policy set properties from Orchestration + policySet, err := readPolicySet(ctx, r.client, resp, state.Id.ValueString()) + if err != nil { + return + } + + policies, err := readPolicies(ctx, r.client, resp, state.Id.ValueString()) + if err != nil { + return + } + + util.RefreshList(state.Scopes, policySet.Scopes) + + state = state.RefreshPropertyValues(policySet, policies) + + // Set refreshed state + diags = resp.State.Set(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } +} + +// Update implements resource.Resource. +func (r *policySetResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + defer util.PanicHandler(&resp.Diagnostics) + + // Retrieve values from plan + var plan PolicySetResourceModel + diags := req.Plan.Get(ctx, &plan) + resp.Diagnostics.Append(diags...) 
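+
+	// Policies are replaced wholesale on update: when the policies attribute
+	// differs between state and plan, every existing policy in the set is
+	// batch-deleted, the planned policies are batch-created, and priorities
+	// are re-ranked to match plan order before the policy set itself is updated.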
+	if resp.Diagnostics.HasError() {
+		return
+	}
+
+	// Get refreshed policy set properties from Orchestration
+	policySetId := plan.Id.ValueString()
+	policySetName := plan.Name.ValueString()
+
+	_, err := getPolicySet(ctx, r.client, &resp.Diagnostics, policySetId)
+	if err != nil {
+		return
+	}
+
+	policySets, err := getPolicySets(ctx, r.client, &resp.Diagnostics)
+	if err != nil {
+		return
+	}
+
+	for _, policySet := range policySets {
+		if strings.EqualFold(policySet.GetName(), policySetName) && !strings.EqualFold(policySet.GetPolicySetGuid(), policySetId) {
+			resp.Diagnostics.AddError(
+				"Error Updating Policy Set "+policySetId,
+				"Policy Set with name "+policySetName+" already exists",
+			)
+			return
+		}
+	}
+
+	stateAndPlanDiff, _ := req.State.Raw.Diff(req.Plan.Raw)
+	var policiesModified bool
+	for _, diff := range stateAndPlanDiff {
+		if diff.Path.Steps()[0].Equal(tftypes.AttributeName("policies")) {
+			policiesModified = true
+			break
+		}
+	}
+
+	if policiesModified {
+		// Get Remote Policies
+		policies, err := getPolicies(ctx, r.client, &resp.Diagnostics, policySetId)
+		if err != nil {
+			return
+		}
+
+		// Setup batch requests
+		deletePolicyBatchRequestItems := []citrixorchestration.BatchRequestItemModel{}
+		batchApiHeaders, httpResp, err := generateBatchApiHeaders(r.client)
+		if err != nil {
+			resp.Diagnostics.AddError(
+				"Error updating policies in policy set "+policySetName,
+				"TransactionId: "+citrixdaasclient.GetTransactionIdFromHttpResponse(httpResp)+
+					"\nCould not update policies within the policy set, unexpected error: "+util.ReadClientError(err),
+			)
+			return
+		}
+		// Clean up all the policies, settings, and filters in policy set
+		for index, policy := range policies.Items {
+			relativeUrl := fmt.Sprintf("/gpo/policies/%s", policy.GetPolicyGuid())
+
+			var batchRequestItem citrixorchestration.BatchRequestItemModel
+			batchRequestItem.SetReference(fmt.Sprintf("deletePolicy%s", strconv.Itoa(index)))
+			batchRequestItem.SetMethod(http.MethodDelete)
+			batchRequestItem.SetRelativeUrl(r.client.GetBatchRequestItemRelativeUrl(relativeUrl))
+			batchRequestItem.SetHeaders(batchApiHeaders)
+			deletePolicyBatchRequestItems = append(deletePolicyBatchRequestItems, batchRequestItem)
+		}
+
+		var deletePolicyBatchRequestModel citrixorchestration.BatchRequestModel
+		deletePolicyBatchRequestModel.SetItems(deletePolicyBatchRequestItems)
+
+		successfulJobs, txId, err := citrixdaasclient.PerformBatchOperation(ctx, r.client, deletePolicyBatchRequestModel)
+		if err != nil {
+			resp.Diagnostics.AddError(
+				"Error cleaning up Policies in Policy Set "+policySetName,
+				"TransactionId: "+txId+
+					"\nError message: "+util.ReadClientError(err),
+			)
+			return
+		}
+
+		if successfulJobs < len(deletePolicyBatchRequestItems) {
+			errMsg := fmt.Sprintf("An error occurred while deleting policies in the Policy Set. %d of %d policies were deleted from the Policy Set.", successfulJobs, len(deletePolicyBatchRequestItems))
+			resp.Diagnostics.AddError(
+				"Error deleting Policies from Policy Set "+policySetName,
+				"TransactionId: "+txId+
+					"\n"+errMsg,
+			)
+
+			return
+		}
+
+		// Create all the policies, settings, and filters in the plan
+		createPoliciesBatchRequestModel, err := constructCreatePolicyBatchRequestModel(plan.Policies, plan.Id.ValueString(), plan.Name.ValueString(), r.client, resp.Diagnostics)
+		if err != nil {
+			return
+		}
+
+		successfulJobs, txId, err = citrixdaasclient.PerformBatchOperation(ctx, r.client, createPoliciesBatchRequestModel)
+		if err != nil {
+			resp.Diagnostics.AddError(
+				"Error adding Policies to Policy Set "+policySetName,
+				"TransactionId: "+txId+
+					"\nError message: "+util.ReadClientError(err),
+			)
+			return
+		}
+
+		if successfulJobs < len(createPoliciesBatchRequestModel.Items) {
+			errMsg := fmt.Sprintf("An error occurred while adding policies to the Policy Set. %d of %d policies were added to the Policy Set.", successfulJobs, len(createPoliciesBatchRequestModel.Items))
+			resp.Diagnostics.AddError(
+				"Error adding Policies to Policy Set "+policySetName,
+				"TransactionId: "+txId+
+					"\n"+errMsg,
+			)
+
+			return
+		}
+
+		// Update policy priority
+		policySet, err := getPolicySet(ctx, r.client, &resp.Diagnostics, policySetId)
+		if err != nil {
+			return
+		}
+
+		if len(policySet.Policies) > 0 {
+			policyPriorityRequest := constructPolicyPriorityRequest(ctx, r.client, policySet, plan.Policies)
+			// Update policy priorities in the Policy Set
+			policyPriorityResponse, httpResp, err := citrixdaasclient.AddRequestData(policyPriorityRequest, r.client).Execute()
+			if err != nil || !policyPriorityResponse {
+				resp.Diagnostics.AddError(
+					"Error updating Policy Priorities in Policy Set "+policySet.GetPolicySetGuid(),
+					"TransactionId: "+citrixdaasclient.GetTransactionIdFromHttpResponse(httpResp)+
+						"\nError message: "+util.ReadClientError(err),
+				)
+				return
+			}
+		}
+	}
+
+	// Construct the update model
+	var editPolicySetRequestBody = &citrixorchestration.PolicySetRequest{}
+	editPolicySetRequestBody.SetName(policySetName)
+	editPolicySetRequestBody.SetDescription(plan.Description.ValueString())
+	scopeIds, err := fetchScopeIdsByNames(ctx, r.client, resp.Diagnostics, plan.Scopes)
+	if err != nil {
+		return
+	}
+	editPolicySetRequestBody.SetScopes(util.ConvertBaseStringArrayToPrimitiveStringArray(scopeIds))
+
+	editPolicySetRequest := r.client.ApiClient.GpoDAAS.GpoUpdateGpoPolicySet(ctx, policySetId)
+	editPolicySetRequest = editPolicySetRequest.PolicySetRequest(*editPolicySetRequestBody)
+
+	// Update Policy Set
+	httpResp, err := citrixdaasclient.AddRequestData(editPolicySetRequest, r.client).Execute()
+	if err != nil {
+		resp.Diagnostics.AddError(
+			"Error Updating Policy Set",
+			"TransactionId: "+citrixdaasclient.GetTransactionIdFromHttpResponse(httpResp)+
+				"\nError message: "+util.ReadClientError(err),
+		)
+		return
+	}
+
+	// Try getting the new policy set with policy set GUID
+	policySet, err := getPolicySet(ctx, r.client, &resp.Diagnostics, policySetId)
+	if err != nil {
+		return
+	}
+
+	policies, err := getPolicies(ctx, r.client, &resp.Diagnostics, policySetId)
+	if err != nil {
+		return
+	}
+
+	util.RefreshList(plan.Scopes, policySet.Scopes)
+
+	// Map response body to schema and populate Computed attribute values
+	plan = plan.RefreshPropertyValues(policySet, policies)
+
+	// Set state to fully populated data
+	diags = resp.State.Set(ctx, plan)
+	resp.Diagnostics.Append(diags...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+}
+
+// Delete implements resource.Resource.
+func (r *policySetResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) {
+	defer util.PanicHandler(&resp.Diagnostics)
+
+	// Retrieve values from state
+	var state PolicySetResourceModel
+	diags := req.State.Get(ctx, &state)
+	resp.Diagnostics.Append(diags...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+
+	policySetId := state.Id.ValueString()
+	policySetName := state.Name.ValueString()
+	// Get delivery groups and check if the current policy set is assigned to one of them
+	getDeliveryGroupsRequest := r.client.ApiClient.DeliveryGroupsAPIsDAAS.DeliveryGroupsGetDeliveryGroups(ctx)
+	deliveryGroups, httpResp, err := citrixdaasclient.AddRequestData(getDeliveryGroupsRequest, r.client).Execute()
+	if err != nil {
+		resp.Diagnostics.AddError(
+			"Error unassigning policy set "+policySetName+" from delivery groups",
+			"TransactionId: "+citrixdaasclient.GetTransactionIdFromHttpResponse(httpResp)+
+				"\nCould not get delivery group associated with the policy set, unexpected error: "+util.ReadClientError(err),
+		)
+		return
+	}
+	associatedDeliveryGroupIds := []string{}
+	for _, deliveryGroup := range deliveryGroups.Items {
+		if deliveryGroup.GetPolicySetGuid() == policySetId {
+			associatedDeliveryGroupIds = append(associatedDeliveryGroupIds, deliveryGroup.GetId())
+		}
+	}
+
+	if len(associatedDeliveryGroupIds) > 0 {
+		// Unassign policy set from delivery groups to unblock delete operation
+		batchApiHeaders, httpResp, err := generateBatchApiHeaders(r.client)
+		if err != nil {
+			resp.Diagnostics.AddError(
+				"Error unassigning policy set "+policySetName+" from delivery groups",
+				"TransactionId: "+citrixdaasclient.GetTransactionIdFromHttpResponse(httpResp)+
+					"\nCould not remove policy set from delivery groups, unexpected error: "+util.ReadClientError(err),
+			)
+			return
+		}
+		batchRequestItems := []citrixorchestration.BatchRequestItemModel{}
+		var editDeliveryGroupRequestBody citrixorchestration.EditDeliveryGroupRequestModel
+		editDeliveryGroupRequestBody.SetPolicySetGuid(util.DefaultSitePolicySetId)
+		editDeliveryGroupStringBody, err := util.ConvertToString(editDeliveryGroupRequestBody)
+		if err != nil {
+			resp.Diagnostics.AddError(
+				"Error unassigning policy set "+policySetName+" from delivery groups",
+				"An unexpected error occurred: "+err.Error(),
+			)
+			return
+		}
+
+		for index, deliveryGroupId := range associatedDeliveryGroupIds {
+			relativeUrl := fmt.Sprintf("/DeliveryGroups/%s", deliveryGroupId)
+			var batchRequestItem citrixorchestration.BatchRequestItemModel
+			batchRequestItem.SetReference(strconv.Itoa(index))
+			batchRequestItem.SetMethod(http.MethodPatch)
+			batchRequestItem.SetRelativeUrl(r.client.GetBatchRequestItemRelativeUrl(relativeUrl))
+			batchRequestItem.SetBody(editDeliveryGroupStringBody)
+			batchRequestItem.SetHeaders(batchApiHeaders)
+			batchRequestItems = append(batchRequestItems, batchRequestItem)
+		}
+
+		if len(batchRequestItems) > 0 {
+			// Patch each associated delivery group back to the default site policy set
+			var batchRequestModel citrixorchestration.BatchRequestModel
+			batchRequestModel.SetItems(batchRequestItems)
+			successfulJobs, txId, err := citrixdaasclient.PerformBatchOperation(ctx, r.client, batchRequestModel)
+			if err != nil {
+				resp.Diagnostics.AddError(
+					"Error unassigning policy set "+policySetName+" from delivery groups",
+					"TransactionId: "+txId+
+						"\nError Message: "+util.ReadClientError(err),
+				)
+				return
+ } + + if successfulJobs < len(batchRequestItems) { + errMsg := fmt.Sprintf("An error occurred removing policy set %s from delivery groups. Unassigned from %d of %d delivery groups.", policySetName, successfulJobs, len(batchRequestItems)) + resp.Diagnostics.AddError( + "Error deleting Policy Set "+policySetName, + "TransactionId: "+txId+ + "\n"+errMsg, + ) + + return + } + } + } + + // Delete existing Policy Set + deletePolicySetRequest := r.client.ApiClient.GpoDAAS.GpoDeleteGpoPolicySet(ctx, policySetId) + httpResp, err = citrixdaasclient.AddRequestData(deletePolicySetRequest, r.client).Execute() + if err != nil && httpResp.StatusCode != http.StatusNotFound { + resp.Diagnostics.AddError( + "Error Deleting Policy Set "+policySetName, + "TransactionId: "+citrixdaasclient.GetTransactionIdFromHttpResponse(httpResp)+ + "\nError message: "+util.ReadClientError(err), + ) + return + } +} + +// ImportState implements resource.ResourceWithImportState. +func (r *policySetResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + // Retrieve import ID and save to id attribute + resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp) +} + +// Gets the policy set and logs any errors +func getPolicySets(ctx context.Context, client *citrixdaasclient.CitrixDaasClient, diagnostics *diag.Diagnostics) ([]citrixorchestration.PolicySetResponse, error) { + getPolicySetsRequest := client.ApiClient.GpoDAAS.GpoReadGpoPolicySets(ctx) + policySets, httpResp, err := citrixdaasclient.ExecuteWithRetry[*citrixorchestration.CollectionEnvelopeOfPolicySetResponse](getPolicySetsRequest, client) + if err != nil { + diagnostics.AddError( + "Error Reading Policy Sets", + "TransactionId: "+citrixdaasclient.GetTransactionIdFromHttpResponse(httpResp)+ + "\nError message: "+util.ReadClientError(err), + ) + } + + return policySets.Items, err +} + +func getPolicySet(ctx context.Context, client *citrixdaasclient.CitrixDaasClient, diagnostics *diag.Diagnostics, policySetId string) (*citrixorchestration.PolicySetResponse, error) { + getPolicySetRequest := client.ApiClient.GpoDAAS.GpoReadGpoPolicySet(ctx, policySetId) + getPolicySetRequest = getPolicySetRequest.WithPolicies(true) + policySet, httpResp, err := citrixdaasclient.ExecuteWithRetry[*citrixorchestration.PolicySetResponse](getPolicySetRequest, client) + if err != nil { + diagnostics.AddError( + "Error Reading Policy Set "+policySetId, + "TransactionId: "+citrixdaasclient.GetTransactionIdFromHttpResponse(httpResp)+ + "\nError message: "+util.ReadClientError(err), + ) + } + + return policySet, err +} + +func readPolicySet(ctx context.Context, client *citrixdaasclient.CitrixDaasClient, resp *resource.ReadResponse, policySetId string) (*citrixorchestration.PolicySetResponse, error) { + getPolicySetRequest := client.ApiClient.GpoDAAS.GpoReadGpoPolicySet(ctx, policySetId) + getPolicySetRequest = getPolicySetRequest.WithPolicies(true) + policySet, _, err := util.ReadResource[*citrixorchestration.PolicySetResponse](getPolicySetRequest, ctx, client, resp, "PolicySet", policySetId) + return policySet, err +} + +// Gets the policy set and logs any errors +func getPolicies(ctx context.Context, client *citrixdaasclient.CitrixDaasClient, diagnostics *diag.Diagnostics, policySetId string) (*citrixorchestration.CollectionEnvelopeOfPolicyResponse, error) { + getPoliciesRequest := client.ApiClient.GpoDAAS.GpoReadGpoPolicies(ctx) + getPoliciesRequest = getPoliciesRequest.PolicySetGuid(policySetId) + getPoliciesRequest = 
getPoliciesRequest.WithFilters(true) + getPoliciesRequest = getPoliciesRequest.WithSettings(true) + policies, httpResp, err := citrixdaasclient.ExecuteWithRetry[*citrixorchestration.CollectionEnvelopeOfPolicyResponse](getPoliciesRequest, client) + if err != nil { + diagnostics.AddError( + "Error Reading Policies in Policy Set "+policySetId, + "TransactionId: "+citrixdaasclient.GetTransactionIdFromHttpResponse(httpResp)+ + "\nError message: "+util.ReadClientError(err), + ) + } + + return policies, err +} + +func readPolicies(ctx context.Context, client *citrixdaasclient.CitrixDaasClient, resp *resource.ReadResponse, policySetId string) (*citrixorchestration.CollectionEnvelopeOfPolicyResponse, error) { + getPoliciesRequest := client.ApiClient.GpoDAAS.GpoReadGpoPolicies(ctx) + getPoliciesRequest = getPoliciesRequest.PolicySetGuid(policySetId) + getPoliciesRequest = getPoliciesRequest.WithFilters(true) + getPoliciesRequest = getPoliciesRequest.WithSettings(true) + policies, _, err := util.ReadResource[*citrixorchestration.CollectionEnvelopeOfPolicyResponse](getPoliciesRequest, ctx, client, resp, "Policies", policySetId) + return policies, err +} + +func generateBatchApiHeaders(client *citrixdaasclient.CitrixDaasClient) ([]citrixorchestration.NameValueStringPairModel, *http.Response, error) { + headers := []citrixorchestration.NameValueStringPairModel{} + + cwsAuthToken, httpResp, err := client.SignIn() + var token string + if err != nil { + return headers, httpResp, err + } + + if cwsAuthToken != "" { + token = strings.Split(cwsAuthToken, "=")[1] + var header citrixorchestration.NameValueStringPairModel + header.SetName("Authorization") + header.SetValue("Bearer " + token) + headers = append(headers, header) + } + + return headers, httpResp, err +} + +func constructCreatePolicyBatchRequestModel(policiesToCreate []PolicyModel, policySetGuid string, policySetName string, client *citrixdaasclient.CitrixDaasClient, diagnostic diag.Diagnostics) (citrixorchestration.BatchRequestModel, error) { + batchRequestItems := []citrixorchestration.BatchRequestItemModel{} + var batchRequestModel citrixorchestration.BatchRequestModel + + for policyIndex, policyToCreate := range policiesToCreate { + var createPolicyRequest = citrixorchestration.PolicyRequest{} + createPolicyRequest.SetName(policyToCreate.Name.ValueString()) + createPolicyRequest.SetDescription(policyToCreate.Description.ValueString()) + createPolicyRequest.SetIsEnabled(policyToCreate.IsEnabled.ValueBool()) + // Add Policy Settings + policySettings := []citrixorchestration.SettingRequest{} + for _, policySetting := range policyToCreate.PolicySettings { + settingRequest := citrixorchestration.SettingRequest{} + settingRequest.SetSettingName(policySetting.Name.ValueString()) + settingRequest.SetUseDefault(policySetting.UseDefault.ValueBool()) + settingRequest.SetSettingValue(policySetting.Value.ValueString()) + policySettings = append(policySettings, settingRequest) + } + createPolicyRequest.SetSettings(policySettings) + + // Add Policy Filters + policyFilters := []citrixorchestration.FilterRequest{} + for _, policyFilter := range policyToCreate.PolicyFilters { + filterRequest := citrixorchestration.FilterRequest{} + filterRequest.SetFilterType(policyFilter.Type.ValueString()) + filterRequest.SetFilterData(policyFilter.Data.ValueString()) + filterRequest.SetIsAllowed(policyFilter.IsAllowed.ValueBool()) + filterRequest.SetIsEnabled(policyFilter.IsEnabled.ValueBool()) + policyFilters = append(policyFilters, filterRequest) + } + 
createPolicyRequest.SetFilters(policyFilters)
+
+		createPolicyRequestBodyString, err := util.ConvertToString(createPolicyRequest)
+		if err != nil {
+			diagnostic.AddError(
+				"Error adding Policy "+policyToCreate.Name.ValueString()+" to Policy Set "+policySetName,
+				"An unexpected error occurred: "+err.Error(),
+			)
+			return batchRequestModel, err
+		}
+
+		batchApiHeaders, httpResp, err := generateBatchApiHeaders(client)
+		if err != nil {
+			diagnostic.AddError(
+				"Error adding policy to policy set "+policySetName,
+				"TransactionId: "+citrixdaasclient.GetTransactionIdFromHttpResponse(httpResp)+
+					"\nCould not add policies to the policy set, unexpected error: "+util.ReadClientError(err),
+			)
+			return batchRequestModel, err
+		}
+
+		relativeUrl := fmt.Sprintf("/gpo/policies?policySetGuid=%s", policySetGuid)
+
+		var batchRequestItem citrixorchestration.BatchRequestItemModel
+		batchRequestItem.SetReference(fmt.Sprintf("createPolicy%d", policyIndex))
+		batchRequestItem.SetMethod(http.MethodPost)
+		batchRequestItem.SetRelativeUrl(client.GetBatchRequestItemRelativeUrl(relativeUrl))
+		batchRequestItem.SetHeaders(batchApiHeaders)
+		batchRequestItem.SetBody(createPolicyRequestBodyString)
+		batchRequestItems = append(batchRequestItems, batchRequestItem)
+	}
+
+	batchRequestModel.SetItems(batchRequestItems)
+	return batchRequestModel, nil
+}
+
+func constructPolicyPriorityRequest(ctx context.Context, client *citrixdaasclient.CitrixDaasClient, policySet *citrixorchestration.PolicySetResponse, plannedPolicies []PolicyModel) citrixorchestration.ApiGpoRankGpoPoliciesRequest {
+	// 1. Construct map of policy name: policy id
+	// 2. Construct array of policy id based on the policy name order
+	// 3. post policy priority
+	policyNameIdMap := map[types.String]types.String{}
+	if policySet.GetPolicies() != nil {
+		for _, policy := range policySet.GetPolicies() {
+			policyNameIdMap[types.StringValue(policy.GetPolicyName())] = types.StringValue(policy.GetPolicyGuid())
+		}
+	}
+	policyPriority := []types.String{}
+	for _, policyToCreate := range plannedPolicies {
+		policyPriority = append(policyPriority, policyNameIdMap[policyToCreate.Name])
+	}
+
+	policySetId := policySet.GetPolicySetGuid()
+	createPolicyPriorityRequest := client.ApiClient.GpoDAAS.GpoRankGpoPolicies(ctx)
+	createPolicyPriorityRequest = createPolicyPriorityRequest.PolicySetGuid(policySetId)
+	createPolicyPriorityRequest = createPolicyPriorityRequest.RequestBody(util.ConvertBaseStringArrayToPrimitiveStringArray(policyPriority))
+	return createPolicyPriorityRequest
+}
+
+func fetchScopeIdsByNames(ctx context.Context, client *citrixdaasclient.CitrixDaasClient, diagnostics diag.Diagnostics, scopeNames []types.String) ([]types.String, error) {
+	getAdminScopesRequest := client.ApiClient.AdminAPIsDAAS.AdminGetAdminScopes(ctx)
+	// Fetch all admin scopes to map scope names to scope IDs
+	getScopesResponse, httpResp, err := citrixdaasclient.AddRequestData(getAdminScopesRequest, client).Execute()
+	if err != nil || getScopesResponse == nil {
+		diagnostics.AddError(
+			"Error fetching scope IDs by names",
+			"TransactionId: "+citrixdaasclient.GetTransactionIdFromHttpResponse(httpResp)+
+				"\nError message: "+util.ReadClientError(err),
+		)
+		return nil, err
+	}
+
+	scopeNameIdMap := map[string]types.String{}
+	for _, scope := range getScopesResponse.Items {
+		scopeNameIdMap[scope.GetName()] = types.StringValue(scope.GetId())
+	}
+
+	scopeIds := []types.String{}
+	for _, scopeName := range scopeNames {
+		scopeIds = append(scopeIds, scopeNameIdMap[scopeName.ValueString()])
+	}
+
+ return scopeIds, nil +} diff --git a/internal/daas/policies/policy_set_resource_model.go b/internal/daas/policies/policy_set_resource_model.go new file mode 100644 index 0000000..9e47cf5 --- /dev/null +++ b/internal/daas/policies/policy_set_resource_model.go @@ -0,0 +1,106 @@ +// Copyright © 2023. Citrix Systems, Inc. + +package policies + +import ( + "sort" + + citrixorchestration "github.com/citrix/citrix-daas-rest-go/citrixorchestration" + "github.com/citrix/terraform-provider-citrix/internal/util" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type PolicySettingModel struct { + Name types.String `tfsdk:"name"` + UseDefault types.Bool `tfsdk:"use_default"` + Value types.String `tfsdk:"value"` +} + +type PolicyFilterModel struct { + Type types.String `tfsdk:"type"` + Data types.String `tfsdk:"data"` + IsAllowed types.Bool `tfsdk:"is_allowed"` + IsEnabled types.Bool `tfsdk:"is_enabled"` +} + +type PolicyModel struct { + Name types.String `tfsdk:"name"` + Description types.String `tfsdk:"description"` + IsEnabled types.Bool `tfsdk:"is_enabled"` + PolicySettings []PolicySettingModel `tfsdk:"policy_settings"` + PolicyFilters []PolicyFilterModel `tfsdk:"policy_filters"` +} + +type PolicySetResourceModel struct { + Id types.String `tfsdk:"id"` + Name types.String `tfsdk:"name"` + Type types.String `tfsdk:"type"` + Description types.String `tfsdk:"description"` + Scopes []types.String `tfsdk:"scopes"` + IsAssigned types.Bool `tfsdk:"is_assigned"` + Policies []PolicyModel `tfsdk:"policies"` +} + +func (r PolicySetResourceModel) RefreshPropertyValues(policySet *citrixorchestration.PolicySetResponse, policies *citrixorchestration.CollectionEnvelopeOfPolicyResponse) PolicySetResourceModel { + // Set required values + r.Id = types.StringValue(policySet.GetPolicySetGuid()) + r.Name = types.StringValue(policySet.GetName()) + r.Type = types.StringValue(string(policySet.GetPolicySetType())) + + // Set optional values + if policySet.GetDescription() != "" { + r.Description = types.StringValue(policySet.GetDescription()) + } else { + r.Description = types.StringNull() + } + + if policySet.GetScopes() != nil { + r.Scopes = util.ConvertPrimitiveStringArrayToBaseStringArray(policySet.GetScopes()) + } else { + r.Scopes = nil + } + + if policies != nil && policies.Items != nil { + policyItems := policies.Items + sort.Slice(policyItems, func(i, j int) bool { + return policyItems[i].GetPriority() < policyItems[j].GetPriority() + }) + r.Policies = []PolicyModel{} + for _, policy := range policyItems { + policyModel := PolicyModel{ + Name: types.StringValue(policy.GetPolicyName()), + Description: types.StringValue(policy.GetDescription()), + IsEnabled: types.BoolValue(policy.GetIsEnabled()), + } + + policyModel.PolicySettings = []PolicySettingModel{} + if policy.GetSettings() != nil && len(policy.GetSettings()) != 0 { + for _, setting := range policy.GetSettings() { + policyModel.PolicySettings = append(policyModel.PolicySettings, PolicySettingModel{ + Name: types.StringValue(setting.GetSettingName()), + UseDefault: types.BoolValue(setting.GetUseDefault()), + Value: types.StringValue(setting.GetSettingValue()), + }) + } + } + + policyModel.PolicyFilters = []PolicyFilterModel{} + if policy.GetFilters() != nil && len(policy.GetFilters()) != 0 { + for _, filter := range policy.GetFilters() { + policyModel.PolicyFilters = append(policyModel.PolicyFilters, PolicyFilterModel{ + Type: types.StringValue(filter.GetFilterType()), + IsAllowed: types.BoolValue(filter.GetIsAllowed()), + IsEnabled: 
types.BoolValue(filter.GetIsEnabled()), + Data: types.StringValue(filter.GetFilterData()), + }) + } + } + + r.Policies = append(r.Policies, policyModel) + } + } + + r.IsAssigned = types.BoolValue(policySet.GetIsAssigned()) + + return r +} diff --git a/internal/daas/resources/machine_catalog/machine_catalog_resource.go b/internal/daas/resources/machine_catalog/machine_catalog_resource.go deleted file mode 100644 index ac5293b..0000000 --- a/internal/daas/resources/machine_catalog/machine_catalog_resource.go +++ /dev/null @@ -1,2160 +0,0 @@ -// Copyright © 2023. Citrix Systems, Inc. - -package machine_catalog - -import ( - "context" - "encoding/base64" - "fmt" - "net/http" - "regexp" - "strconv" - "strings" - - citrixorchestration "github.com/citrix/citrix-daas-rest-go/citrixorchestration" - citrixdaasclient "github.com/citrix/citrix-daas-rest-go/client" - "github.com/citrix/terraform-provider-citrix/internal/util" - "github.com/citrix/terraform-provider-citrix/internal/validators" - - "github.com/hashicorp/terraform-plugin-framework-validators/int64validator" - "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" - "github.com/hashicorp/terraform-plugin-framework-validators/objectvalidator" - "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" - "github.com/hashicorp/terraform-plugin-framework/path" - "github.com/hashicorp/terraform-plugin-framework/resource" - "github.com/hashicorp/terraform-plugin-framework/resource/schema" - "github.com/hashicorp/terraform-plugin-framework/resource/schema/boolplanmodifier" - "github.com/hashicorp/terraform-plugin-framework/resource/schema/int64planmodifier" - "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" - "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" - "github.com/hashicorp/terraform-plugin-framework/schema/validator" - "github.com/hashicorp/terraform-plugin-framework/types" -) - -// Ensure the implementation satisfies the expected interfaces. -var ( - _ resource.Resource = &machineCatalogResource{} - _ resource.ResourceWithConfigure = &machineCatalogResource{} - _ resource.ResourceWithImportState = &machineCatalogResource{} - _ resource.ResourceWithValidateConfig = &machineCatalogResource{} -) - -// NewMachineCatalogResource is a helper function to simplify the provider implementation. -func NewMachineCatalogResource() resource.Resource { - return &machineCatalogResource{} -} - -// machineCatalogResource is the resource implementation. -type machineCatalogResource struct { - client *citrixdaasclient.CitrixDaasClient -} - -// Metadata returns the resource type name. -func (r *machineCatalogResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { - resp.TypeName = req.ProviderTypeName + "_daas_machine_catalog" -} - -// Schema defines the schema for the resource. 
-func (r *machineCatalogResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { - resp.Schema = schema.Schema{ - Description: "Manages a machine catalog.", - Attributes: map[string]schema.Attribute{ - "id": schema.StringAttribute{ - Description: "GUID identifier of the machine catalog.", - Computed: true, - PlanModifiers: []planmodifier.String{ - stringplanmodifier.UseStateForUnknown(), - }, - }, - "name": schema.StringAttribute{ - Description: "Name of the machine catalog.", - Required: true, - PlanModifiers: []planmodifier.String{ - stringplanmodifier.RequiresReplace(), - }, - }, - "description": schema.StringAttribute{ - Description: "Description of the machine catalog.", - Optional: true, - }, - "is_power_managed": schema.BoolAttribute{ - Description: "Specify if the machines in the machine catalog will be power managed.", - Optional: true, - PlanModifiers: []planmodifier.Bool{ - boolplanmodifier.RequiresReplace(), - }, - }, - "is_remote_pc": schema.BoolAttribute{ - Description: "Specify if this catalog is for Remote PC access.", - Optional: true, - PlanModifiers: []planmodifier.Bool{ - boolplanmodifier.RequiresReplace(), - }, - }, - "allocation_type": schema.StringAttribute{ - Description: "Denotes how the machines in the catalog are allocated to a user. Choose between `Static` and `Random`.", - Required: true, - Validators: []validator.String{ - util.GetValidatorFromEnum(citrixorchestration.AllowedAllocationTypeEnumValues), - }, - PlanModifiers: []planmodifier.String{ - stringplanmodifier.RequiresReplace(), - }, - }, - "session_support": schema.StringAttribute{ - Description: "Session support type. Choose between `SingleSession` and `MultiSession`. Session support should be SingleSession when `is_remote_pc = true`", - Required: true, - Validators: []validator.String{ - util.GetValidatorFromEnum(citrixorchestration.AllowedSessionSupportEnumValues), - }, - PlanModifiers: []planmodifier.String{ - stringplanmodifier.RequiresReplace(), - }, - }, - "zone": schema.StringAttribute{ - Description: "Id of the zone the machine catalog is associated with.", - Required: true, - }, - "vda_upgrade_type": schema.StringAttribute{ - Description: "Type of Vda Upgrade. Choose between LTSR and CR. When omitted, Vda Upgrade is disabled.", - Optional: true, - Validators: []validator.String{ - stringvalidator.OneOf( - "LTSR", - "CR", - ), - }, - }, - "provisioning_type": schema.StringAttribute{ - Description: "Specifies how the machines are provisioned in the catalog.", - Required: true, - Validators: []validator.String{ - stringvalidator.OneOf( - string(citrixorchestration.PROVISIONINGTYPE_MCS), - string(citrixorchestration.PROVISIONINGTYPE_MANUAL), - ), - }, - PlanModifiers: []planmodifier.String{ - stringplanmodifier.RequiresReplace(), - }, - }, - "machine_accounts": schema.ListNestedAttribute{ - Description: "List of machine accounts to add to the catalog. Only to be used when using `provisioning_type = MANUAL`", - Optional: true, - NestedObject: schema.NestedAttributeObject{ - Attributes: map[string]schema.Attribute{ - "hypervisor": schema.StringAttribute{ - Description: "The Id of the hypervisor in which the machines reside. Required only if `is_power_managed = true`", - Optional: true, - }, - "machines": schema.ListNestedAttribute{ - Description: "List of machines", - Required: true, - NestedObject: schema.NestedAttributeObject{ - Attributes: map[string]schema.Attribute{ - "machine_name": schema.StringAttribute{ - Description: "The name of the machine. 
Must be in format DOMAIN\\MACHINE.", - Required: true, - Validators: []validator.String{ - stringvalidator.RegexMatches(regexp.MustCompile(util.SamRegex), "must be in format DOMAIN\\MACHINE"), - }, - }, - "region": schema.StringAttribute{ - Description: "**[Azure, GCP: Required]** The region in which the machine resides. Required only if `is_power_managed = true`", - Optional: true, - }, - "resource_group_name": schema.StringAttribute{ - Description: "**[Azure: Required]** The resource group in which the machine resides. Required only if `is_power_managed = true`", - Optional: true, - }, - "project_name": schema.StringAttribute{ - Description: "**[GCP: Required]** The project name in which the machine resides. Required only if `is_power_managed = true`", - Optional: true, - }, - "availability_zone": schema.StringAttribute{ - Description: "**[AWS: Required]** The availability zone in which the machine resides. Required only if `is_power_managed = true`", - Optional: true, - }, - }, - }, - Validators: []validator.List{ - listvalidator.SizeAtLeast(1), - }, - }, - }, - }, - Validators: []validator.List{ - listvalidator.SizeAtLeast(1), - }, - }, - "remote_pc_ous": schema.ListNestedAttribute{ - Description: "Organizational Units to be included in the Remote PC machine catalog. Only to be used when `is_remote_pc = true`. For adding machines, use `machine_accounts`.", - Optional: true, - NestedObject: schema.NestedAttributeObject{ - Attributes: map[string]schema.Attribute{ - "include_subfolders": schema.BoolAttribute{ - Description: "Specify if subfolders should be included.", - Required: true, - }, - "ou_name": schema.StringAttribute{ - Description: "Name of the OU.", - Required: true, - }, - }, - }, - Validators: []validator.List{ - listvalidator.SizeAtLeast(1), - }, - }, - "provisioning_scheme": schema.SingleNestedAttribute{ - Description: "Machine catalog provisioning scheme. Required when `provisioning_type = MCS`", - Optional: true, - Attributes: map[string]schema.Attribute{ - "hypervisor": schema.StringAttribute{ - Description: "Id of the hypervisor for creating the machines. Required only if using power managed machines.", - Required: true, - Validators: []validator.String{ - stringvalidator.RegexMatches(regexp.MustCompile(util.GuidRegex), "must be specified with ID in GUID format"), - }, - }, - "hypervisor_resource_pool": schema.StringAttribute{ - Description: "Id of the hypervisor resource pool that will be used for provisioning operations.", - Required: true, - Validators: []validator.String{ - stringvalidator.RegexMatches(regexp.MustCompile(util.GuidRegex), "must be specified with ID in GUID format"), - }, - }, - "azure_machine_config": schema.SingleNestedAttribute{ - Description: "Machine Configuration For Azure MCS catalog.", - Optional: true, - Attributes: map[string]schema.Attribute{ - "service_offering": schema.StringAttribute{ - Description: "The Azure VM Sku to use when creating machines.", - Required: true, - }, - "resource_group": schema.StringAttribute{ - Description: "The Azure Resource Group where the image VHD / managed disk / snapshot for creating machines is located.", - Required: true, - }, - "master_image": schema.StringAttribute{ - Description: "The name of the virtual machine snapshot or VM template that will be used. This identifies the hard disk to be used and the default values for the memory and processors. 
Omit this field if you want to use gallery_image.", - Optional: true, - }, - "storage_account": schema.StringAttribute{ - Description: "The Azure Storage Account where the image VHD for creating machines is located. Only applicable to Azure VHD image blob.", - Optional: true, - Validators: []validator.String{ - stringvalidator.AlsoRequires(path.Expressions{ - path.MatchRelative().AtParent().AtName("container"), - }...), - stringvalidator.AlsoRequires(path.Expressions{ - path.MatchRelative().AtParent().AtName("resource_group"), - }...), - }, - }, - "container": schema.StringAttribute{ - Description: "The Azure Storage Account Container where the image VHD for creating machines is located. Only applicable to Azure VHD image blob.", - Optional: true, - Validators: []validator.String{ - stringvalidator.AlsoRequires(path.Expressions{ - path.MatchRelative().AtParent().AtName("storage_account"), - }...), - stringvalidator.AlsoRequires(path.Expressions{ - path.MatchRelative().AtParent().AtName("resource_group"), - }...), - }, - }, - "gallery_image": schema.SingleNestedAttribute{ - Description: "Details of the Azure Image Gallery image to use for creating machines. Only Applicable to Azure Image Gallery image.", - Optional: true, - Attributes: map[string]schema.Attribute{ - "gallery": schema.StringAttribute{ - Description: "The Azure Image Gallery where the image for creating machines is located. Only applicable to Azure Image Gallery image.", - Required: true, - }, - "definition": schema.StringAttribute{ - Description: "The image definition for the image to be used in the Azure Image Gallery. Only applicable to Azure Image Gallery image.", - Required: true, - }, - "version": schema.StringAttribute{ - Description: "The image version for the image to be used in the Azure Image Gallery. Only applicable to Azure Image Gallery image.", - Required: true, - }, - }, - Validators: []validator.Object{ - objectvalidator.AlsoRequires(path.Expressions{ - path.MatchRelative().AtParent().AtName("resource_group"), - }...), - objectvalidator.ConflictsWith(path.Expressions{ - path.MatchRelative().AtParent().AtName("storage_account"), - }...), - objectvalidator.ConflictsWith(path.Expressions{ - path.MatchRelative().AtParent().AtName("container"), - }...), - objectvalidator.ConflictsWith(path.Expressions{ - path.MatchRelative().AtParent().AtName("master_image"), - }...), - }, - }, - "storage_type": schema.StringAttribute{ - Description: "Storage account type used for provisioned virtual machine disks on Azure. Storage types include: `Standard_LRS`, `StandardSSD_LRS` and `Premium_LRS`.", - Required: true, - Validators: []validator.String{ - stringvalidator.OneOf( - "Standard_LRS", - "StandardSSD_LRS", - "Premium_LRS", - ), - }, - }, - "vda_resource_group": schema.StringAttribute{ - Description: "Designated resource group where the VDA VMs will be located on Azure.", - Optional: true, - PlanModifiers: []planmodifier.String{ - stringplanmodifier.RequiresReplace(), - }, - }, - "use_managed_disks": schema.BoolAttribute{ - Description: "Indicate whether to use Azure managed disks for the provisioned virtual machine.", - Optional: true, - PlanModifiers: []planmodifier.Bool{ - boolplanmodifier.RequiresReplace(), - }, - }, - "machine_profile": schema.SingleNestedAttribute{ - Description: "The name of the virtual machine template that will be used to identify the default value for the tags, virtual machine size, boot diagnostics, host cache property of OS disk, accelerated networking and availability zone." + "
" + - "Required when identity_type is set to `AzureAD`", - Optional: true, - Attributes: map[string]schema.Attribute{ - "machine_profile_vm_name": schema.StringAttribute{ - Description: "The name of the machine profile virtual machine.", - Required: true, - }, - "machine_profile_resource_group": schema.StringAttribute{ - Description: "The resource group name where machine profile VM is located in.", - Required: true, - }, - }, - }, - "writeback_cache": schema.SingleNestedAttribute{ - Description: "Write-back Cache config. Leave this empty to disable Write-back Cache. Write-back Cache requires Machine image with Write-back Cache plugin installed.", - Optional: true, - Attributes: map[string]schema.Attribute{ - "persist_wbc": schema.BoolAttribute{ - Description: "Persist Write-back Cache", - Required: true, - }, - "wbc_disk_storage_type": schema.StringAttribute{ - Description: "Type of naming scheme. Choose between Numeric and Alphabetic.", - Required: true, - Validators: []validator.String{ - stringvalidator.OneOf( - "StandardSSD_LRS", - "Standard_LRS", - "Premium_LRS", - ), - }, - PlanModifiers: []planmodifier.String{ - stringplanmodifier.RequiresReplace(), - }, - }, - "persist_os_disk": schema.BoolAttribute{ - Description: "Persist the OS disk when power cycling the non-persistent provisioned virtual machine.", - Required: true, - PlanModifiers: []planmodifier.Bool{ - boolplanmodifier.RequiresReplace(), - }, - }, - "persist_vm": schema.BoolAttribute{ - Description: "Persist the non-persistent provisioned virtual machine in Azure environments when power cycling. This property only applies when the PersistOsDisk property is set to True.", - Required: true, - PlanModifiers: []planmodifier.Bool{ - boolplanmodifier.RequiresReplace(), - }, - }, - "storage_cost_saving": schema.BoolAttribute{ - Description: "Save storage cost by downgrading the storage type of the disk to Standard HDD when VM shut down.", - Required: true, - PlanModifiers: []planmodifier.Bool{ - boolplanmodifier.RequiresReplace(), - }, - }, - "writeback_cache_disk_size_gb": schema.Int64Attribute{ - Description: "The size in GB of any temporary storage disk used by the write back cache.", - Required: true, - Validators: []validator.Int64{ - int64validator.AtLeast(0), - }, - PlanModifiers: []planmodifier.Int64{ - int64planmodifier.RequiresReplace(), - }, - }, - "writeback_cache_memory_size_mb": schema.Int64Attribute{ - Description: "The size of the in-memory write back cache in MB.", - Optional: true, - Validators: []validator.Int64{ - int64validator.AtLeast(0), - }, - PlanModifiers: []planmodifier.Int64{ // TO DO - Allow updating master image - int64planmodifier.RequiresReplace(), - }, - }, - }, - }, - }, - }, - "aws_machine_config": schema.SingleNestedAttribute{ - Description: "Machine Configuration For AWS EC2 MCS catalog.", - Optional: true, - Attributes: map[string]schema.Attribute{ - "service_offering": schema.StringAttribute{ - Description: "The AWS VM Sku to use when creating machines.", - Required: true, - }, - "master_image": schema.StringAttribute{ - Description: "The name of the virtual machine image that will be used.", - Required: true, - }, - "image_ami": schema.StringAttribute{ - Description: "AMI of the AWS image to be used as the template image for the machine catalog.", - Required: true, - }, - }, - }, - "gcp_machine_config": schema.SingleNestedAttribute{ - Description: "Machine Configuration For GCP MCS catalog.", - Optional: true, - Attributes: map[string]schema.Attribute{ - "master_image": 
schema.StringAttribute{ - Description: "The name of the virtual machine snapshot or VM template that will be used. This identifies the hard disk to be used and the default values for the memory and processors.", - Required: true, - }, - "machine_profile": schema.StringAttribute{ - Description: "The name of the virtual machine template that will be used to identify the default value for the tags, virtual machine size, boot diagnostics, host cache property of OS disk, accelerated networking and availability zone. If not specified, the VM specified in master_image will be used as template.", - Optional: true, - }, - "machine_snapshot": schema.StringAttribute{ - Description: "The name of the virtual machine snapshot of a GCP VM that will be used as master image.", - Optional: true, - }, - "storage_type": schema.StringAttribute{ - Description: "Storage type used for provisioned virtual machine disks on GCP. Storage types include: `pd-standar`, `pd-balanced`, `pd-ssd` and `pd-extreme`.", - Required: true, - Validators: []validator.String{ - stringvalidator.OneOf( - "pd-standard", - "pd-balanced", - "pd-ssd", - "pd-extreme", - ), - }, - }, - "writeback_cache": schema.SingleNestedAttribute{ - Description: "Write-back Cache config. Leave this empty to disable Write-back Cache.", - Optional: true, - Attributes: map[string]schema.Attribute{ - "persist_wbc": schema.BoolAttribute{ - Description: "Persist Write-back Cache", - Required: true, - }, - "wbc_disk_storage_type": schema.StringAttribute{ - Description: "Type of naming scheme. Choose between Numeric and Alphabetic.", - Required: true, - Validators: []validator.String{ - stringvalidator.OneOf( - "pd-standard", - "pd-balanced", - "pd-ssd", - ), - }, - PlanModifiers: []planmodifier.String{ - stringplanmodifier.RequiresReplace(), - }, - }, - "persist_os_disk": schema.BoolAttribute{ - Description: "Persist the OS disk when power cycling the non-persistent provisioned virtual machine.", - Required: true, - PlanModifiers: []planmodifier.Bool{ - boolplanmodifier.RequiresReplace(), - }, - }, - "writeback_cache_disk_size_gb": schema.Int64Attribute{ - Description: "The size in GB of any temporary storage disk used by the write back cache.", - Required: true, - Validators: []validator.Int64{ - int64validator.AtLeast(0), - }, - PlanModifiers: []planmodifier.Int64{ - int64planmodifier.RequiresReplace(), - }, - }, - "writeback_cache_memory_size_mb": schema.Int64Attribute{ - Description: "The size of the in-memory write back cache in MB.", - Optional: true, - Validators: []validator.Int64{ - int64validator.AtLeast(0), - }, - PlanModifiers: []planmodifier.Int64{ // TO DO - Allow updating master image - int64planmodifier.RequiresReplace(), - }, - }, - "persist_vm": schema.BoolAttribute{ - Description: "Not supported for GCP.", - Computed: true, - PlanModifiers: []planmodifier.Bool{ - boolplanmodifier.RequiresReplace(), - }, - }, - "storage_cost_saving": schema.BoolAttribute{ - Description: "Not supported for GCP.", - Computed: true, - PlanModifiers: []planmodifier.Bool{ - boolplanmodifier.RequiresReplace(), - }, - }, - }, - }, - }, - }, - "machine_domain_identity": schema.SingleNestedAttribute{ - Description: "The domain identity for machines in the machine catalog." + "
" + - "Required when identity_type is set to `ActiveDirectory`", - Optional: true, - Attributes: map[string]schema.Attribute{ - "domain": schema.StringAttribute{ - Description: "The AD domain name for the pool. Specify this in FQDN format; for example, MyDomain.com.", - Required: true, - Validators: []validator.String{ - stringvalidator.RegexMatches(regexp.MustCompile(util.DomainFqdnRegex), "must be in FQDN format"), - }, - }, - "domain_ou": schema.StringAttribute{ - Description: "The organization unit that computer accounts will be created into.", - Optional: true, - }, - "service_account": schema.StringAttribute{ - Description: "Service account for the domain. Only the username is required; do not include the domain name.", - Required: true, - }, - "service_account_password": schema.StringAttribute{ - Description: "Service account password for the domain.", - Required: true, - Sensitive: true, - }, - }, - }, - "number_of_total_machines": schema.Int64Attribute{ - Description: "Number of VDA machines allocated in the catalog.", - Required: true, - Validators: []validator.Int64{ - int64validator.AtLeast(1), - }, - }, - "network_mapping": schema.SingleNestedAttribute{ - Description: "Specifies how the attached NICs are mapped to networks. If this parameter is omitted, provisioned VMs are created with a single NIC, which is mapped to the default network in the hypervisor resource pool. If this parameter is supplied, machines are created with the number of NICs specified in the map, and each NIC is attached to the specified network." + "
" + - "Required when `provisioning_scheme.identity_type` is `AzureAD`.", - Optional: true, - Attributes: map[string]schema.Attribute{ - "network_device": schema.StringAttribute{ - Description: "Name or Id of the network device.", - Required: true, - Validators: []validator.String{ - stringvalidator.AlsoRequires(path.Expressions{ - path.MatchRelative().AtParent().AtName("network"), - }...), - }, - }, - "network": schema.StringAttribute{ - Description: "The name of the virtual network that the device should be attached to. This must be a subnet within a Virtual Private Cloud item in the resource pool to which the Machine Catalog is associated." + "
" + - "For AWS, please specify the network mask of the network you want to use within the VPC.", - Required: true, - Validators: []validator.String{ - stringvalidator.AlsoRequires(path.Expressions{ - path.MatchRelative().AtParent().AtName("network_device"), - }...), - }, - }, - }, - }, - "availability_zones": schema.StringAttribute{ - Description: "The Availability Zones for provisioning virtual machines. Use a comma as a delimiter for multiple availability_zones.", - Optional: true, - }, - "identity_type": schema.StringAttribute{ - Description: "The identity type of the machines to be created. Supported values are`ActiveDirectory` and `AzureAD`.", - Required: true, - Validators: []validator.String{ - stringvalidator.OneOf( - string(citrixorchestration.IDENTITYTYPE_ACTIVE_DIRECTORY), - string(citrixorchestration.IDENTITYTYPE_AZURE_AD), - ), - validators.AlsoRequiresOnValues( - []string{ - string(citrixorchestration.IDENTITYTYPE_ACTIVE_DIRECTORY), - }, - path.MatchRelative().AtParent().AtName("machine_domain_identity"), - ), - validators.AlsoRequiresOnValues( - []string{ - string(citrixorchestration.IDENTITYTYPE_AZURE_AD), - }, - path.MatchRelative().AtParent().AtName("azure_machine_config"), - path.MatchRelative().AtParent().AtName("azure_machine_config").AtName("machine_profile"), - path.MatchRelative().AtParent().AtName("network_mapping"), - ), - }, - PlanModifiers: []planmodifier.String{ - stringplanmodifier.RequiresReplace(), - }, - }, - "machine_account_creation_rules": schema.SingleNestedAttribute{ - Description: "Rules specifying how Active Directory machine accounts should be created when machines are provisioned.", - Required: true, - Attributes: map[string]schema.Attribute{ - "naming_scheme": schema.StringAttribute{ - Description: "Defines the template name for AD accounts created in the identity pool.", - Required: true, - }, - "naming_scheme_type": schema.StringAttribute{ - Description: "Type of naming scheme. This defines the format of the variable part of the AD account names that will be created. Choose between `Numeric`, `Alphabetic` and `Unicode`.", - Required: true, - Validators: []validator.String{ - util.GetValidatorFromEnum(citrixorchestration.AllowedAccountNamingSchemeTypeEnumValues), - }, - }, - }, - }, - }, - }, - }, - } -} - -// Configure adds the provider configured client to the resource. -func (r *machineCatalogResource) Configure(_ context.Context, req resource.ConfigureRequest, _ *resource.ConfigureResponse) { - if req.ProviderData == nil { - return - } - - r.client = req.ProviderData.(*citrixdaasclient.CitrixDaasClient) -} - -// Create creates the resource and sets the initial Terraform state. -func (r *machineCatalogResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { - defer util.PanicHandler(&resp.Diagnostics) - - // Retrieve values from plan - var plan MachineCatalogResourceModel - diags := req.Plan.Get(ctx, &plan) - resp.Diagnostics.Append(diags...) 
- if resp.Diagnostics.HasError() { - return - } - - provisioningType, err := citrixorchestration.NewProvisioningTypeFromValue(plan.ProvisioningType.ValueString()) - if err != nil { - resp.Diagnostics.AddError( - "Error creating Machine Catalog", - "Unsupported provisioning type.", - ) - - return - } - - var provisioningScheme *citrixorchestration.CreateMachineCatalogProvisioningSchemeRequestModel - var connectionType *citrixorchestration.HypervisorConnectionType - var errorMsg string - var machinesRequest []citrixorchestration.AddMachineToMachineCatalogRequestModel - var body citrixorchestration.CreateMachineCatalogRequestModel - - isRemotePcCatalog := plan.IsRemotePc.ValueBool() - - if *provisioningType == citrixorchestration.PROVISIONINGTYPE_MCS { - if plan.ProvisioningScheme.IdentityType.ValueString() == string(citrixorchestration.IDENTITYTYPE_AZURE_AD) { - if r.client.AuthConfig.OnPremises { - resp.Diagnostics.AddAttributeError( - path.Root("identity_type"), - "Unsupported Machine Catalog Configuration", - fmt.Sprintf("Identity type %s is not supported in OnPremises environment. ", string(citrixorchestration.IDENTITYTYPE_AZURE_AD)), - ) - - return - } - } - - hypervisor, err := util.GetHypervisor(ctx, r.client, &resp.Diagnostics, plan.ProvisioningScheme.Hypervisor.ValueString()) - if err != nil { - return - } - - connectionType = hypervisor.GetConnectionType().Ptr() - - hypervisorResourcePool, err := util.GetHypervisorResourcePool(ctx, r.client, &resp.Diagnostics, plan.ProvisioningScheme.Hypervisor.ValueString(), plan.ProvisioningScheme.HypervisorResourcePool.ValueString()) - if err != nil { - return - } - - provisioningScheme, errorMsg = getProvSchemeForCatalog(ctx, r.client, plan, hypervisor, hypervisorResourcePool) - if errorMsg != "" || provisioningScheme == nil { - resp.Diagnostics.AddError( - "Error creating Machine Catalog", - errorMsg, - ) - - return - } - - body.SetProvisioningScheme(*provisioningScheme) - } else { - // Manual type catalogs - machineType := citrixorchestration.MACHINETYPE_VIRTUAL - if !plan.IsPowerManaged.ValueBool() { - machineType = citrixorchestration.MACHINETYPE_PHYSICAL - } - - body.SetMachineType(machineType) - body.SetIsRemotePC(plan.IsRemotePc.ValueBool()) - - if isRemotePcCatalog { - remotePCEnrollmentScopes := getRemotePcEnrollmentScopes(plan, true) - body.SetRemotePCEnrollmentScopes(remotePCEnrollmentScopes) - } else { - machinesRequest, err = getMachinesForManualCatalogs(ctx, r.client, plan.MachineAccounts) - if err != nil { - resp.Diagnostics.AddError( - "Error creating Machine Catalog", - fmt.Sprintf("Failed to resolve machines, error: %s", err.Error()), - ) - - return - } - body.SetMachines(machinesRequest) - } - } - - // Generate API request body from plan - body.SetName(plan.Name.ValueString()) - body.SetDescription(plan.Description.ValueString()) - body.SetProvisioningType(*provisioningType) // Only support MCS and Manual. 
Block other types - body.SetMinimumFunctionalLevel(citrixorchestration.FUNCTIONALLEVEL_L7_20) // Hard-coding VDA feature level to be same as QCS - allocationType, err := citrixorchestration.NewAllocationTypeFromValue(plan.AllocationType.ValueString()) - if err != nil { - resp.Diagnostics.AddError( - "Error creating Machine Catalog", - "Unsupported allocation type.", - ) - return - } - body.SetAllocationType(*allocationType) - sessionSupport, err := citrixorchestration.NewSessionSupportFromValue(plan.SessionSupport.ValueString()) - if err != nil { - resp.Diagnostics.AddError( - "Error creating Machine Catalog", - "Unsupported session support.", - ) - return - } - body.SetSessionSupport(*sessionSupport) - persistChanges := citrixorchestration.PERSISTCHANGES_DISCARD - if *sessionSupport == citrixorchestration.SESSIONSUPPORT_SINGLE_SESSION && *allocationType == citrixorchestration.ALLOCATIONTYPE_STATIC { - persistChanges = citrixorchestration.PERSISTCHANGES_ON_LOCAL - } - body.SetPersistUserChanges(persistChanges) - body.SetZone(plan.Zone.ValueString()) - if !plan.VdaUpgradeType.IsNull() { - body.SetVdaUpgradeType(citrixorchestration.VdaUpgradeType(plan.VdaUpgradeType.ValueString())) - } else { - body.SetVdaUpgradeType(citrixorchestration.VDAUPGRADETYPE_NOT_SET) - } - - createMachineCatalogRequest := r.client.ApiClient.MachineCatalogsAPIsDAAS.MachineCatalogsCreateMachineCatalog(ctx) - - // Add domain credential header - if plan.ProvisioningType.ValueString() == string(citrixorchestration.PROVISIONINGTYPE_MCS) && plan.ProvisioningScheme.MachineDomainIdentity != nil { - header := generateAdminCredentialHeader(plan) - createMachineCatalogRequest = createMachineCatalogRequest.XAdminCredential(header) - } - - // Add request body - createMachineCatalogRequest = createMachineCatalogRequest.CreateMachineCatalogRequestModel(body) - - // Make request async - createMachineCatalogRequest = createMachineCatalogRequest.Async(true) - - // Create new machine catalog - _, httpResp, err := citrixdaasclient.AddRequestData(createMachineCatalogRequest, r.client).Execute() - if err != nil { - resp.Diagnostics.AddError( - "Error creating Machine Catalog", - "TransactionId: "+citrixdaasclient.GetTransactionIdFromHttpResponse(httpResp)+ - "\nError message: "+util.ReadClientError(err), - ) - return - } - - err = util.ProcessAsyncJobResponse(ctx, r.client, httpResp, "Error creating Machine Catalog", &resp.Diagnostics, 120) - if err != nil { - return - } - - // Get the new catalog - catalog, err := util.GetMachineCatalog(ctx, r.client, &resp.Diagnostics, plan.Name.ValueString(), true) - - if err != nil { - return - } - - machines, err := util.GetMachineCatalogMachines(ctx, r.client, &resp.Diagnostics, catalog.GetId()) - - if err != nil { - return - } - - // Map response body to schema and populate Computed attribute values - plan = plan.RefreshPropertyValues(ctx, r.client, catalog, connectionType, machines) - - // Set state to fully populated data - diags = resp.State.Set(ctx, plan) - resp.Diagnostics.Append(diags...) - if resp.Diagnostics.HasError() { - return - } -} - -// Read refreshes the Terraform state with the latest data. -func (r *machineCatalogResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { - defer util.PanicHandler(&resp.Diagnostics) - - // Get current state - var state MachineCatalogResourceModel - diags := req.State.Get(ctx, &state) - resp.Diagnostics.Append(diags...) 
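For reference, the X-AdminCredential header attached in Create above is an HTTP Basic-style value built from machine_domain_identity, in the same way generateAdminCredentialHeader later in this file builds it. A standalone sketch with placeholder credentials:

package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	// Placeholder values; the real ones come from provisioning_scheme.machine_domain_identity.
	domain, serviceAccount, password := "EXAMPLE.COM", "svc-provisioning", "not-a-real-password"
	credential := fmt.Sprintf("%s\\%s:%s", domain, serviceAccount, password)
	header := fmt.Sprintf("Basic %s", base64.StdEncoding.EncodeToString([]byte(credential)))
	fmt.Println(header)
}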
- if resp.Diagnostics.HasError() { - return - } - - // Get refreshed machine catalog state from Orchestration - catalogId := state.Id.ValueString() - - catalog, _, err := readMachineCatalog(ctx, r.client, resp, catalogId) - if err != nil { - return - } - - machineCatalogMachines, err := util.GetMachineCatalogMachines(ctx, r.client, &resp.Diagnostics, catalogId) - if err != nil { - return - } - - // Resolve resource path for service offering and master image - provScheme := catalog.GetProvisioningScheme() - resourcePool := provScheme.GetResourcePool() - hypervisor := resourcePool.GetHypervisor() - hypervisorName := hypervisor.GetName() - - var connectionType *citrixorchestration.HypervisorConnectionType - - if hypervisorName != "" { - hypervisor, err := util.GetHypervisor(ctx, r.client, &resp.Diagnostics, hypervisorName) - if err != nil { - return - } - connectionType = hypervisor.GetConnectionType().Ptr() - } - // Overwrite items with refreshed state - state = state.RefreshPropertyValues(ctx, r.client, catalog, connectionType, machineCatalogMachines) - - // Set refreshed state - diags = resp.State.Set(ctx, &state) - resp.Diagnostics.Append(diags...) - if resp.Diagnostics.HasError() { - return - } -} - -// Update updates the resource and sets the updated Terraform state on success. -func (r *machineCatalogResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { - defer util.PanicHandler(&resp.Diagnostics) - - // Retrieve values from plan - var plan MachineCatalogResourceModel - var state MachineCatalogResourceModel - diags := req.Plan.Get(ctx, &plan) - resp.Diagnostics.Append(diags...) - if resp.Diagnostics.HasError() { - return - } - - diags = req.State.Get(ctx, &state) - resp.Diagnostics.Append(diags...) - if resp.Diagnostics.HasError() { - return - } - - // Get refreshed machine catalogs from Orchestration - catalogId := plan.Id.ValueString() - catalogName := plan.Name.ValueString() - catalog, err := util.GetMachineCatalog(ctx, r.client, &resp.Diagnostics, catalogId, true) - - if err != nil { - return - } - - var connectionType *citrixorchestration.HypervisorConnectionType - - // Generate API request body from plan - var body citrixorchestration.UpdateMachineCatalogRequestModel - body.SetName(plan.Name.ValueString()) - body.SetDescription(plan.Description.ValueString()) - body.SetZone(plan.Zone.ValueString()) - if !plan.VdaUpgradeType.IsNull() { - body.SetVdaUpgradeType(citrixorchestration.VdaUpgradeType(plan.VdaUpgradeType.ValueString())) - } else { - body.SetVdaUpgradeType(citrixorchestration.VDAUPGRADETYPE_NOT_SET) - } - - provisioningType, err := citrixorchestration.NewProvisioningTypeFromValue(plan.ProvisioningType.ValueString()) - if err != nil { - resp.Diagnostics.AddError( - "Error creating Machine Catalog", - "Unsupported provisioning type.", - ) - - return - } - - if *provisioningType == citrixorchestration.PROVISIONINGTYPE_MCS { - if plan.ProvisioningScheme.IdentityType.ValueString() == string(citrixorchestration.IDENTITYTYPE_AZURE_AD) { - if r.client.AuthConfig.OnPremises { - resp.Diagnostics.AddAttributeError( - path.Root("identity_type"), - "Unsupported Machine Catalog Configuration", - fmt.Sprintf("Identity type %s is not supported in OnPremises environment. 
", string(citrixorchestration.IDENTITYTYPE_AZURE_AD)), - ) - - return - } - } - - hypervisor, err := util.GetHypervisor(ctx, r.client, &resp.Diagnostics, plan.ProvisioningScheme.Hypervisor.ValueString()) - if err != nil { - return - } - - connectionType = hypervisor.GetConnectionType().Ptr() - - hypervisorResourcePool, err := util.GetHypervisorResourcePool(ctx, r.client, &resp.Diagnostics, plan.ProvisioningScheme.Hypervisor.ValueString(), plan.ProvisioningScheme.HypervisorResourcePool.ValueString()) - if err != nil { - return - } - - err = updateCatalogImage(ctx, r.client, resp, catalog, hypervisor, hypervisorResourcePool, plan) - - if err != nil { - return - } - - if catalog.GetTotalCount() > int32(plan.ProvisioningScheme.NumTotalMachines.ValueInt64()) { - // delete machines from machine catalog - err = deleteMachinesFromMcsCatalog(ctx, r.client, resp, catalog, plan) - if err != nil { - return - } - } - - if catalog.GetTotalCount() < int32(plan.ProvisioningScheme.NumTotalMachines.ValueInt64()) { - // add machines to machine catalog - err = addMachinesToMcsCatalog(ctx, r.client, resp, catalog, plan) - if err != nil { - return - } - } - - // Resolve resource path for service offering and master image - switch hypervisor.GetConnectionType() { - case citrixorchestration.HYPERVISORCONNECTIONTYPE_AZURE_RM: - serviceOffering := plan.ProvisioningScheme.AzureMachineConfig.ServiceOffering.ValueString() - queryPath := "serviceoffering.folder" - serviceOfferingPath, err := util.GetSingleResourcePathFromHypervisor(ctx, r.client, hypervisor.GetName(), hypervisorResourcePool.GetName(), queryPath, serviceOffering, "serviceoffering", "") - if err != nil { - resp.Diagnostics.AddError( - "Error updating Machine Catalog", - fmt.Sprintf("Failed to resolve service offering %s on Azure, error: %s", serviceOffering, err.Error()), - ) - return - } - body.SetServiceOfferingPath(serviceOfferingPath) - if machineProfile := plan.ProvisioningScheme.AzureMachineConfig.MachineProfile; machineProfile != nil { - machineProfileName := machineProfile.MachineProfileVmName.ValueString() - if machineProfileName != "" { - machineProfileResourceGroup := plan.ProvisioningScheme.AzureMachineConfig.MachineProfile.MachineProfileResourceGroup.ValueString() - queryPath = fmt.Sprintf("machineprofile.folder\\%s.resourcegroup", machineProfileResourceGroup) - machineProfilePath, err := util.GetSingleResourcePathFromHypervisor(ctx, r.client, hypervisor.GetName(), hypervisorResourcePool.GetName(), queryPath, machineProfileName, "vm", "") - if err != nil { - resp.Diagnostics.AddError( - "Error updating Machine Catalog", - fmt.Sprintf("Failed to locate machine profile %s on Azure, error: %s", plan.ProvisioningScheme.AzureMachineConfig.MachineProfile.MachineProfileVmName.ValueString(), err.Error()), - ) - return - } - body.SetMachineProfilePath(machineProfilePath) - } - } - case citrixorchestration.HYPERVISORCONNECTIONTYPE_AWS: - serviceOffering := plan.ProvisioningScheme.AwsMachineConfig.ServiceOffering.ValueString() - serviceOfferingPath, err := util.GetSingleResourcePathFromHypervisor(ctx, r.client, hypervisor.GetName(), hypervisorResourcePool.GetName(), "", serviceOffering, "serviceoffering", "") - if err != nil { - resp.Diagnostics.AddError( - "Error updating Machine Catalog", - fmt.Sprintf("Failed to resolve service offering %s on AWS, error: %s", serviceOffering, err.Error()), - ) - return - } - body.SetServiceOfferingPath(serviceOfferingPath) - case citrixorchestration.HYPERVISORCONNECTIONTYPE_GOOGLE_CLOUD_PLATFORM: - machineProfile := 
plan.ProvisioningScheme.GcpMachineConfig.MachineProfile.ValueString() - if machineProfile != "" { - machineProfilePath, err := util.GetSingleResourcePathFromHypervisor(ctx, r.client, hypervisor.GetName(), hypervisorResourcePool.GetName(), "", plan.ProvisioningScheme.GcpMachineConfig.MachineProfile.ValueString(), "vm", "") - if err != nil { - resp.Diagnostics.AddError( - "Error updating Machine Catalog", - fmt.Sprintf("Failed to locate machine profile %s on GCP, error: %s", plan.ProvisioningScheme.GcpMachineConfig.MachineProfile.ValueString(), err.Error()), - ) - return - } - body.SetMachineProfilePath(machineProfilePath) - } - } - - if plan.ProvisioningScheme.NetworkMapping != nil { - networkMapping, err := ParseNetworkMappingToClientModel(*plan.ProvisioningScheme.NetworkMapping, hypervisorResourcePool) - if err != nil { - resp.Diagnostics.AddError( - "Error updating Machine Catalog", - fmt.Sprintf("Failed to parse network mapping, error: %s", err.Error()), - ) - return - } - body.SetNetworkMapping(networkMapping) - } - - customProperties := ParseCustomPropertiesToClientModel(*plan.ProvisioningScheme, hypervisor.ConnectionType) - body.SetCustomProperties(customProperties) - } else { - // For manual, compare state and plan to find machines to add and delete - addMachinesList, deleteMachinesMap := createAddAndRemoveMachinesListForManualCatalogs(state, plan) - - addMachinesToManualCatalog(ctx, r.client, resp, addMachinesList, catalogId) - deleteMachinesFromManualCatalog(ctx, r.client, resp, deleteMachinesMap, catalogId, catalog.GetIsPowerManaged()) - - if plan.IsRemotePc.ValueBool() { - remotePCEnrollmentScopes := getRemotePcEnrollmentScopes(plan, false) - body.SetRemotePCEnrollmentScopes(remotePCEnrollmentScopes) - } - } - - updateMachineCatalogRequest := r.client.ApiClient.MachineCatalogsAPIsDAAS.MachineCatalogsUpdateMachineCatalog(ctx, catalogId) - updateMachineCatalogRequest = updateMachineCatalogRequest.UpdateMachineCatalogRequestModel(body) - _, httpResp, err := citrixdaasclient.AddRequestData(updateMachineCatalogRequest, r.client).Execute() - if err != nil { - resp.Diagnostics.AddError( - "Error updating Machine Catalog "+catalogName, - "TransactionId: "+citrixdaasclient.GetTransactionIdFromHttpResponse(httpResp)+ - "\nError message: "+util.ReadClientError(err), - ) - return - } - - // Fetch updated machine catalog from GetMachineCatalog. - catalog, err = util.GetMachineCatalog(ctx, r.client, &resp.Diagnostics, catalogId, true) - if err != nil { - return - } - - machines, err := util.GetMachineCatalogMachines(ctx, r.client, &resp.Diagnostics, catalog.GetId()) - if err != nil { - return - } - - // Update resource state with updated items and timestamp - plan = plan.RefreshPropertyValues(ctx, r.client, catalog, connectionType, machines) - - diags = resp.State.Set(ctx, plan) - resp.Diagnostics.Append(diags...) - if resp.Diagnostics.HasError() { - return - } -} - -// Delete deletes the resource and removes the Terraform state on success. -func (r *machineCatalogResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { - defer util.PanicHandler(&resp.Diagnostics) - - // Retrieve values from state - var state MachineCatalogResourceModel - diags := req.State.Get(ctx, &state) - resp.Diagnostics.Append(diags...) 
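Update above reconciles machine counts by comparing the catalog's current total with provisioning_scheme.number_of_total_machines and then either deleting or adding machines. A condensed restatement of that rule (a sketch, not provider code):

package main

import "fmt"

// scaleAction mirrors the comparison Update performs between the catalog's
// current machine count and the planned number_of_total_machines.
func scaleAction(current, desired int32) string {
	switch {
	case current > desired:
		return "delete surplus machines (Random allocation only, sessionless machines first)"
	case current < desired:
		return "add machines via batched requests"
	default:
		return "machine count unchanged"
	}
}

func main() {
	fmt.Println(scaleAction(5, 3))
	fmt.Println(scaleAction(3, 5))
}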
- if resp.Diagnostics.HasError() { - return - } - - catalogId := state.Id.ValueString() - - catalog, httpResp, err := readMachineCatalog(ctx, r.client, nil, catalogId) - - if err != nil { - if httpResp.StatusCode == http.StatusNotFound { - return - } - - resp.Diagnostics.AddError( - "Error reading Machine Catalog "+catalogId, - "TransactionId: "+citrixdaasclient.GetTransactionIdFromHttpResponse(httpResp)+ - "\nError message: "+util.ReadClientError(err), - ) - - return - } - - // Delete the existing machine catalog - catalogName := state.Name.ValueString() - deleteMachineCatalogRequest := r.client.ApiClient.MachineCatalogsAPIsDAAS.MachineCatalogsDeleteMachineCatalog(ctx, catalogId) - deleteAccountOption := citrixorchestration.MACHINEACCOUNTDELETEOPTION_NONE - deleteVmOption := false - if catalog.ProvisioningType == citrixorchestration.PROVISIONINGTYPE_MCS { - // If there's no provisioning scheme in state, there will not be any machines created by MCS. - // Therefore we will just omit the credential used for removing machine accounts. - if catalog.ProvisioningScheme != nil { - // Add domain credential header - header := generateAdminCredentialHeader(state) - deleteMachineCatalogRequest = deleteMachineCatalogRequest.XAdminCredential(header) - } - - deleteAccountOption = citrixorchestration.MACHINEACCOUNTDELETEOPTION_DELETE - deleteVmOption = true - } - - deleteMachineCatalogRequest = deleteMachineCatalogRequest.DeleteVm(deleteVmOption).DeleteAccount(deleteAccountOption).Async(true) - httpResp, err = citrixdaasclient.AddRequestData(deleteMachineCatalogRequest, r.client).Execute() - if err != nil && httpResp.StatusCode != http.StatusNotFound { - resp.Diagnostics.AddError( - "Error deleting Machine Catalog "+catalogName, - "TransactionId: "+citrixdaasclient.GetTransactionIdFromHttpResponse(httpResp)+ - "\nError message: "+util.ReadClientError(err), - ) - return - } - - err = util.ProcessAsyncJobResponse(ctx, r.client, httpResp, "Error deleting Machine Catalog "+catalogName, &resp.Diagnostics, 60) - if err != nil { - return - } -} - -func (r *machineCatalogResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { - // Retrieve import ID and save to id attribute - resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp) -} - -func (r *machineCatalogResource) ValidateConfig(ctx context.Context, req resource.ValidateConfigRequest, resp *resource.ValidateConfigResponse) { - var data MachineCatalogResourceModel - diags := req.Config.Get(ctx, &data) - resp.Diagnostics.Append(diags...)
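// Summary of the teardown rules in Delete above: for an MCS-provisioned
// catalog, DeleteVm=true and DeleteAccount=Delete, and the X-AdminCredential
// header is attached only when a provisioning scheme (and therefore
// MCS-created machine accounts) exists in state; for a manual catalog,
// DeleteVm=false and DeleteAccount=None, leaving the machines and their AD
// accounts in place.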
- if resp.Diagnostics.HasError() { - return - } - - provisioningTypeMcs := string(citrixorchestration.PROVISIONINGTYPE_MCS) - provisioningTypeManual := string(citrixorchestration.PROVISIONINGTYPE_MANUAL) - - if data.ProvisioningType.ValueString() == provisioningTypeMcs { - if data.ProvisioningScheme == nil { - resp.Diagnostics.AddAttributeError( - path.Root("provisioning_scheme"), - "Missing Attribute Configuration", - fmt.Sprintf("Expected provisioning_scheme to be configured when value of provisioning_type is %s.", provisioningTypeMcs), - ) - } - - if data.ProvisioningScheme != nil && data.ProvisioningScheme.AzureMachineConfig != nil && data.ProvisioningScheme.AzureMachineConfig.WritebackCache != nil { - wbc := data.ProvisioningScheme.AzureMachineConfig.WritebackCache - if !wbc.PersistOsDisk.ValueBool() && wbc.PersistVm.ValueBool() { - resp.Diagnostics.AddAttributeError( - path.Root("persist_vm"), - "Incorrect Attribute Configuration", - "persist_os_disk must be enabled to enable persist_vm.", - ) - } - - if !wbc.PersistWBC.ValueBool() && wbc.StorageCostSaving.ValueBool() { - resp.Diagnostics.AddAttributeError( - path.Root("storage_cost_saving"), - "Incorrect Attribute Configuration", - "persist_wbc must be enabled to enable storage_cost_saving.", - ) - } - } - - if data.MachineAccounts != nil { - resp.Diagnostics.AddAttributeError( - path.Root("machine_accounts"), - "Incorrect Attribute Configuration", - fmt.Sprintf("machine_accounts cannot be configured when provisioning_type is %s.", provisioningTypeMcs), - ) - } - - if data.IsRemotePc.ValueBool() { - resp.Diagnostics.AddAttributeError( - path.Root("is_remote_pc"), - "Incorrect Attribute Configuration", - fmt.Sprintf("Remote PC access catalog cannot be created when provisioning_type is %s.", provisioningTypeMcs), - ) - } - - if !data.IsPowerManaged.IsNull() && !data.IsPowerManaged.ValueBool() { - resp.Diagnostics.AddAttributeError( - path.Root("is_power_managed"), - "Incorrect Attribute Configuration", - fmt.Sprintf("Machines have to be power managed when provisioning_type is %s.", provisioningTypeMcs), - ) - } - - data.IsPowerManaged = types.BoolValue(true) // set power managed to true for MCS catalog - } else { - // Manual provisioning type - if data.IsPowerManaged.IsNull() { - resp.Diagnostics.AddAttributeError( - path.Root("is_power_managed"), - "Missing Attribute Configuration", - fmt.Sprintf("Expected is_power_managed to be defined when provisioning_type is %s.", provisioningTypeManual), - ) - } - - if data.IsRemotePc.IsNull() { - resp.Diagnostics.AddAttributeError( - path.Root("is_remote_pc"), - "Missing Attribute Configuration", - fmt.Sprintf("Expected is_remote_pc to be defined when provisioning_type is %s.", provisioningTypeManual), - ) - } - - if data.ProvisioningScheme != nil { - resp.Diagnostics.AddAttributeError( - path.Root("provisioning_scheme"), - "Incorrect Attribute Configuration", - fmt.Sprintf("provisioning_scheme cannot be configured when provisioning_type is not %s.", provisioningTypeMcs), - ) - } - - if data.IsPowerManaged.ValueBool() { - if data.MachineAccounts != nil { - for _, machineAccount := range data.MachineAccounts { - if machineAccount.Hypervisor.IsNull() { - resp.Diagnostics.AddAttributeError( - path.Root("machine_accounts"), - "Missing Attribute Configuration", - "Expected hypervisor to be configured when machines are power managed.", - ) - } - } - } - - if data.IsRemotePc.ValueBool() { - resp.Diagnostics.AddAttributeError( - path.Root("is_remote_pc"), - "Incorrect Attribute Configuration", -
"Remote PC Access catalog cannot be power managed.", - ) - } - } - } - - if data.IsRemotePc.ValueBool() { - sessionSupport, err := citrixorchestration.NewSessionSupportFromValue(data.SessionSupport.ValueString()) - if err != nil { - resp.Diagnostics.AddAttributeError( - path.Root("session_support"), - "Incorrect Attribute Configuration", - "Unsupported session support.", - ) - return - } - if sessionSupport != nil && *sessionSupport != citrixorchestration.SESSIONSUPPORT_SINGLE_SESSION { - resp.Diagnostics.AddAttributeError( - path.Root("session_support"), - "Incorrect Attribute Configuration", - "Only Single Session is supported for Remote PC Access catalog.", - ) - } - } -} - -func generateAdminCredentialHeader(plan MachineCatalogResourceModel) string { - credential := fmt.Sprintf("%s\\%s:%s", plan.ProvisioningScheme.MachineDomainIdentity.Domain.ValueString(), plan.ProvisioningScheme.MachineDomainIdentity.ServiceAccount.ValueString(), plan.ProvisioningScheme.MachineDomainIdentity.ServiceAccountPassword.ValueString()) - encodedData := base64.StdEncoding.EncodeToString([]byte(credential)) - header := fmt.Sprintf("Basic %s", encodedData) - - return header -} - -func generateBatchApiHeaders(client *citrixdaasclient.CitrixDaasClient, plan MachineCatalogResourceModel, generateCredentialHeader bool) ([]citrixorchestration.NameValueStringPairModel, *http.Response, error) { - headers := []citrixorchestration.NameValueStringPairModel{} - - cwsAuthToken, httpResp, err := client.SignIn() - var token string - if err != nil { - return headers, httpResp, err - } - - if cwsAuthToken != "" { - token = strings.Split(cwsAuthToken, "=")[1] - var header citrixorchestration.NameValueStringPairModel - header.SetName("Authorization") - header.SetValue("Bearer " + token) - headers = append(headers, header) - } - - if generateCredentialHeader && plan.ProvisioningScheme.MachineDomainIdentity != nil { - adminCredentialHeader := generateAdminCredentialHeader(plan) - var header citrixorchestration.NameValueStringPairModel - header.SetName("X-AdminCredential") - header.SetValue(adminCredentialHeader) - headers = append(headers, header) - } - - return headers, httpResp, err -} - -func readMachineCatalog(ctx context.Context, client *citrixdaasclient.CitrixDaasClient, resp *resource.ReadResponse, machineCatalogId string) (*citrixorchestration.MachineCatalogDetailResponseModel, *http.Response, error) { - getMachineCatalogRequest := client.ApiClient.MachineCatalogsAPIsDAAS.MachineCatalogsGetMachineCatalog(ctx, machineCatalogId).Fields("Id,Name,HypervisorConnection,ProvisioningScheme,RemotePCEnrollmentScopes") - catalog, httpResp, err := util.ReadResource[*citrixorchestration.MachineCatalogDetailResponseModel](getMachineCatalogRequest, ctx, client, resp, "Machine Catalog", machineCatalogId) - - client.ApiClient.MachineCatalogsAPIsDAAS.MachineCatalogsGetMachineCatalogMachines(ctx, machineCatalogId).Execute() - - return catalog, httpResp, err -} - -func updateCatalogImage(ctx context.Context, client *citrixdaasclient.CitrixDaasClient, resp *resource.UpdateResponse, catalog *citrixorchestration.MachineCatalogDetailResponseModel, hypervisor *citrixorchestration.HypervisorDetailResponseModel, hypervisorResourcePool *citrixorchestration.HypervisorResourcePoolDetailResponseModel, plan MachineCatalogResourceModel) error { - - catalogName := catalog.GetName() - catalogId := catalog.GetId() - - provScheme := catalog.GetProvisioningScheme() - masterImage := provScheme.GetMasterImage() - - // Check if XDPath has changed for the image - 
imagePath := "" - var err error - switch hypervisor.GetConnectionType() { - case citrixorchestration.HYPERVISORCONNECTIONTYPE_AZURE_RM: - newImage := plan.ProvisioningScheme.AzureMachineConfig.MasterImage.ValueString() - resourceGroup := plan.ProvisioningScheme.AzureMachineConfig.ResourceGroup.ValueString() - if newImage != "" { - storageAccount := plan.ProvisioningScheme.AzureMachineConfig.StorageAccount.ValueString() - container := plan.ProvisioningScheme.AzureMachineConfig.Container.ValueString() - if storageAccount != "" && container != "" { - queryPath := fmt.Sprintf( - "image.folder\\%s.resourcegroup\\%s.storageaccount\\%s.container", - resourceGroup, - storageAccount, - container) - imagePath, err = util.GetSingleResourcePathFromHypervisor(ctx, client, hypervisor.GetName(), hypervisorResourcePool.GetName(), queryPath, newImage, "", "") - if err != nil { - resp.Diagnostics.AddError( - "Error updating Machine Catalog", - fmt.Sprintf("Failed to resolve master image VHD %s in container %s of storage account %s, error: %s", newImage, container, storageAccount, err.Error()), - ) - return err - } - } else { - queryPath := fmt.Sprintf( - "image.folder\\%s.resourcegroup", - resourceGroup) - imagePath, err = util.GetSingleResourcePathFromHypervisor(ctx, client, hypervisor.GetName(), hypervisorResourcePool.GetName(), queryPath, newImage, "", "") - if err != nil { - resp.Diagnostics.AddError( - "Error updating Machine Catalog", - fmt.Sprintf("Failed to resolve master image Managed Disk or Snapshot %s, error: %s", newImage, err.Error()), - ) - return err - } - } - } else if plan.ProvisioningScheme.AzureMachineConfig.GalleryImage != nil { - gallery := plan.ProvisioningScheme.AzureMachineConfig.GalleryImage.Gallery.ValueString() - definition := plan.ProvisioningScheme.AzureMachineConfig.GalleryImage.Definition.ValueString() - version := plan.ProvisioningScheme.AzureMachineConfig.GalleryImage.Version.ValueString() - if gallery != "" && definition != "" { - queryPath := fmt.Sprintf( - "image.folder\\%s.resourcegroup\\%s.gallery\\%s.imagedefinition", - resourceGroup, - gallery, - definition) - imagePath, err = util.GetSingleResourcePathFromHypervisor(ctx, client, hypervisor.GetName(), hypervisorResourcePool.GetName(), queryPath, version, "", "") - if err != nil { - resp.Diagnostics.AddError( - "Error updating Machine Catalog", - fmt.Sprintf("Failed to locate Azure Image Gallery image %s of version %s in gallery %s, error: %s", newImage, version, gallery, err.Error()), - ) - return err - } - } - } - case citrixorchestration.HYPERVISORCONNECTIONTYPE_AWS: - imageId := fmt.Sprintf("%s (%s)", plan.ProvisioningScheme.AwsMachineConfig.MasterImage.ValueString(), plan.ProvisioningScheme.AwsMachineConfig.ImageAmi.ValueString()) - imagePath, err = util.GetSingleResourcePathFromHypervisor(ctx, client, hypervisor.GetName(), hypervisorResourcePool.GetName(), "", imageId, "template", "") - if err != nil { - resp.Diagnostics.AddError( - "Error updating Machine Catalog", - fmt.Sprintf("Failed to locate AWS image %s with AMI %s, error: %s", plan.ProvisioningScheme.AwsMachineConfig.MasterImage.ValueString(), plan.ProvisioningScheme.AwsMachineConfig.ImageAmi.ValueString(), err.Error()), - ) - return err - } - case citrixorchestration.HYPERVISORCONNECTIONTYPE_GOOGLE_CLOUD_PLATFORM: - newImage := plan.ProvisioningScheme.GcpMachineConfig.MasterImage.ValueString() - snapshot := plan.ProvisioningScheme.GcpMachineConfig.MachineSnapshot.ValueString() - if snapshot != "" { - queryPath := fmt.Sprintf("%s.vm", newImage) - 
imagePath, err = util.GetSingleResourcePathFromHypervisor(ctx, client, hypervisor.GetName(), hypervisorResourcePool.GetName(), queryPath, plan.ProvisioningScheme.GcpMachineConfig.MachineSnapshot.ValueString(), "snapshot", "") - if err != nil { - resp.Diagnostics.AddError( - "Error updating Machine Catalog", - fmt.Sprintf("Failed to locate master image snapshot %s on GCP, error: %s", plan.ProvisioningScheme.GcpMachineConfig.MachineProfile.ValueString(), err.Error()), - ) - return err - } - } else { - imagePath, err = util.GetSingleResourcePathFromHypervisor(ctx, client, hypervisor.GetName(), hypervisorResourcePool.GetName(), "", newImage, "vm", "") - if err != nil { - resp.Diagnostics.AddError( - "Error updating Machine Catalog", - fmt.Sprintf("Failed to locate master image machine %s on GCP, error: %s", plan.ProvisioningScheme.GcpMachineConfig.MachineProfile.ValueString(), err.Error()), - ) - return err - } - } - } - - if masterImage.GetXDPath() == imagePath { - return nil - } - - // Update Master Image for Machine Catalog - var updateProvisioningSchemeModel citrixorchestration.UpdateMachineCatalogProvisioningSchemeRequestModel - var rebootOption citrixorchestration.RebootMachinesRequestModel - - // Update the image immediately - rebootOption.SetRebootDuration(60) - rebootOption.SetWarningDuration(15) - rebootOption.SetWarningMessage("Warning: An important update is about to be installed. To ensure that no loss of data occurs, save any outstanding work and close all applications.") - updateProvisioningSchemeModel.SetRebootOptions(rebootOption) - updateProvisioningSchemeModel.SetMasterImagePath(imagePath) - updateProvisioningSchemeModel.SetStoreOldImage(true) - updateProvisioningSchemeModel.SetMinimumFunctionalLevel("L7_20") - updateMasterImageRequest := client.ApiClient.MachineCatalogsAPIsDAAS.MachineCatalogsUpdateMachineCatalogProvisioningScheme(ctx, catalogId) - updateMasterImageRequest = updateMasterImageRequest.UpdateMachineCatalogProvisioningSchemeRequestModel(updateProvisioningSchemeModel) - _, httpResp, err := citrixdaasclient.AddRequestData(updateMasterImageRequest, client).Async(true).Execute() - if err != nil { - resp.Diagnostics.AddError( - "Error updating Image for Machine Catalog "+catalogName, - "TransactionId: "+citrixdaasclient.GetTransactionIdFromHttpResponse(httpResp)+ - "\nError message: "+util.ReadClientError(err), - ) - } - - err = util.ProcessAsyncJobResponse(ctx, client, httpResp, "Error updating Image for Machine Catalog "+catalogName, &resp.Diagnostics, 60) - if err != nil { - return err - } - - return nil -} - -func deleteMachinesFromMcsCatalog(ctx context.Context, client *citrixdaasclient.CitrixDaasClient, resp *resource.UpdateResponse, catalog *citrixorchestration.MachineCatalogDetailResponseModel, plan MachineCatalogResourceModel) error { - catalogId := catalog.GetId() - catalogName := catalog.GetName() - - if catalog.GetAllocationType() != citrixorchestration.ALLOCATIONTYPE_RANDOM { - resp.Diagnostics.AddError( - "Error updating Machine Catalog "+catalogName, - "Deleting machine(s) is supported for machine catalogs with Random allocation type only.", - ) - return fmt.Errorf("deleting machine(s) is supported for machine catalogs with Random allocation type only") - } - - getMachinesResponse, err := util.GetMachineCatalogMachines(ctx, client, &resp.Diagnostics, catalogId) - if err != nil { - return err - } - - machineDeleteRequestCount := int(catalog.GetTotalCount()) - int(plan.ProvisioningScheme.NumTotalMachines.ValueInt64()) - machinesToDelete := 
[]citrixorchestration.MachineResponseModel{} - - for _, machine := range getMachinesResponse.GetItems() { - if !machine.GetDeliveryGroup().Id.IsSet() || machine.GetSessionCount() == 0 { - machinesToDelete = append(machinesToDelete, machine) - } - - if len(machinesToDelete) == machineDeleteRequestCount { - break - } - } - - machinesToDeleteCount := len(machinesToDelete) - - if machineDeleteRequestCount > machinesToDeleteCount { - errorString := fmt.Sprintf("%d machine(s) requested to be deleted. %d machine(s) qualify for deletion.", machineDeleteRequestCount, machinesToDeleteCount) - - resp.Diagnostics.AddError( - "Error deleting machine(s) from Machine Catalog "+catalogName, - errorString+" Ensure machine that needs to be deleted has no active sessions.", - ) - - return err - } - - return deleteMachinesFromCatalog(ctx, client, resp, plan, machinesToDelete, catalogName, true) -} - -func deleteMachinesFromManualCatalog(ctx context.Context, client *citrixdaasclient.CitrixDaasClient, resp *resource.UpdateResponse, deleteMachinesList map[string]bool, catalogNameOrId string, isCatalogPowerManaged bool) error { - - if len(deleteMachinesList) < 1 { - // nothing to delete - return nil - } - - getMachinesResponse, err := util.GetMachineCatalogMachines(ctx, client, &resp.Diagnostics, catalogNameOrId) - if err != nil { - return err - } - - machinesToDelete := []citrixorchestration.MachineResponseModel{} - for _, machine := range getMachinesResponse.Items { - if deleteMachinesList[strings.ToLower(machine.GetName())] { - machinesToDelete = append(machinesToDelete, machine) - } - } - - return deleteMachinesFromCatalog(ctx, client, resp, MachineCatalogResourceModel{}, machinesToDelete, catalogNameOrId, false) -} - -func deleteMachinesFromCatalog(ctx context.Context, client *citrixdaasclient.CitrixDaasClient, resp *resource.UpdateResponse, plan MachineCatalogResourceModel, machinesToDelete []citrixorchestration.MachineResponseModel, catalogNameOrId string, isMcsCatalog bool) error { - batchApiHeaders, httpResp, err := generateBatchApiHeaders(client, plan, false) - txId := citrixdaasclient.GetTransactionIdFromHttpResponse(httpResp) - if err != nil { - resp.Diagnostics.AddError( - "Error updating Machine Catalog "+catalogNameOrId, - "TransactionId: "+txId+ - "\nCould not put machine(s) into maintenance mode before deleting them, unexpected error: "+util.ReadClientError(err), - ) - return err - } - batchRequestItems := []citrixorchestration.BatchRequestItemModel{} - - for index, machineToDelete := range machinesToDelete { - if machineToDelete.DeliveryGroup == nil { - // if machine has no delivery group, there is no need to put it in maintenance mode - continue - } - - isMachineInMaintenanceMode := machineToDelete.GetInMaintenanceMode() - - if !isMachineInMaintenanceMode { - // machine is not in maintenance mode. 
-func deleteMachinesFromCatalog(ctx context.Context, client *citrixdaasclient.CitrixDaasClient, resp *resource.UpdateResponse, plan MachineCatalogResourceModel, machinesToDelete []citrixorchestration.MachineResponseModel, catalogNameOrId string, isMcsCatalog bool) error {
-	batchApiHeaders, httpResp, err := generateBatchApiHeaders(client, plan, false)
-	txId := citrixdaasclient.GetTransactionIdFromHttpResponse(httpResp)
-	if err != nil {
-		resp.Diagnostics.AddError(
-			"Error updating Machine Catalog "+catalogNameOrId,
-			"TransactionId: "+txId+
-				"\nCould not put machine(s) into maintenance mode before deleting them, unexpected error: "+util.ReadClientError(err),
-		)
-		return err
-	}
-	batchRequestItems := []citrixorchestration.BatchRequestItemModel{}
-
-	for index, machineToDelete := range machinesToDelete {
-		if machineToDelete.DeliveryGroup == nil {
-			// if machine has no delivery group, there is no need to put it in maintenance mode
-			continue
-		}
-
-		isMachineInMaintenanceMode := machineToDelete.GetInMaintenanceMode()
-
-		if !isMachineInMaintenanceMode {
-			// machine is not in maintenance mode. Put machine in maintenance mode first before deleting
-			var updateMachineModel citrixorchestration.UpdateMachineRequestModel
-			updateMachineModel.SetInMaintenanceMode(true)
-			updateMachineStringBody, err := util.ConvertToString(updateMachineModel)
-			if err != nil {
-				resp.Diagnostics.AddError(
-					"Error removing Machine(s) from Machine Catalog "+catalogNameOrId,
-					"An unexpected error occurred: "+err.Error(),
-				)
-				return err
-			}
-			relativeUrl := fmt.Sprintf("/Machines/%s?async=true", machineToDelete.GetId())
-
-			var batchRequestItem citrixorchestration.BatchRequestItemModel
-			batchRequestItem.SetReference(strconv.Itoa(index))
-			batchRequestItem.SetMethod(http.MethodPatch)
-			batchRequestItem.SetRelativeUrl(client.GetBatchRequestItemRelativeUrl(relativeUrl))
-			batchRequestItem.SetBody(updateMachineStringBody)
-			batchRequestItem.SetHeaders(batchApiHeaders)
-			batchRequestItems = append(batchRequestItems, batchRequestItem)
-		}
-	}
-
-	if len(batchRequestItems) > 0 {
-		// If there are any machines that need to be put in maintenance mode
-		var batchRequestModel citrixorchestration.BatchRequestModel
-		batchRequestModel.SetItems(batchRequestItems)
-		successfulJobs, txId, err := citrixdaasclient.PerformBatchOperation(ctx, client, batchRequestModel)
-		if err != nil {
-			resp.Diagnostics.AddError(
-				"Error deleting machine(s) from Machine Catalog "+catalogNameOrId,
-				"TransactionId: "+txId+
-					"\nError message: "+util.ReadClientError(err),
-			)
-			return err
-		}
-
-		if successfulJobs < len(batchRequestItems) {
-			errMsg := fmt.Sprintf("An error occurred while putting machine(s) into maintenance mode before deleting them. %d of %d machines were put in the maintenance mode.", successfulJobs, len(batchRequestItems))
-			err = fmt.Errorf(errMsg)
-			resp.Diagnostics.AddError(
-				"Error updating Machine Catalog "+catalogNameOrId,
-				"TransactionId: "+txId+
-					"\n"+errMsg,
-			)
-
-			return err
-		}
-	}
-
-	batchApiHeaders, httpResp, err = generateBatchApiHeaders(client, plan, isMcsCatalog)
-	txId = citrixdaasclient.GetTransactionIdFromHttpResponse(httpResp)
-	if err != nil {
-		resp.Diagnostics.AddError(
-			"Error updating Machine Catalog "+catalogNameOrId,
-			"TransactionId: "+txId+
-				"\nCould not delete machine(s) from machine catalog, unexpected error: "+util.ReadClientError(err),
-		)
-		return err
-	}
-
-	deleteAccountOption := "Leave"
-	if isMcsCatalog {
-		deleteAccountOption = "Delete"
-	}
-	batchRequestItems = []citrixorchestration.BatchRequestItemModel{}
-	for index, machineToDelete := range machinesToDelete {
-		var batchRequestItem citrixorchestration.BatchRequestItemModel
-		relativeUrl := fmt.Sprintf("/Machines/%s?deleteVm=%t&purgeDBOnly=false&deleteAccount=%s&async=true", machineToDelete.GetId(), isMcsCatalog, deleteAccountOption)
-		batchRequestItem.SetReference(strconv.Itoa(index))
-		batchRequestItem.SetMethod(http.MethodDelete)
-		batchRequestItem.SetHeaders(batchApiHeaders)
-		batchRequestItem.SetRelativeUrl(client.GetBatchRequestItemRelativeUrl(relativeUrl))
-		batchRequestItems = append(batchRequestItems, batchRequestItem)
-	}
-
-	batchRequestModel := citrixorchestration.BatchRequestModel{}
-	batchRequestModel.SetItems(batchRequestItems)
-	successfulJobs, txId, err := citrixdaasclient.PerformBatchOperation(ctx, client, batchRequestModel)
-	if err != nil {
-		resp.Diagnostics.AddError(
-			"Error deleting machine(s) from Machine Catalog "+catalogNameOrId,
-			"TransactionId: "+txId+
-				"\nError message: "+util.ReadClientError(err),
-		)
-		return err
-	}
-
-	if successfulJobs < len(machinesToDelete) {
-		errMsg := fmt.Sprintf("An error occurred while deleting machine(s) from Machine Catalog. %d of %d machines were deleted from the Machine Catalog.", successfulJobs, len(machinesToDelete))
-		err = fmt.Errorf(errMsg)
-		resp.Diagnostics.AddError(
-			"Error updating Machine Catalog "+catalogNameOrId,
-			"TransactionId: "+txId+
-				"\n"+errMsg,
-		)
-
-		return err
-	}
-
-	return nil
-}
-
-			)
-			return err
-		}
-
-		if successfulJobs < int(addMachinesCount) {
-			errMsg := fmt.Sprintf("An error occurred while adding machine(s) to the Machine Catalog. %d of %d machines were added to the Machine Catalog.", successfulJobs, addMachinesCount)
-			err = fmt.Errorf(errMsg)
-			resp.Diagnostics.AddError(
-				"Error updating Machine Catalog "+catalogName,
-				"TransactionId: "+txId+
-					"\n"+errMsg,
-			)
-
-			return err
-		}
-
-		return nil
-}
-
-func addMachinesToManualCatalog(ctx context.Context, client *citrixdaasclient.CitrixDaasClient, resp *resource.UpdateResponse, addMachinesList []MachineAccountsModel, catalogIdOrName string) error {
-
-	if len(addMachinesList) < 1 {
-		// no machines to add
-		return nil
-	}
-
-	addMachinesRequest, err := getMachinesForManualCatalogs(ctx, client, addMachinesList)
-	if err != nil {
-		resp.Diagnostics.AddError(
-			"Error adding machine(s) to Machine Catalog "+catalogIdOrName,
-			fmt.Sprintf("Failed to resolve machines, error: %s", err.Error()),
-		)
-
-		return err
-	}
-
-	batchApiHeaders, httpResp, err := generateBatchApiHeaders(client, MachineCatalogResourceModel{}, false)
-	txId := citrixdaasclient.GetTransactionIdFromHttpResponse(httpResp)
-	if err != nil {
-		resp.Diagnostics.AddError(
-			"Error updating Machine Catalog "+catalogIdOrName,
-			"TransactionId: "+txId+
-				"\nCould not add machine to Machine Catalog, unexpected error: "+util.ReadClientError(err),
-		)
-		return err
-	}
-
-	batchRequestItems := []citrixorchestration.BatchRequestItemModel{}
-	relativeUrl := fmt.Sprintf("/MachineCatalogs/%s/Machines?async=true", catalogIdOrName)
-	for i := 0; i < len(addMachinesRequest); i++ {
-		addMachineRequestStringBody, err := util.ConvertToString(addMachinesRequest[i])
-		if err != nil {
-			resp.Diagnostics.AddError(
-				"Error adding Machine to Machine Catalog "+catalogIdOrName,
-				"An unexpected error occurred: "+err.Error(),
-			)
-			return err
-		}
-		var batchRequestItem citrixorchestration.BatchRequestItemModel
-		batchRequestItem.SetMethod(http.MethodPost)
-		batchRequestItem.SetReference(strconv.Itoa(i))
-		batchRequestItem.SetRelativeUrl(client.GetBatchRequestItemRelativeUrl(relativeUrl))
-		batchRequestItem.SetBody(addMachineRequestStringBody)
-		batchRequestItem.SetHeaders(batchApiHeaders)
-		batchRequestItems = append(batchRequestItems, batchRequestItem)
-	}
-
-	var batchRequestModel citrixorchestration.BatchRequestModel
-	batchRequestModel.SetItems(batchRequestItems)
-	successfulJobs, txId, err := citrixdaasclient.PerformBatchOperation(ctx, client, batchRequestModel)
-	if err != nil {
-		resp.Diagnostics.AddError(
-			"Error adding machine(s) to Machine Catalog "+catalogIdOrName,
-			"TransactionId: "+txId+
-				"\nError message: "+util.ReadClientError(err),
-		)
-		return err
-	}
-
-	if successfulJobs < len(batchRequestItems) {
-		errMsg := fmt.Sprintf("An error occurred while adding machine(s) to the Machine Catalog. %d of %d machines were added to the Machine Catalog.", successfulJobs, len(batchRequestItems))
-		err = fmt.Errorf(errMsg)
-		resp.Diagnostics.AddError(
-			"Error updating Machine Catalog "+catalogIdOrName,
-			"TransactionId: "+txId+
-				"\n"+errMsg,
-		)
-
-		return err
-	}
-
-	return nil
-}
-
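Both add paths above fan one logical add out into a batch: one POST to /MachineCatalogs/{id}/Machines per machine, each item carrying the JSON request body and the loop index as its reference so successful jobs can be counted against the requested total. A minimal sketch of that fan-out using the BatchRequestItemModel setters from the deleted code; the headers and the client's relative-URL helper are elided here:

package main

import (
	"fmt"
	"net/http"
	"strconv"

	citrixorchestration "github.com/citrix/citrix-daas-rest-go/citrixorchestration"
)

// buildAddMachineBatch builds count identical POST items; each carries the
// same stringified body and a unique reference used to match up job results.
func buildAddMachineBatch(catalogId, body string, count int) []citrixorchestration.BatchRequestItemModel {
	relativeUrl := fmt.Sprintf("/MachineCatalogs/%s/Machines?async=true", catalogId)
	items := []citrixorchestration.BatchRequestItemModel{}
	for i := 0; i < count; i++ {
		var item citrixorchestration.BatchRequestItemModel
		item.SetMethod(http.MethodPost)
		item.SetReference(strconv.Itoa(i))
		// The real code routes this through client.GetBatchRequestItemRelativeUrl.
		item.SetRelativeUrl(relativeUrl)
		item.SetBody(body)
		items = append(items, item)
	}
	return items
}

func main() {
	items := buildAddMachineBatch("catalog-id", `{}`, 3)
	fmt.Println(len(items)) // 3
}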
-func getProvSchemeForCatalog(ctx context.Context, client *citrixdaasclient.CitrixDaasClient, plan MachineCatalogResourceModel, hypervisor *citrixorchestration.HypervisorDetailResponseModel, hypervisorResourcePool *citrixorchestration.HypervisorResourcePoolDetailResponseModel) (*citrixorchestration.CreateMachineCatalogProvisioningSchemeRequestModel, string) {
-
-	var machineAccountCreationRules citrixorchestration.MachineAccountCreationRulesRequestModel
-	machineAccountCreationRules.SetNamingScheme(plan.ProvisioningScheme.MachineAccountCreationRules.NamingScheme.ValueString())
-	namingScheme, err := citrixorchestration.NewNamingSchemeTypeFromValue(plan.ProvisioningScheme.MachineAccountCreationRules.NamingSchemeType.ValueString())
-	if err != nil {
-		return nil, "Unsupported machine account naming scheme type."
-	}
-
-	machineAccountCreationRules.SetNamingSchemeType(*namingScheme)
-	if plan.ProvisioningScheme.MachineDomainIdentity != nil {
-		machineAccountCreationRules.SetDomain(plan.ProvisioningScheme.MachineDomainIdentity.Domain.ValueString())
-		machineAccountCreationRules.SetOU(plan.ProvisioningScheme.MachineDomainIdentity.Ou.ValueString())
-	}
-
-	var provisioningScheme citrixorchestration.CreateMachineCatalogProvisioningSchemeRequestModel
-	provisioningScheme.SetNumTotalMachines(int32(plan.ProvisioningScheme.NumTotalMachines.ValueInt64()))
-	identityType := citrixorchestration.IdentityType(plan.ProvisioningScheme.IdentityType.ValueString())
-	provisioningScheme.SetIdentityType(identityType)
-	provisioningScheme.SetWorkGroupMachines(false) // Non-Managed setup does not support non-domain joined
-	if identityType == citrixorchestration.IDENTITYTYPE_AZURE_AD {
-		provisioningScheme.SetWorkGroupMachines(true)
-	}
-	provisioningScheme.SetMachineAccountCreationRules(machineAccountCreationRules)
-	provisioningScheme.SetResourcePool(plan.ProvisioningScheme.HypervisorResourcePool.ValueString())
-
-	switch hypervisor.GetConnectionType() {
-	case citrixorchestration.HYPERVISORCONNECTIONTYPE_AZURE_RM:
-		serviceOffering := plan.ProvisioningScheme.AzureMachineConfig.ServiceOffering.ValueString()
-		queryPath := "serviceoffering.folder"
-		serviceOfferingPath, err := util.GetSingleResourcePathFromHypervisor(ctx, client, hypervisor.GetName(), hypervisorResourcePool.GetName(), queryPath, serviceOffering, "serviceoffering", "")
-		if err != nil {
-			return nil, fmt.Sprintf("Failed to resolve service offering %s on Azure, error: %s", serviceOffering, err.Error())
-		}
-		provisioningScheme.SetServiceOfferingPath(serviceOfferingPath)
-
-		resourceGroup := plan.ProvisioningScheme.AzureMachineConfig.ResourceGroup.ValueString()
-		masterImage := plan.ProvisioningScheme.AzureMachineConfig.MasterImage.ValueString()
-		imagePath := ""
-		if masterImage != "" {
-			storageAccount := plan.ProvisioningScheme.AzureMachineConfig.StorageAccount.ValueString()
-			container := plan.ProvisioningScheme.AzureMachineConfig.Container.ValueString()
-			if storageAccount != "" && container != "" {
-				queryPath = fmt.Sprintf(
-					"image.folder\\%s.resourcegroup\\%s.storageaccount\\%s.container",
-					resourceGroup,
-					storageAccount,
-					container)
-				imagePath, err = util.GetSingleResourcePathFromHypervisor(ctx, client, hypervisor.GetName(), hypervisorResourcePool.GetName(), queryPath, masterImage, "", "")
-				if err != nil {
-					return nil, fmt.Sprintf("Failed to resolve master image VHD %s in container %s of storage account %s, error: %s", masterImage, container, storageAccount, err.Error())
-				}
-			} else {
-				queryPath = fmt.Sprintf(
-					"image.folder\\%s.resourcegroup",
-					resourceGroup)
-				imagePath, err = util.GetSingleResourcePathFromHypervisor(ctx, client, hypervisor.GetName(), hypervisorResourcePool.GetName(), queryPath, masterImage, "", "")
-				if err != nil {
-					return nil, fmt.Sprintf("Failed to resolve master image Managed Disk or Snapshot %s, error: %s", masterImage, err.Error())
-				}
-			}
-		} else if plan.ProvisioningScheme.AzureMachineConfig.GalleryImage != nil {
-			gallery := plan.ProvisioningScheme.AzureMachineConfig.GalleryImage.Gallery.ValueString()
-			definition := plan.ProvisioningScheme.AzureMachineConfig.GalleryImage.Definition.ValueString()
-			version := plan.ProvisioningScheme.AzureMachineConfig.GalleryImage.Version.ValueString()
-			if gallery != "" && definition != "" {
-				queryPath = fmt.Sprintf(
-					"image.folder\\%s.resourcegroup\\%s.gallery\\%s.imagedefinition",
-					resourceGroup,
-					gallery,
-					definition)
-				imagePath, err = util.GetSingleResourcePathFromHypervisor(ctx, client, hypervisor.GetName(), hypervisorResourcePool.GetName(), queryPath, version, "", "")
-				if err != nil {
-					return nil, fmt.Sprintf("Failed to locate Azure Image Gallery image %s of version %s in gallery %s, error: %s", definition, version, gallery, err.Error())
-				}
-			}
-		}
-
-		provisioningScheme.SetMasterImagePath(imagePath)
-
-		machineProfile := plan.ProvisioningScheme.AzureMachineConfig.MachineProfile
-		if machineProfile != nil {
-			machine := machineProfile.MachineProfileVmName.ValueString()
-			machineProfileResourceGroup := machineProfile.MachineProfileResourceGroup.ValueString()
-			queryPath = fmt.Sprintf("machineprofile.folder\\%s.resourcegroup", machineProfileResourceGroup)
-			machineProfilePath, err := util.GetSingleResourcePathFromHypervisor(ctx, client, hypervisor.GetName(), hypervisorResourcePool.GetName(), queryPath, machine, "vm", "")
-			if err != nil {
-				return nil, fmt.Sprintf("Failed to locate machine profile %s on Azure, error: %s", plan.ProvisioningScheme.AzureMachineConfig.MachineProfile.MachineProfileVmName.ValueString(), err.Error())
-			}
-			provisioningScheme.SetMachineProfilePath(machineProfilePath)
-		}
-
-		if plan.ProvisioningScheme.AzureMachineConfig.WritebackCache != nil {
-			provisioningScheme.SetUseWriteBackCache(true)
-			provisioningScheme.SetWriteBackCacheDiskSizeGB(int32(plan.ProvisioningScheme.AzureMachineConfig.WritebackCache.WriteBackCacheDiskSizeGB.ValueInt64()))
-			if !plan.ProvisioningScheme.AzureMachineConfig.WritebackCache.WriteBackCacheMemorySizeMB.IsNull() {
-				provisioningScheme.SetWriteBackCacheMemorySizeMB(int32(plan.ProvisioningScheme.AzureMachineConfig.WritebackCache.WriteBackCacheMemorySizeMB.ValueInt64()))
-			}
-			if plan.ProvisioningScheme.AzureMachineConfig.WritebackCache.PersistVm.ValueBool() && !plan.ProvisioningScheme.AzureMachineConfig.WritebackCache.PersistOsDisk.ValueBool() {
-				return nil, "Could not set persist_vm attribute, which can only be set when persist_os_disk = true"
-			}
-		}
-	case citrixorchestration.HYPERVISORCONNECTIONTYPE_AWS:
-		serviceOffering := plan.ProvisioningScheme.AwsMachineConfig.ServiceOffering.ValueString()
-		serviceOfferingPath, err := util.GetSingleResourcePathFromHypervisor(ctx, client, hypervisor.GetName(), hypervisorResourcePool.GetName(), "", serviceOffering, "serviceoffering", "")
-		if err != nil {
-			return nil, fmt.Sprintf("Failed to resolve service offering %s on AWS, error: %s", serviceOffering, err.Error())
-		}
-		provisioningScheme.SetServiceOfferingPath(serviceOfferingPath)
-
-		masterImage := plan.ProvisioningScheme.AwsMachineConfig.MasterImage.ValueString()
-		imageId := fmt.Sprintf("%s (%s)", masterImage, plan.ProvisioningScheme.AwsMachineConfig.ImageAmi.ValueString())
-		imagePath, err := util.GetSingleResourcePathFromHypervisor(ctx, client, hypervisor.GetName(), hypervisorResourcePool.GetName(), "", imageId, "template", "")
-		if err != nil {
-			return nil, fmt.Sprintf("Failed to locate AWS image %s with AMI %s, error: %s", masterImage, plan.ProvisioningScheme.AwsMachineConfig.ImageAmi.ValueString(), err.Error())
-		}
-		provisioningScheme.SetMasterImagePath(imagePath)
-	case citrixorchestration.HYPERVISORCONNECTIONTYPE_GOOGLE_CLOUD_PLATFORM:
-		imagePath := ""
-		snapshot := plan.ProvisioningScheme.GcpMachineConfig.MachineSnapshot.ValueString()
-		if snapshot != "" {
-			queryPath := fmt.Sprintf("%s.vm", plan.ProvisioningScheme.GcpMachineConfig.MasterImage.ValueString())
-			imagePath, err = util.GetSingleResourcePathFromHypervisor(ctx, client, hypervisor.GetName(), hypervisorResourcePool.GetName(), queryPath, plan.ProvisioningScheme.GcpMachineConfig.MachineSnapshot.ValueString(), "snapshot", "")
-			if err != nil {
-				return nil, fmt.Sprintf("Failed to locate master image snapshot %s on GCP, error: %s", plan.ProvisioningScheme.GcpMachineConfig.MachineSnapshot.ValueString(), err.Error())
-			}
-		} else {
-			imagePath, err = util.GetSingleResourcePathFromHypervisor(ctx, client, hypervisor.GetName(), hypervisorResourcePool.GetName(), "", plan.ProvisioningScheme.GcpMachineConfig.MasterImage.ValueString(), "vm", "")
-			if err != nil {
-				return nil, fmt.Sprintf("Failed to locate master image machine %s on GCP, error: %s", plan.ProvisioningScheme.GcpMachineConfig.MasterImage.ValueString(), err.Error())
-			}
-		}
-
-		provisioningScheme.SetMasterImagePath(imagePath)
-
-		machineProfile := plan.ProvisioningScheme.GcpMachineConfig.MachineProfile.ValueString()
-		if machineProfile != "" {
-			machineProfilePath, err := util.GetSingleResourcePathFromHypervisor(ctx, client, hypervisor.GetName(), hypervisorResourcePool.GetName(), "", machineProfile, "vm", "")
-			if err != nil {
-				return nil, fmt.Sprintf("Failed to locate machine profile %s on GCP, error: %s", plan.ProvisioningScheme.GcpMachineConfig.MachineProfile.ValueString(), err.Error())
-			}
-			provisioningScheme.SetMachineProfilePath(machineProfilePath)
-		}
-
-		if plan.ProvisioningScheme.GcpMachineConfig.WritebackCache != nil {
-			provisioningScheme.SetUseWriteBackCache(true)
-			provisioningScheme.SetWriteBackCacheDiskSizeGB(int32(plan.ProvisioningScheme.GcpMachineConfig.WritebackCache.WriteBackCacheDiskSizeGB.ValueInt64()))
-			if !plan.ProvisioningScheme.GcpMachineConfig.WritebackCache.WriteBackCacheMemorySizeMB.IsNull() {
-				provisioningScheme.SetWriteBackCacheMemorySizeMB(int32(plan.ProvisioningScheme.GcpMachineConfig.WritebackCache.WriteBackCacheMemorySizeMB.ValueInt64()))
-			}
-			if plan.ProvisioningScheme.GcpMachineConfig.WritebackCache.PersistVm.ValueBool() && !plan.ProvisioningScheme.GcpMachineConfig.WritebackCache.PersistOsDisk.ValueBool() {
-				return nil, "Could not set persist_vm attribute, which can only be set when persist_os_disk = true"
-			}
-		}
-	}
-
-	if plan.ProvisioningScheme.NetworkMapping != nil {
-		networkMapping, err := ParseNetworkMappingToClientModel(*plan.ProvisioningScheme.NetworkMapping, hypervisorResourcePool)
-
if err != nil { - return nil, err.Error() - } - provisioningScheme.SetNetworkMapping(networkMapping) - } - - customProperties := ParseCustomPropertiesToClientModel(*plan.ProvisioningScheme, hypervisor.ConnectionType) - provisioningScheme.SetCustomProperties(customProperties) - - return &provisioningScheme, "" -} - -func getMachinesForManualCatalogs(ctx context.Context, client *citrixdaasclient.CitrixDaasClient, machineAccounts []MachineAccountsModel) ([]citrixorchestration.AddMachineToMachineCatalogRequestModel, error) { - if machineAccounts == nil { - return nil, nil - } - - addMachineRequestList := []citrixorchestration.AddMachineToMachineCatalogRequestModel{} - for _, machineAccount := range machineAccounts { - hypervisorId := machineAccount.Hypervisor.ValueString() - var hypervisor *citrixorchestration.HypervisorDetailResponseModel - var err error - if hypervisorId != "" { - hypervisor, err = util.GetHypervisor(ctx, client, nil, hypervisorId) - - if err != nil { - return nil, err - } - } - - for _, machine := range machineAccount.Machines { - addMachineRequest := citrixorchestration.AddMachineToMachineCatalogRequestModel{} - addMachineRequest.SetMachineName(machine.MachineName.ValueString()) - - if hypervisorId == "" { - // no hypervisor, non-power managed manual catalog - addMachineRequestList = append(addMachineRequestList, addMachineRequest) - continue - } - - machineName := strings.Split(machine.MachineName.ValueString(), "\\")[1] - var vmId string - connectionType := hypervisor.GetConnectionType() - switch connectionType { - case citrixorchestration.HYPERVISORCONNECTIONTYPE_AZURE_RM: - if machine.Region.IsNull() || machine.ResourceGroupName.IsNull() { - return nil, fmt.Errorf("region and resource_group_name are required for Azure") - } - region, err := util.GetSingleHypervisorResource(ctx, client, hypervisorId, "", machine.Region.ValueString(), "", "", connectionType) - if err != nil { - return nil, err - } - regionPath := region.GetXDPath() - vm, err := util.GetSingleHypervisorResource(ctx, client, hypervisorId, fmt.Sprintf("%s/vm.folder", regionPath), machineName, "", machine.ResourceGroupName.ValueString(), connectionType) - if err != nil { - return nil, err - } - vmId = vm.GetId() - case citrixorchestration.HYPERVISORCONNECTIONTYPE_AWS: - if machine.AvailabilityZone.IsNull() { - return nil, fmt.Errorf("availability_zone is required for AWS") - } - availabilityZone, err := util.GetSingleHypervisorResource(ctx, client, hypervisorId, "", machine.AvailabilityZone.ValueString(), "", "", connectionType) - if err != nil { - return nil, err - } - availabilityZonePath := availabilityZone.GetXDPath() - vm, err := util.GetSingleHypervisorResource(ctx, client, hypervisorId, availabilityZonePath, machineName, "Vm", "", connectionType) - if err != nil { - return nil, err - } - vmId = vm.GetId() - case citrixorchestration.HYPERVISORCONNECTIONTYPE_GOOGLE_CLOUD_PLATFORM: - if machine.Region.IsNull() || machine.ProjectName.IsNull() { - return nil, fmt.Errorf("region and project_name are required for GCP") - } - projectName, err := util.GetSingleHypervisorResource(ctx, client, hypervisorId, "", machine.ProjectName.ValueString(), "", "", connectionType) - if err != nil { - return nil, err - } - projectNamePath := projectName.GetXDPath() - vm, err := util.GetSingleHypervisorResource(ctx, client, hypervisorId, fmt.Sprintf("%s\\%s.region", projectNamePath, machine.Region.ValueString()), machineName, "Vm", "", connectionType) - if err != nil { - return nil, err - } - vmId = vm.GetId() - } - - 
addMachineRequest.SetHostedMachineId(vmId) - addMachineRequest.SetHypervisorConnection(hypervisorId) - - addMachineRequestList = append(addMachineRequestList, addMachineRequest) - } - } - - return addMachineRequestList, nil -} - -func createAddAndRemoveMachinesListForManualCatalogs(state, plan MachineCatalogResourceModel) ([]MachineAccountsModel, map[string]bool) { - addMachinesList := []MachineAccountsModel{} - existingMachineAccounts := map[string]map[string]bool{} - - // create map for existing machines marking all machines for deletion - if state.MachineAccounts != nil { - for _, machineAccount := range state.MachineAccounts { - for _, machine := range machineAccount.Machines { - machineMap, exists := existingMachineAccounts[machineAccount.Hypervisor.ValueString()] - if !exists { - existingMachineAccounts[machineAccount.Hypervisor.ValueString()] = map[string]bool{} - machineMap = existingMachineAccounts[machineAccount.Hypervisor.ValueString()] - } - machineMap[strings.ToLower(machine.MachineName.ValueString())] = true - } - } - } - - // iterate over plan and if machine already exists, mark false for deletion. If not, add it to the addMachineList - if plan.MachineAccounts != nil { - for _, machineAccount := range plan.MachineAccounts { - machineAccountMachines := []MachineCatalogMachineModel{} - for _, machine := range machineAccount.Machines { - if existingMachineAccounts[machineAccount.Hypervisor.ValueString()][strings.ToLower(machine.MachineName.ValueString())] { - // Machine exists. Mark false for deletion - existingMachineAccounts[machineAccount.Hypervisor.ValueString()][strings.ToLower(machine.MachineName.ValueString())] = false - } else { - // Machine does not exist and needs to be added - machineAccountMachines = append(machineAccountMachines, machine) - } - } - - if len(machineAccountMachines) > 0 { - var addMachineAccount MachineAccountsModel - addMachineAccount.Hypervisor = machineAccount.Hypervisor - addMachineAccount.Machines = machineAccountMachines - addMachinesList = append(addMachinesList, addMachineAccount) - } - } - } - - deleteMachinesMap := map[string]bool{} - - for _, machineMap := range existingMachineAccounts { - for machineName, canBeDeleted := range machineMap { - if canBeDeleted { - deleteMachinesMap[machineName] = true - } - } - } - - return addMachinesList, deleteMachinesMap -} - -func getRemotePcEnrollmentScopes(plan MachineCatalogResourceModel, includeMachines bool) []citrixorchestration.RemotePCEnrollmentScopeRequestModel { - remotePCEnrollmentScopes := []citrixorchestration.RemotePCEnrollmentScopeRequestModel{} - if plan.RemotePcOus != nil { - for _, ou := range plan.RemotePcOus { - var remotePCEnrollmentScope citrixorchestration.RemotePCEnrollmentScopeRequestModel - remotePCEnrollmentScope.SetIncludeSubfolders(ou.IncludeSubFolders.ValueBool()) - remotePCEnrollmentScope.SetOU(ou.OUName.ValueString()) - remotePCEnrollmentScope.SetIsOrganizationalUnit(true) - remotePCEnrollmentScopes = append(remotePCEnrollmentScopes, remotePCEnrollmentScope) - } - } - - if includeMachines && plan.MachineAccounts != nil { - for _, machineAccount := range plan.MachineAccounts { - for _, machine := range machineAccount.Machines { - var remotePCEnrollmentScope citrixorchestration.RemotePCEnrollmentScopeRequestModel - remotePCEnrollmentScope.SetIncludeSubfolders(false) - remotePCEnrollmentScope.SetOU(machine.MachineName.ValueString()) - remotePCEnrollmentScope.SetIsOrganizationalUnit(false) - remotePCEnrollmentScopes = append(remotePCEnrollmentScopes, remotePCEnrollmentScope) 
- } - } - } - - return remotePCEnrollmentScopes -} diff --git a/internal/daas/resources/machine_catalog/machine_catalog_resource_model.go b/internal/daas/resources/machine_catalog/machine_catalog_resource_model.go deleted file mode 100644 index 94c9843..0000000 --- a/internal/daas/resources/machine_catalog/machine_catalog_resource_model.go +++ /dev/null @@ -1,538 +0,0 @@ -// Copyright © 2023. Citrix Systems, Inc. - -package machine_catalog - -import ( - "context" - "fmt" - "reflect" - "strings" - - citrixorchestration "github.com/citrix/citrix-daas-rest-go/citrixorchestration" - citrixclient "github.com/citrix/citrix-daas-rest-go/client" - - "github.com/citrix/terraform-provider-citrix/internal/util" - - "github.com/hashicorp/terraform-plugin-framework/types" - - "golang.org/x/exp/slices" -) - -// MachineCatalogResourceModel maps the resource schema data. -type MachineCatalogResourceModel struct { - Id types.String `tfsdk:"id"` - Name types.String `tfsdk:"name"` - Description types.String `tfsdk:"description"` - IsPowerManaged types.Bool `tfsdk:"is_power_managed"` - IsRemotePc types.Bool `tfsdk:"is_remote_pc"` - AllocationType types.String `tfsdk:"allocation_type"` - SessionSupport types.String `tfsdk:"session_support"` - Zone types.String `tfsdk:"zone"` - VdaUpgradeType types.String `tfsdk:"vda_upgrade_type"` - ProvisioningType types.String `tfsdk:"provisioning_type"` - ProvisioningScheme *ProvisioningSchemeModel `tfsdk:"provisioning_scheme"` - MachineAccounts []MachineAccountsModel `tfsdk:"machine_accounts"` - RemotePcOus []RemotePcOuModel `tfsdk:"remote_pc_ous"` -} - -type MachineAccountsModel struct { - Hypervisor types.String `tfsdk:"hypervisor"` - Machines []MachineCatalogMachineModel `tfsdk:"machines"` -} - -type MachineCatalogMachineModel struct { - MachineName types.String `tfsdk:"machine_name"` - Region types.String `tfsdk:"region"` - ResourceGroupName types.String `tfsdk:"resource_group_name"` - ProjectName types.String `tfsdk:"project_name"` - AvailabilityZone types.String `tfsdk:"availability_zone"` -} - -// ProvisioningSchemeModel maps the nested provisioning scheme resource schema data. 
-type ProvisioningSchemeModel struct {
-	Hypervisor                  types.String                      `tfsdk:"hypervisor"`
-	HypervisorResourcePool      types.String                      `tfsdk:"hypervisor_resource_pool"`
-	AzureMachineConfig          *AzureMachineConfigModel          `tfsdk:"azure_machine_config"`
-	AwsMachineConfig            *AwsMachineConfigModel            `tfsdk:"aws_machine_config"`
-	GcpMachineConfig            *GcpMachineConfigModel            `tfsdk:"gcp_machine_config"`
-	NumTotalMachines            types.Int64                       `tfsdk:"number_of_total_machines"`
-	NetworkMapping              *NetworkMappingModel              `tfsdk:"network_mapping"`
-	AvailabilityZones           types.String                      `tfsdk:"availability_zones"`
-	IdentityType                types.String                      `tfsdk:"identity_type"`
-	MachineDomainIdentity       *MachineDomainIdentityModel       `tfsdk:"machine_domain_identity"`
-	MachineAccountCreationRules *MachineAccountCreationRulesModel `tfsdk:"machine_account_creation_rules"`
-}
-
-type MachineProfileModel struct {
-	MachineProfileVmName        types.String `tfsdk:"machine_profile_vm_name"`
-	MachineProfileResourceGroup types.String `tfsdk:"machine_profile_resource_group"`
-}
-
-type MachineDomainIdentityModel struct {
-	Domain                 types.String `tfsdk:"domain"`
-	Ou                     types.String `tfsdk:"domain_ou"`
-	ServiceAccount         types.String `tfsdk:"service_account"`
-	ServiceAccountPassword types.String `tfsdk:"service_account_password"`
-}
-
-type GalleryImageModel struct {
-	Gallery    types.String `tfsdk:"gallery"`
-	Definition types.String `tfsdk:"definition"`
-	Version    types.String `tfsdk:"version"`
-}
-
-// WritebackCacheModel maps the write back cache configuration schema data.
-type WritebackCacheModel struct {
-	PersistWBC                 types.Bool   `tfsdk:"persist_wbc"`
-	WBCDiskStorageType         types.String `tfsdk:"wbc_disk_storage_type"`
-	PersistOsDisk              types.Bool   `tfsdk:"persist_os_disk"`
-	PersistVm                  types.Bool   `tfsdk:"persist_vm"`
-	StorageCostSaving          types.Bool   `tfsdk:"storage_cost_saving"`
-	WriteBackCacheDiskSizeGB   types.Int64  `tfsdk:"writeback_cache_disk_size_gb"`
-	WriteBackCacheMemorySizeMB types.Int64  `tfsdk:"writeback_cache_memory_size_mb"`
-}
-
-// MachineAccountCreationRulesModel maps the nested machine account creation rules resource schema data.
-type MachineAccountCreationRulesModel struct {
-	NamingScheme     types.String `tfsdk:"naming_scheme"`
-	NamingSchemeType types.String `tfsdk:"naming_scheme_type"`
-}
-
-// NetworkMappingModel maps the nested network mapping resource schema data.
-type NetworkMappingModel struct { - NetworkDevice types.String `tfsdk:"network_device"` - Network types.String `tfsdk:"network"` -} - -type RemotePcOuModel struct { - IncludeSubFolders types.Bool `tfsdk:"include_subfolders"` - OUName types.String `tfsdk:"ou_name"` -} - -func (r MachineCatalogResourceModel) RefreshPropertyValues(ctx context.Context, client *citrixclient.CitrixDaasClient, catalog *citrixorchestration.MachineCatalogDetailResponseModel, connectionType *citrixorchestration.HypervisorConnectionType, machines *citrixorchestration.MachineResponseModelCollection) MachineCatalogResourceModel { - // Machine Catalog Properties - r.Id = types.StringValue(catalog.GetId()) - r.Name = types.StringValue(catalog.GetName()) - if catalog.GetDescription() != "" { - r.Description = types.StringValue(catalog.GetDescription()) - } else { - r.Description = types.StringNull() - } - allocationType := catalog.GetAllocationType() - r.AllocationType = types.StringValue(allocationTypeEnumToString(allocationType)) - sessionSupport := catalog.GetSessionSupport() - r.SessionSupport = types.StringValue(reflect.ValueOf(sessionSupport).String()) - - catalogZone := catalog.GetZone() - r.Zone = types.StringValue(catalogZone.GetId()) - - if catalog.UpgradeInfo != nil { - if *catalog.UpgradeInfo.UpgradeType != citrixorchestration.VDAUPGRADETYPE_NOT_SET || !r.VdaUpgradeType.IsNull() { - r.VdaUpgradeType = types.StringValue(string(*catalog.UpgradeInfo.UpgradeType)) - } - } else { - r.VdaUpgradeType = types.StringNull() - } - - provtype := catalog.GetProvisioningType() - r.ProvisioningType = types.StringValue(string(provtype)) - if provtype == citrixorchestration.PROVISIONINGTYPE_MANUAL || !r.IsPowerManaged.IsNull() { - r.IsPowerManaged = types.BoolValue(catalog.GetIsPowerManaged()) - } - - if catalog.ProvisioningType == citrixorchestration.PROVISIONINGTYPE_MANUAL { - // Handle machines - r = r.updateCatalogWithMachines(ctx, client, machines) - } - - r = r.updateCatalogWithRemotePcConfig(catalog) - - if catalog.ProvisioningScheme == nil { - r.ProvisioningScheme = nil - return r - } - - // Provisioning Scheme Properties - - if r.ProvisioningScheme == nil { - r.ProvisioningScheme = &ProvisioningSchemeModel{} - } - - provScheme := catalog.GetProvisioningScheme() - resourcePool := provScheme.GetResourcePool() - hypervisor := resourcePool.GetHypervisor() - machineAccountCreateRules := provScheme.GetMachineAccountCreationRules() - domain := machineAccountCreateRules.GetDomain() - customProperties := provScheme.GetCustomProperties() - - // Refresh Hypervisor and Resource Pool - r.ProvisioningScheme.Hypervisor = types.StringValue(hypervisor.GetId()) - r.ProvisioningScheme.HypervisorResourcePool = types.StringValue(resourcePool.GetId()) - - switch *connectionType { - case citrixorchestration.HYPERVISORCONNECTIONTYPE_AZURE_RM: - if r.ProvisioningScheme.AzureMachineConfig == nil { - r.ProvisioningScheme.AzureMachineConfig = &AzureMachineConfigModel{} - } - - r.ProvisioningScheme.AzureMachineConfig.RefreshProperties(*catalog) - - for _, stringPair := range customProperties { - if stringPair.GetName() == "Zones" && !r.ProvisioningScheme.AvailabilityZones.IsNull() { - r.ProvisioningScheme.AvailabilityZones = types.StringValue(stringPair.GetValue()) - } - } - case citrixorchestration.HYPERVISORCONNECTIONTYPE_AWS: - if r.ProvisioningScheme.AwsMachineConfig == nil { - r.ProvisioningScheme.AwsMachineConfig = &AwsMachineConfigModel{} - } - r.ProvisioningScheme.AwsMachineConfig.RefreshProperties(*catalog) - - for _, stringPair := 
range customProperties {
-			if stringPair.GetName() == "Zones" {
-				r.ProvisioningScheme.AvailabilityZones = types.StringValue(stringPair.GetValue())
-			}
-		}
-	case citrixorchestration.HYPERVISORCONNECTIONTYPE_GOOGLE_CLOUD_PLATFORM:
-		if r.ProvisioningScheme.GcpMachineConfig == nil {
-			r.ProvisioningScheme.GcpMachineConfig = &GcpMachineConfigModel{}
-		}
-
-		r.ProvisioningScheme.GcpMachineConfig.RefreshProperties(*catalog)
-
-		for _, stringPair := range customProperties {
-			if stringPair.GetName() == "CatalogZones" && !r.ProvisioningScheme.AvailabilityZones.IsNull() {
-				r.ProvisioningScheme.AvailabilityZones = types.StringValue(stringPair.GetValue())
-			}
-		}
-	}
-
-	// Refresh Total Machine Count
-	r.ProvisioningScheme.NumTotalMachines = types.Int64Value(int64(provScheme.GetMachineCount()))
-
-	// Refresh Identity Type
-	if identityType := types.StringValue(reflect.ValueOf(provScheme.GetIdentityType()).String()); identityType.ValueString() != "" {
-		r.ProvisioningScheme.IdentityType = identityType
-	} else {
-		r.ProvisioningScheme.IdentityType = types.StringNull()
-	}
-
-	// Refresh Network Mapping
-	networkMaps := provScheme.GetNetworkMaps()
-
-	if len(networkMaps) > 0 && r.ProvisioningScheme.NetworkMapping != nil {
-		r.ProvisioningScheme.NetworkMapping = &NetworkMappingModel{}
-		r.ProvisioningScheme.NetworkMapping.NetworkDevice = types.StringValue(networkMaps[0].GetDeviceId())
-		network := networkMaps[0].GetNetwork()
-		segments := strings.Split(network.GetXDPath(), "\\")
-		lastIndex := len(segments)
-		r.ProvisioningScheme.NetworkMapping.Network = types.StringValue(strings.Split((strings.Split(segments[lastIndex-1], "."))[0], " ")[0])
-	} else {
-		r.ProvisioningScheme.NetworkMapping = nil
-	}
-
-	// Identity Pool Properties
-	if r.ProvisioningScheme.MachineAccountCreationRules == nil {
-		r.ProvisioningScheme.MachineAccountCreationRules = &MachineAccountCreationRulesModel{}
-	}
-	r.ProvisioningScheme.MachineAccountCreationRules.NamingScheme = types.StringValue(machineAccountCreateRules.GetNamingScheme())
-	namingSchemeType := machineAccountCreateRules.GetNamingSchemeType()
-	r.ProvisioningScheme.MachineAccountCreationRules.NamingSchemeType = types.StringValue(reflect.ValueOf(namingSchemeType).String())
-
-	// Domain Identity Properties
-	if r.ProvisioningScheme.MachineDomainIdentity == nil {
-		r.ProvisioningScheme.MachineDomainIdentity = &MachineDomainIdentityModel{}
-	}
-
-	if domain.GetName() != "" {
-		r.ProvisioningScheme.MachineDomainIdentity.Domain = types.StringValue(domain.GetName())
-	}
-	if machineAccountCreateRules.GetOU() != "" {
-		r.ProvisioningScheme.MachineDomainIdentity.Ou = types.StringValue(machineAccountCreateRules.GetOU())
-	}
-
-	return r
-}
-
-func allocationTypeEnumToString(conn citrixorchestration.AllocationType) string {
-	switch conn {
-	case citrixorchestration.ALLOCATIONTYPE_UNKNOWN:
-		return "Unknown"
-	case citrixorchestration.ALLOCATIONTYPE_RANDOM:
-		return "Random"
-	case citrixorchestration.ALLOCATIONTYPE_STATIC:
-		return "Static"
-	default:
-		return ""
-	}
-}
-
-func ParseCustomPropertiesToClientModel(provisioningScheme ProvisioningSchemeModel, connectionType citrixorchestration.HypervisorConnectionType) []citrixorchestration.NameValueStringPairModel {
-	var res = &[]citrixorchestration.NameValueStringPairModel{}
-	switch connectionType {
-	case citrixorchestration.HYPERVISORCONNECTIONTYPE_AZURE_RM:
-		if !provisioningScheme.AvailabilityZones.IsNull() {
-			util.AppendNameValueStringPair(res, "Zones", provisioningScheme.AvailabilityZones.ValueString())
-		}
else { - util.AppendNameValueStringPair(res, "Zones", "") - } - if !provisioningScheme.AzureMachineConfig.StorageType.IsNull() { - util.AppendNameValueStringPair(res, "StorageType", provisioningScheme.AzureMachineConfig.StorageType.ValueString()) - } - if !provisioningScheme.AzureMachineConfig.VdaResourceGroup.IsNull() { - util.AppendNameValueStringPair(res, "ResourceGroups", provisioningScheme.AzureMachineConfig.VdaResourceGroup.ValueString()) - } - if !provisioningScheme.AzureMachineConfig.UseManagedDisks.IsNull() { - if provisioningScheme.AzureMachineConfig.UseManagedDisks.ValueBool() { - util.AppendNameValueStringPair(res, "UseManagedDisks", "true") - } else { - util.AppendNameValueStringPair(res, "UseManagedDisks", "false") - } - } - if provisioningScheme.AzureMachineConfig.WritebackCache != nil { - if !provisioningScheme.AzureMachineConfig.WritebackCache.WBCDiskStorageType.IsNull() { - util.AppendNameValueStringPair(res, "WBCDiskStorageType", provisioningScheme.AzureMachineConfig.WritebackCache.WBCDiskStorageType.ValueString()) - } - if provisioningScheme.AzureMachineConfig.WritebackCache.PersistWBC.ValueBool() { - util.AppendNameValueStringPair(res, "PersistWBC", "true") - if provisioningScheme.AzureMachineConfig.WritebackCache.StorageCostSaving.ValueBool() { - util.AppendNameValueStringPair(res, "StorageTypeAtShutdown", "Standard_LRS") - } - } - if provisioningScheme.AzureMachineConfig.WritebackCache.PersistOsDisk.ValueBool() { - util.AppendNameValueStringPair(res, "PersistOsDisk", "true") - if provisioningScheme.AzureMachineConfig.WritebackCache.PersistVm.ValueBool() { - util.AppendNameValueStringPair(res, "PersistVm", "true") - } - } - } - case citrixorchestration.HYPERVISORCONNECTIONTYPE_AWS: - if !provisioningScheme.AvailabilityZones.IsNull() { - util.AppendNameValueStringPair(res, "Zones", provisioningScheme.AvailabilityZones.ValueString()) - } - case citrixorchestration.HYPERVISORCONNECTIONTYPE_GOOGLE_CLOUD_PLATFORM: - if !provisioningScheme.AvailabilityZones.IsNull() { - util.AppendNameValueStringPair(res, "CatalogZones", provisioningScheme.AvailabilityZones.ValueString()) - } - if !provisioningScheme.GcpMachineConfig.StorageType.IsNull() { - util.AppendNameValueStringPair(res, "StorageType", provisioningScheme.GcpMachineConfig.StorageType.ValueString()) - } - if provisioningScheme.GcpMachineConfig.WritebackCache != nil { - if !provisioningScheme.GcpMachineConfig.WritebackCache.WBCDiskStorageType.IsNull() { - util.AppendNameValueStringPair(res, "WBCDiskStorageType", provisioningScheme.GcpMachineConfig.WritebackCache.WBCDiskStorageType.ValueString()) - } - if provisioningScheme.GcpMachineConfig.WritebackCache.PersistWBC.ValueBool() { - util.AppendNameValueStringPair(res, "PersistWBC", "true") - } - if provisioningScheme.GcpMachineConfig.WritebackCache.PersistOsDisk.ValueBool() { - util.AppendNameValueStringPair(res, "PersistOsDisk", "true") - } - } - } - - return *res -} - -func ParseNetworkMappingToClientModel(networkMapping NetworkMappingModel, resourcePool *citrixorchestration.HypervisorResourcePoolDetailResponseModel) ([]citrixorchestration.NetworkMapRequestModel, error) { - var networks []citrixorchestration.HypervisorResourceRefResponseModel - if resourcePool.ConnectionType == citrixorchestration.HYPERVISORCONNECTIONTYPE_AZURE_RM { - networks = resourcePool.Subnets - } else if resourcePool.ConnectionType == citrixorchestration.HYPERVISORCONNECTIONTYPE_AWS || resourcePool.ConnectionType == citrixorchestration.HYPERVISORCONNECTIONTYPE_GOOGLE_CLOUD_PLATFORM { - networks = 
resourcePool.Networks - } - - var res = []citrixorchestration.NetworkMapRequestModel{} - var networkName string - if resourcePool.ConnectionType == citrixorchestration.HYPERVISORCONNECTIONTYPE_AZURE_RM || resourcePool.ConnectionType == citrixorchestration.HYPERVISORCONNECTIONTYPE_GOOGLE_CLOUD_PLATFORM { - networkName = networkMapping.Network.ValueString() - } else if resourcePool.ConnectionType == citrixorchestration.HYPERVISORCONNECTIONTYPE_AWS { - networkName = fmt.Sprintf("%s (%s)", networkMapping.Network.ValueString(), resourcePool.GetResourcePoolRootId()) - } - network := slices.IndexFunc(networks, func(c citrixorchestration.HypervisorResourceRefResponseModel) bool { return c.GetName() == networkName }) - if network == -1 { - return res, fmt.Errorf("network %s not found", networkName) - } - - res = append(res, citrixorchestration.NetworkMapRequestModel{ - NetworkDeviceNameOrId: *citrixorchestration.NewNullableString(networkMapping.NetworkDevice.ValueStringPointer()), - NetworkPath: networks[network].GetXDPath(), - }) - return res, nil -} - -func (r MachineCatalogResourceModel) updateCatalogWithMachines(ctx context.Context, client *citrixclient.CitrixDaasClient, machines *citrixorchestration.MachineResponseModelCollection) MachineCatalogResourceModel { - if machines == nil { - r.MachineAccounts = nil - return r - } - - machineMapFromRemote := map[string]citrixorchestration.MachineResponseModel{} - for _, machine := range machines.GetItems() { - machineMapFromRemote[strings.ToLower(machine.GetName())] = machine - } - - if r.MachineAccounts != nil { - machinesNotPresetInRemote := map[string]bool{} - for _, machineAccount := range r.MachineAccounts { - for _, machineFromPlan := range machineAccount.Machines { - machineFromPlanName := machineFromPlan.MachineName.ValueString() - machineFromRemote, exists := machineMapFromRemote[strings.ToLower(machineFromPlanName)] - if !exists { - machinesNotPresetInRemote[strings.ToLower(machineFromPlanName)] = true - continue - } - - hosting := machineFromRemote.GetHosting() - hypervisor := hosting.GetHypervisorConnection() - hypervisorId := hypervisor.GetId() - - if !strings.EqualFold(hypervisorId, machineAccount.Hypervisor.ValueString()) { - machinesNotPresetInRemote[strings.ToLower(machineFromPlanName)] = true - continue - } - - if hypervisorId == "" { - delete(machineMapFromRemote, strings.ToLower(machineFromPlanName)) - continue - } - - hyp, err := util.GetHypervisor(ctx, client, nil, hypervisorId) - if err != nil { - machinesNotPresetInRemote[strings.ToLower(machineFromPlanName)] = true - continue - } - - connectionType := hyp.GetConnectionType() - hostedMachineId := hosting.GetHostedMachineId() - switch connectionType { - case citrixorchestration.HYPERVISORCONNECTIONTYPE_AZURE_RM: - if hostedMachineId != "" { - resourceGroupName := strings.Split(hostedMachineId, "/")[0] // hosted machine id is resourcegroupname/vmname - if !strings.EqualFold(machineFromPlan.ResourceGroupName.ValueString(), resourceGroupName) { - machineFromPlan.ResourceGroupName = types.StringValue(resourceGroupName) - } - } - case citrixorchestration.HYPERVISORCONNECTIONTYPE_GOOGLE_CLOUD_PLATFORM: - if hostedMachineId != "" { - machineIdArray := strings.Split(hostedMachineId, ":") // hosted machine id is projectname:region:vmname - if !strings.EqualFold(machineFromPlan.Region.ValueString(), machineIdArray[1]) { - machineFromPlan.Region = types.StringValue(machineIdArray[1]) - } - } - // case citrixorchestration.HYPERVISORCONNECTIONTYPE_AWS: AvailabilityZone is not available from 
remote - } - - delete(machineMapFromRemote, strings.ToLower(machineFromPlanName)) - } - } - - machineAccounts := []MachineAccountsModel{} - for _, machineAccount := range r.MachineAccounts { - machines := []MachineCatalogMachineModel{} - for _, machine := range machineAccount.Machines { - if machinesNotPresetInRemote[strings.ToLower(machine.MachineName.ValueString())] { - continue - } - machines = append(machines, machine) - } - machineAccount.Machines = machines - machineAccounts = append(machineAccounts, machineAccount) - } - - r.MachineAccounts = machineAccounts - } - - // go over any machines that are in remote but were not in plan - newMachines := map[string][]MachineCatalogMachineModel{} - for machineName, machineFromRemote := range machineMapFromRemote { - hosting := machineFromRemote.GetHosting() - hypConnection := hosting.GetHypervisorConnection() - hypId := hypConnection.GetId() - - var machineModel MachineCatalogMachineModel - machineModel.MachineName = types.StringValue(machineName) - - if hypId != "" { - hyp, err := util.GetHypervisor(ctx, client, nil, hypId) - if err != nil { - continue - } - - connectionType := hyp.GetConnectionType() - hostedMachineId := hosting.GetHostedMachineId() - switch connectionType { - case citrixorchestration.HYPERVISORCONNECTIONTYPE_AZURE_RM: - if hostedMachineId != "" { - resourceGroupName := strings.Split(hostedMachineId, "/")[0] // hosted machine id is resourcegroupname/vmname - machineModel.ResourceGroupName = types.StringValue(resourceGroupName) - // region is not available from remote - } - case citrixorchestration.HYPERVISORCONNECTIONTYPE_GOOGLE_CLOUD_PLATFORM: - if hostedMachineId != "" { - machineIdArray := strings.Split(hostedMachineId, ":") // hosted machine id is projectname:region:vmname - machineModel.ProjectName = types.StringValue(machineIdArray[0]) - machineModel.Region = types.StringValue(machineIdArray[1]) - } - // case citrixorchestration.HYPERVISORCONNECTIONTYPE_AWS: AvailabilityZone is not available from remote - } - } - - _, exists := newMachines[hypId] - if !exists { - newMachines[hypId] = []MachineCatalogMachineModel{} - } - - newMachines[hypId] = append(newMachines[hypId], machineModel) - } - - if len(newMachines) > 0 && r.MachineAccounts == nil { - r.MachineAccounts = []MachineAccountsModel{} - } - - machineAccountMap := map[string]int{} - for index, machineAccount := range r.MachineAccounts { - machineAccountMap[machineAccount.Hypervisor.ValueString()] = index - } - - for hypId, machines := range newMachines { - machineAccIndex, exists := machineAccountMap[hypId] - if exists { - machAccounts := r.MachineAccounts - machineAccount := machAccounts[machineAccIndex] - if machineAccount.Machines == nil { - machineAccount.Machines = []MachineCatalogMachineModel{} - } - machineAccountMachines := machineAccount.Machines - machineAccountMachines = append(machineAccountMachines, machines...) 
- machineAccount.Machines = machineAccountMachines - machAccounts[machineAccIndex] = machineAccount - r.MachineAccounts = machAccounts - continue - } - var machineAccount MachineAccountsModel - machineAccount.Hypervisor = types.StringValue(hypId) - machineAccount.Machines = machines - machAccounts := r.MachineAccounts - machAccounts = append(machAccounts, machineAccount) - machineAccountMap[hypId] = len(machAccounts) - 1 - r.MachineAccounts = machAccounts - } - - return r -} - -func (r MachineCatalogResourceModel) updateCatalogWithRemotePcConfig(catalog *citrixorchestration.MachineCatalogDetailResponseModel) MachineCatalogResourceModel { - if catalog.GetProvisioningType() == citrixorchestration.PROVISIONINGTYPE_MANUAL || !r.IsRemotePc.IsNull() { - r.IsRemotePc = types.BoolValue(catalog.GetIsRemotePC()) - } - rpcOUs := util.RefreshListProperties[RemotePcOuModel, citrixorchestration.RemotePCEnrollmentScopeResponseModel](r.RemotePcOus, "OUName", catalog.GetRemotePCEnrollmentScopes(), "OU", "RefreshListItem") - r.RemotePcOus = rpcOUs - return r -} - -func (scope RemotePcOuModel) RefreshListItem(remote citrixorchestration.RemotePCEnrollmentScopeResponseModel) RemotePcOuModel { - scope.OUName = types.StringValue(remote.GetOU()) - scope.IncludeSubFolders = types.BoolValue(remote.GetIncludeSubfolders()) - - return scope -} diff --git a/internal/daas/data_sources/vda/vda_data_source.go b/internal/daas/vda/vda_data_source.go similarity index 98% rename from internal/daas/data_sources/vda/vda_data_source.go rename to internal/daas/vda/vda_data_source.go index 63e54a1..bbae55d 100644 --- a/internal/daas/data_sources/vda/vda_data_source.go +++ b/internal/daas/vda/vda_data_source.go @@ -30,7 +30,7 @@ type VdaDataSource struct { } func (d *VdaDataSource) Metadata(ctx context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { - resp.TypeName = req.ProviderTypeName + "_daas_vda" + resp.TypeName = req.ProviderTypeName + "_vda" } func (d *VdaDataSource) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { diff --git a/internal/daas/data_sources/vda/vda_data_source_model.go b/internal/daas/vda/vda_data_source_model.go similarity index 100% rename from internal/daas/data_sources/vda/vda_data_source_model.go rename to internal/daas/vda/vda_data_source_model.go diff --git a/internal/daas/resources/zone/zone_resource.go b/internal/daas/zone/zone_resource.go similarity index 99% rename from internal/daas/resources/zone/zone_resource.go rename to internal/daas/zone/zone_resource.go index 383dc94..1750868 100644 --- a/internal/daas/resources/zone/zone_resource.go +++ b/internal/daas/zone/zone_resource.go @@ -40,7 +40,7 @@ type zoneResource struct { // Metadata returns the resource type name. func (r *zoneResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { - resp.TypeName = req.ProviderTypeName + "_daas_zone" + resp.TypeName = req.ProviderTypeName + "_zone" } // Schema defines the schema for the resource. 
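The two Metadata hunks above are the visible mechanics of the v0.5.0 rename: the provider type name ("citrix") is concatenated with the suffix, so dropping the daas_ infix turns citrix_daas_vda and citrix_daas_zone into citrix_vda and citrix_zone in configuration. A stripped-down illustration of that concatenation; the request and response structs here are simplified stand-ins for the terraform-plugin-framework types, not the real ones:

package main

import "fmt"

type MetadataRequest struct{ ProviderTypeName string }
type MetadataResponse struct{ TypeName string }

// Metadata mirrors the renamed implementations: provider prefix + suffix.
func Metadata(req MetadataRequest, resp *MetadataResponse) {
	resp.TypeName = req.ProviderTypeName + "_zone"
}

func main() {
	var resp MetadataResponse
	Metadata(MetadataRequest{ProviderTypeName: "citrix"}, &resp)
	fmt.Println(resp.TypeName) // citrix_zone (previously citrix_daas_zone)
}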
diff --git a/internal/daas/resources/zone/zone_resource_model.go b/internal/daas/zone/zone_resource_model.go similarity index 100% rename from internal/daas/resources/zone/zone_resource_model.go rename to internal/daas/zone/zone_resource_model.go diff --git a/internal/examples/data-sources/citrix_daas_admin_scope/data-source.tf b/internal/examples/data-sources/citrix_admin_scope/data-source.tf similarity index 56% rename from internal/examples/data-sources/citrix_daas_admin_scope/data-source.tf rename to internal/examples/data-sources/citrix_admin_scope/data-source.tf index 019dae2..c27286c 100644 --- a/internal/examples/data-sources/citrix_daas_admin_scope/data-source.tf +++ b/internal/examples/data-sources/citrix_admin_scope/data-source.tf @@ -1,9 +1,9 @@ # Get Admin Scope resource by name -data "citrix_daas_admin_scope" "test_scope_by_name" { +data "citrix_admin_scope" "test_scope_by_name" { name = "All" } # Get Admin Scope resource by id -data "citrix_daas_admin_scope" "test_scope_by_id" { +data "citrix_admin_scope" "test_scope_by_id" { id = "00000000-0000-0000-0000-000000000000" } \ No newline at end of file diff --git a/internal/examples/data-sources/citrix_daas_vda/data-source.tf b/internal/examples/data-sources/citrix_vda/data-source.tf similarity index 67% rename from internal/examples/data-sources/citrix_daas_vda/data-source.tf rename to internal/examples/data-sources/citrix_vda/data-source.tf index dab9cda..0f786c9 100644 --- a/internal/examples/data-sources/citrix_daas_vda/data-source.tf +++ b/internal/examples/data-sources/citrix_vda/data-source.tf @@ -1,9 +1,9 @@ # Get VDA resource by machine catalog Name or Id -data "citrix_daas_vda" "vda_by_machine_catalog" { +data "citrix_vda" "vda_by_machine_catalog" { machine_catalog = "{MachineCatalog Name or Id}" } # Get VDA resource by delivery group Name or Id -data "citrix_daas_vda" "vda_by_delivery_group" { +data "citrix_vda" "vda_by_delivery_group" { delivery_group = "{DeliveryGroup Name or Id}" } \ No newline at end of file diff --git a/internal/examples/resources/citrix_admin_role/import.sh b/internal/examples/resources/citrix_admin_role/import.sh new file mode 100644 index 0000000..d1a15d2 --- /dev/null +++ b/internal/examples/resources/citrix_admin_role/import.sh @@ -0,0 +1,2 @@ +# Admin Role can be imported by specifying the GUID +terraform import citrix_admin_role.example-admin-role 00000000-0000-0000-0000-000000000000 \ No newline at end of file diff --git a/internal/examples/resources/citrix_daas_admin_role/resource.tf b/internal/examples/resources/citrix_admin_role/resource.tf similarity index 78% rename from internal/examples/resources/citrix_daas_admin_role/resource.tf rename to internal/examples/resources/citrix_admin_role/resource.tf index fe93690..f511c7e 100644 --- a/internal/examples/resources/citrix_daas_admin_role/resource.tf +++ b/internal/examples/resources/citrix_admin_role/resource.tf @@ -1,10 +1,10 @@ -resource "citrix_daas_admin_role" "on_prem_example_role" { +resource "citrix_admin_role" "on_prem_example_role" { name = "on_prem_admin_role" description = "Example admin role for citrix onprem" permissions = ["AppGroupApplications_ChangeTags"] } -resource "citrix_daas_admin_role" "cloud_example_role" { +resource "citrix_admin_role" "cloud_example_role" { name = "cloud_admin_role" can_launch_manage = false can_launch_monitor = true diff --git a/internal/examples/resources/citrix_admin_scope/import.sh b/internal/examples/resources/citrix_admin_scope/import.sh new file mode 100644 index 0000000..ea3e45d --- 
/dev/null +++ b/internal/examples/resources/citrix_admin_scope/import.sh @@ -0,0 +1,2 @@ +# Admin Scope can be imported by specifying the GUID +terraform import citrix_admin_scope.example-admin-scope 00000000-0000-0000-0000-000000000000 \ No newline at end of file diff --git a/internal/examples/resources/citrix_daas_admin_scope/resource.tf b/internal/examples/resources/citrix_admin_scope/resource.tf similarity index 88% rename from internal/examples/resources/citrix_daas_admin_scope/resource.tf rename to internal/examples/resources/citrix_admin_scope/resource.tf index 279d727..ba453d0 100644 --- a/internal/examples/resources/citrix_daas_admin_scope/resource.tf +++ b/internal/examples/resources/citrix_admin_scope/resource.tf @@ -1,4 +1,4 @@ -resource "citrix_daas_admin_scope" "example-admin-scope" { +resource "citrix_admin_scope" "example-admin-scope" { name = "example-admin-scope" description = "Example admin scope for delivery group and machine catalog" scoped_objects = [ diff --git a/internal/examples/resources/citrix_application/import.sh b/internal/examples/resources/citrix_application/import.sh new file mode 100644 index 0000000..92ba87a --- /dev/null +++ b/internal/examples/resources/citrix_application/import.sh @@ -0,0 +1,2 @@ +# Application can be imported by specifying the GUID +terraform import citrix_application.example-application b620d505-0d0d-43b1-8c94-5cb21c5ab40d \ No newline at end of file diff --git a/internal/examples/resources/citrix_daas_application/resource.tf b/internal/examples/resources/citrix_application/resource.tf similarity index 63% rename from internal/examples/resources/citrix_daas_application/resource.tf rename to internal/examples/resources/citrix_application/resource.tf index 52496ab..1af0156 100644 --- a/internal/examples/resources/citrix_daas_application/resource.tf +++ b/internal/examples/resources/citrix_application/resource.tf @@ -1,12 +1,12 @@ -resource "citrix_daas_application" "example-application" { +resource "citrix_application" "example-application" { name = "example-name" description = "example-description" published_name = "example-published-name" - application_folder_path = citrix_daas_application_folder.example-application-folder-1.path + application_folder_path = citrix_application_folder.example-application-folder-1.path installed_app_properties = { command_line_arguments = "" command_line_executable = "" working_directory = "" } - delivery_groups = [citrix_daas_delivery_group.example-delivery-group.id] + delivery_groups = [citrix_delivery_group.example-delivery-group.id] } diff --git a/internal/examples/resources/citrix_application_folder/import.sh b/internal/examples/resources/citrix_application_folder/import.sh new file mode 100644 index 0000000..a76d01f --- /dev/null +++ b/internal/examples/resources/citrix_application_folder/import.sh @@ -0,0 +1,2 @@ +# Application Folder can be imported by specifying the GUID +terraform import citrix_application_folder.example-application-folder-1 cd0a00da-dda8-4ba6-a686-936f2c7a3adf \ No newline at end of file diff --git a/internal/examples/resources/citrix_application_folder/resource.tf b/internal/examples/resources/citrix_application_folder/resource.tf new file mode 100644 index 0000000..ee63bbb --- /dev/null +++ b/internal/examples/resources/citrix_application_folder/resource.tf @@ -0,0 +1,8 @@ +resource "citrix_application_folder" "example-application-folder-1" { + name = "example-application-folder-1" +} + +resource "citrix_application_folder" "example-application-folder-2" { + name = 
"example-application-folder-2" + parent_path = citrix_application_folder.example-application-folder-1.path +} diff --git a/internal/examples/resources/citrix_aws_hypervisor/import.sh b/internal/examples/resources/citrix_aws_hypervisor/import.sh new file mode 100644 index 0000000..751cd47 --- /dev/null +++ b/internal/examples/resources/citrix_aws_hypervisor/import.sh @@ -0,0 +1,2 @@ +# AWS Hypervisor can be imported by specifying the GUID +terraform import citrix_aws_hypervisor.example-aws-hypervisor b2339edf-7b00-436e-9c3a-54c987c3526e \ No newline at end of file diff --git a/internal/examples/resources/citrix_daas_aws_hypervisor/resource.tf b/internal/examples/resources/citrix_aws_hypervisor/resource.tf similarity index 78% rename from internal/examples/resources/citrix_daas_aws_hypervisor/resource.tf rename to internal/examples/resources/citrix_aws_hypervisor/resource.tf index 7ce2261..e0d19c0 100644 --- a/internal/examples/resources/citrix_daas_aws_hypervisor/resource.tf +++ b/internal/examples/resources/citrix_aws_hypervisor/resource.tf @@ -1,5 +1,5 @@ # AWS Hypervisor -resource "citrix_daas_aws_hypervisor" "example-aws-hypervisor" { +resource "citrix_aws_hypervisor" "example-aws-hypervisor" { name = "example-aws-hypervisor" zone = "" api_key = "" diff --git a/internal/examples/resources/citrix_aws_hypervisor_resource_pool/import.sh b/internal/examples/resources/citrix_aws_hypervisor_resource_pool/import.sh new file mode 100644 index 0000000..7c55d34 --- /dev/null +++ b/internal/examples/resources/citrix_aws_hypervisor_resource_pool/import.sh @@ -0,0 +1,2 @@ +# Hypervisor Resource Pool can be imported with the format HypervisorId,HypervisorResourcePoolId +terraform import citrix_aws_hypervisor_resource_pool.example-aws-hypervisor-resource-pool sbf0dc45-5c42-45a0-a15d-a3df4ff5da8c,ce571dd9-1a46-4b85-891c-484423322c53 \ No newline at end of file diff --git a/internal/examples/resources/citrix_daas_aws_hypervisor_resource_pool/resource.tf b/internal/examples/resources/citrix_aws_hypervisor_resource_pool/resource.tf similarity index 51% rename from internal/examples/resources/citrix_daas_aws_hypervisor_resource_pool/resource.tf rename to internal/examples/resources/citrix_aws_hypervisor_resource_pool/resource.tf index e0d30de..5f93e7b 100644 --- a/internal/examples/resources/citrix_daas_aws_hypervisor_resource_pool/resource.tf +++ b/internal/examples/resources/citrix_aws_hypervisor_resource_pool/resource.tf @@ -1,6 +1,6 @@ -resource "citrix_daas_aws_hypervisor_resource_pool" "example-aws-hypervisor-resource-pool" { +resource "citrix_aws_hypervisor_resource_pool" "example-aws-hypervisor-resource-pool" { name = "example-aws-hypervisor-resource-pool" - hypervisor = citrix_daas_aws_hypervisor.example-aws-hypervisor.id + hypervisor = citrix_aws_hypervisor.example-aws-hypervisor.id subnets = [ "10.0.1.0/24", ] diff --git a/internal/examples/resources/citrix_azure_hypervisor/import.sh b/internal/examples/resources/citrix_azure_hypervisor/import.sh new file mode 100644 index 0000000..ada92be --- /dev/null +++ b/internal/examples/resources/citrix_azure_hypervisor/import.sh @@ -0,0 +1,2 @@ +# Azure Hypervisor can be imported by specifying the GUID +terraform import citrix_azure_hypervisor.example-azure-hypervisor b2339edf-7b00-436e-9c3a-54c987c3526e \ No newline at end of file diff --git a/internal/examples/resources/citrix_daas_azure_hypervisor/resource.tf b/internal/examples/resources/citrix_azure_hypervisor/resource.tf similarity index 81% rename from 
internal/examples/resources/citrix_daas_azure_hypervisor/resource.tf rename to internal/examples/resources/citrix_azure_hypervisor/resource.tf index 47c17fd..c7b0e56 100644 --- a/internal/examples/resources/citrix_daas_azure_hypervisor/resource.tf +++ b/internal/examples/resources/citrix_azure_hypervisor/resource.tf @@ -1,5 +1,5 @@ # Azure Hypervisor -resource "citrix_daas_azure_hypervisor" "example-azure-hypervisor" { +resource "citrix_azure_hypervisor" "example-azure-hypervisor" { name = "example-azure-hypervisor" zone = "" active_directory_id = "" diff --git a/internal/examples/resources/citrix_azure_hypervisor_resource_pool/import.sh b/internal/examples/resources/citrix_azure_hypervisor_resource_pool/import.sh new file mode 100644 index 0000000..0294d9e --- /dev/null +++ b/internal/examples/resources/citrix_azure_hypervisor_resource_pool/import.sh @@ -0,0 +1,2 @@ +# Hypervisor Resource Pool can be imported with the format HypervisorId,HypervisorResourcePoolId +terraform import citrix_azure_hypervisor_resource_pool.example-azure-hypervisor-resource-pool sbf0dc45-5c42-45a0-a15d-a3df4ff5da8c,ce571dd9-1a46-4b85-891c-484423322c53 \ No newline at end of file diff --git a/internal/examples/resources/citrix_daas_azure_hypervisor_resource_pool/resource.tf b/internal/examples/resources/citrix_azure_hypervisor_resource_pool/resource.tf similarity index 60% rename from internal/examples/resources/citrix_daas_azure_hypervisor_resource_pool/resource.tf rename to internal/examples/resources/citrix_azure_hypervisor_resource_pool/resource.tf index 4bc3b88..59332ff 100644 --- a/internal/examples/resources/citrix_daas_azure_hypervisor_resource_pool/resource.tf +++ b/internal/examples/resources/citrix_azure_hypervisor_resource_pool/resource.tf @@ -1,6 +1,6 @@ -resource "citrix_daas_azure_hypervisor_resource_pool" "example-azure-hypervisor-resource-pool" { +resource "citrix_azure_hypervisor_resource_pool" "example-azure-hypervisor-resource-pool" { name = "example-azure-hypervisor-resource-pool" - hypervisor = citrix_daas_azure_hypervisor.example-azure-hypervisor.id + hypervisor = citrix_azure_hypervisor.example-azure-hypervisor.id region = "East US" virtual_network_resource_group = "" virtual_network = "" diff --git a/internal/examples/resources/citrix_daas_admin_role/import.sh b/internal/examples/resources/citrix_daas_admin_role/import.sh deleted file mode 100644 index d44a98f..0000000 --- a/internal/examples/resources/citrix_daas_admin_role/import.sh +++ /dev/null @@ -1,2 +0,0 @@ -# Admin Role can be imported by specifying the GUID -terraform import citrix_daas_admin_role.example-admin-role 00000000-0000-0000-0000-000000000000 \ No newline at end of file diff --git a/internal/examples/resources/citrix_daas_admin_scope/import.sh b/internal/examples/resources/citrix_daas_admin_scope/import.sh deleted file mode 100644 index 95e61e4..0000000 --- a/internal/examples/resources/citrix_daas_admin_scope/import.sh +++ /dev/null @@ -1,2 +0,0 @@ -# Admin Scope can be imported by specifying the GUID -terraform import citrix_daas_admin_scope.example-admin-scope 00000000-0000-0000-0000-000000000000 \ No newline at end of file diff --git a/internal/examples/resources/citrix_daas_application/import.sh b/internal/examples/resources/citrix_daas_application/import.sh deleted file mode 100644 index 73e6f9f..0000000 --- a/internal/examples/resources/citrix_daas_application/import.sh +++ /dev/null @@ -1,2 +0,0 @@ -# Application can be imported by specifying the GUID -terraform import citrix_daas_application.example-application 
b620d505-0d0d-43b1-8c94-5cb21c5ab40d \ No newline at end of file diff --git a/internal/examples/resources/citrix_daas_application_folder/import.sh b/internal/examples/resources/citrix_daas_application_folder/import.sh deleted file mode 100644 index 812f2b2..0000000 --- a/internal/examples/resources/citrix_daas_application_folder/import.sh +++ /dev/null @@ -1,2 +0,0 @@ -# Application Folder can be imported by specifying the GUID -terraform import citrix_daas_application_folder.example-application-folder-1 cd0a00da-dda8-4ba6-a686-936f2c7a3adf \ No newline at end of file diff --git a/internal/examples/resources/citrix_daas_application_folder/resource.tf b/internal/examples/resources/citrix_daas_application_folder/resource.tf deleted file mode 100644 index 004ac24..0000000 --- a/internal/examples/resources/citrix_daas_application_folder/resource.tf +++ /dev/null @@ -1,8 +0,0 @@ -resource "citrix_daas_application_folder" "example-application-folder-1" { - name = "example-application-folder-1" -} - -resource "citrix_daas_application_folder" "example-application-folder-2" { - name = "example-application-folder-2" - parent_path = citrix_daas_application_folder.example-application-folder-1.path -} diff --git a/internal/examples/resources/citrix_daas_aws_hypervisor/import.sh b/internal/examples/resources/citrix_daas_aws_hypervisor/import.sh deleted file mode 100644 index 66d6cb7..0000000 --- a/internal/examples/resources/citrix_daas_aws_hypervisor/import.sh +++ /dev/null @@ -1,2 +0,0 @@ -# AWS Hypervisor can be imported by specifying the GUID -terraform import citrix_daas_aws_hypervisor.example-aws-hypervisor b2339edf-7b00-436e-9c3a-54c987c3526e \ No newline at end of file diff --git a/internal/examples/resources/citrix_daas_aws_hypervisor_resource_pool/import.sh b/internal/examples/resources/citrix_daas_aws_hypervisor_resource_pool/import.sh deleted file mode 100644 index b344597..0000000 --- a/internal/examples/resources/citrix_daas_aws_hypervisor_resource_pool/import.sh +++ /dev/null @@ -1,2 +0,0 @@ -# Hypervisor Resource Pool can be imported with the format HypervisorId,HypervisorResourcePoolId -terraform import citrix_daas_aws_hypervisor_resource_pool.example-aws-hypervisor-resource-pool sbf0dc45-5c42-45a0-a15d-a3df4ff5da8c,ce571dd9-1a46-4b85-891c-484423322c53 \ No newline at end of file diff --git a/internal/examples/resources/citrix_daas_azure_hypervisor/import.sh b/internal/examples/resources/citrix_daas_azure_hypervisor/import.sh deleted file mode 100644 index 5805df9..0000000 --- a/internal/examples/resources/citrix_daas_azure_hypervisor/import.sh +++ /dev/null @@ -1,2 +0,0 @@ -# Azure Hypervisor can be imported by specifying the GUID -terraform import citrix_daas_azure_hypervisor.example-azure-hypervisor b2339edf-7b00-436e-9c3a-54c987c3526e \ No newline at end of file diff --git a/internal/examples/resources/citrix_daas_azure_hypervisor_resource_pool/import.sh b/internal/examples/resources/citrix_daas_azure_hypervisor_resource_pool/import.sh deleted file mode 100644 index 4fb8f9a..0000000 --- a/internal/examples/resources/citrix_daas_azure_hypervisor_resource_pool/import.sh +++ /dev/null @@ -1,2 +0,0 @@ -# Hypervisor Resource Pool can be imported with the format HypervisorId,HypervisorResourcePoolId -terraform import citrix_daas_azure_hypervisor_resource_pool.example-azure-hypervisor-resource-pool sbf0dc45-5c42-45a0-a15d-a3df4ff5da8c,ce571dd9-1a46-4b85-891c-484423322c53 \ No newline at end of file diff --git a/internal/examples/resources/citrix_daas_delivery_group/import.sh 
b/internal/examples/resources/citrix_daas_delivery_group/import.sh deleted file mode 100644 index d6c15fd..0000000 --- a/internal/examples/resources/citrix_daas_delivery_group/import.sh +++ /dev/null @@ -1,2 +0,0 @@ -# Delivery Group can be imported by specifying the GUID -terraform import citrix_daas_delivery_group.example-delivery-group a92ac0d6-9a0f-477a-a504-07cae8fccb81 \ No newline at end of file diff --git a/internal/examples/resources/citrix_daas_gcp_hypervisor/import.sh b/internal/examples/resources/citrix_daas_gcp_hypervisor/import.sh deleted file mode 100644 index 64203be..0000000 --- a/internal/examples/resources/citrix_daas_gcp_hypervisor/import.sh +++ /dev/null @@ -1,2 +0,0 @@ -# Hypervisor can be imported by specifying the GUID -terraform import citrix_daas_gcp_hypervisor.example-gcp-hypervisor b2339edf-7b00-436e-9c3a-54c987c3526e \ No newline at end of file diff --git a/internal/examples/resources/citrix_daas_gcp_hypervisor_resource_pool/import.sh b/internal/examples/resources/citrix_daas_gcp_hypervisor_resource_pool/import.sh deleted file mode 100644 index 32e11cf..0000000 --- a/internal/examples/resources/citrix_daas_gcp_hypervisor_resource_pool/import.sh +++ /dev/null @@ -1,2 +0,0 @@ -# Hypervisor Resource Pool can be imported with the format HypervisorId,HypervisorResourcePoolId -terraform import citrix_daas_gcp_hypervisor_resource_pool.example-gcp-hypervisor-resource-pool sbf0dc45-5c42-45a0-a15d-a3df4ff5da8c,ce571dd9-1a46-4b85-891c-484423322c53 \ No newline at end of file diff --git a/internal/examples/resources/citrix_daas_machine_catalog/import.sh b/internal/examples/resources/citrix_daas_machine_catalog/import.sh deleted file mode 100644 index 8aaeb42..0000000 --- a/internal/examples/resources/citrix_daas_machine_catalog/import.sh +++ /dev/null @@ -1,2 +0,0 @@ -# Machine catalog can be imported by specifying the GUID -terraform import citrix_daas_machine_catalog.example b2339edf-7b00-436e-9c3a-54c987c3526e \ No newline at end of file diff --git a/internal/examples/resources/citrix_daas_zone/import.sh b/internal/examples/resources/citrix_daas_zone/import.sh deleted file mode 100644 index f5a3c74..0000000 --- a/internal/examples/resources/citrix_daas_zone/import.sh +++ /dev/null @@ -1,2 +0,0 @@ -# Zone can be imported by specifying the GUID -terraform import citrix_daas_zone.example-zone 06e5981e-dbaf-48db-b134-245fca2dc672 \ No newline at end of file diff --git a/internal/examples/resources/citrix_delivery_group/import.sh b/internal/examples/resources/citrix_delivery_group/import.sh new file mode 100644 index 0000000..12847c0 --- /dev/null +++ b/internal/examples/resources/citrix_delivery_group/import.sh @@ -0,0 +1,2 @@ +# Delivery Group can be imported by specifying the GUID +terraform import citrix_delivery_group.example-delivery-group a92ac0d6-9a0f-477a-a504-07cae8fccb81 \ No newline at end of file diff --git a/internal/examples/resources/citrix_daas_delivery_group/resource.tf b/internal/examples/resources/citrix_delivery_group/resource.tf similarity index 93% rename from internal/examples/resources/citrix_daas_delivery_group/resource.tf rename to internal/examples/resources/citrix_delivery_group/resource.tf index ea2fce3..aa2b735 100644 --- a/internal/examples/resources/citrix_daas_delivery_group/resource.tf +++ b/internal/examples/resources/citrix_delivery_group/resource.tf @@ -1,8 +1,8 @@ -resource "citrix_daas_delivery_group" "example-delivery-group" { +resource "citrix_delivery_group" "example-delivery-group" { name = "example-delivery-group" 
associated_machine_catalogs = [ { - machine_catalog = citrix_daas_machine_catalog.example-azure-mtsession.id + machine_catalog = citrix_machine_catalog.example-azure-mtsession.id machine_count = 1 } ] @@ -99,4 +99,5 @@ resource "citrix_daas_delivery_group" "example-delivery-group" { } ] + policy_set_id = citrix_policy_set.example-policy-set.id } \ No newline at end of file diff --git a/internal/examples/resources/citrix_gcp_hypervisor/import.sh b/internal/examples/resources/citrix_gcp_hypervisor/import.sh new file mode 100644 index 0000000..801059b --- /dev/null +++ b/internal/examples/resources/citrix_gcp_hypervisor/import.sh @@ -0,0 +1,2 @@ +# Hypervisor can be imported by specifying the GUID +terraform import citrix_gcp_hypervisor.example-gcp-hypervisor b2339edf-7b00-436e-9c3a-54c987c3526e \ No newline at end of file diff --git a/internal/examples/resources/citrix_daas_gcp_hypervisor/resource.tf b/internal/examples/resources/citrix_gcp_hypervisor/resource.tf similarity index 77% rename from internal/examples/resources/citrix_daas_gcp_hypervisor/resource.tf rename to internal/examples/resources/citrix_gcp_hypervisor/resource.tf index ab84c58..9dac36a 100644 --- a/internal/examples/resources/citrix_daas_gcp_hypervisor/resource.tf +++ b/internal/examples/resources/citrix_gcp_hypervisor/resource.tf @@ -1,5 +1,5 @@ # GCP Hypervisor -resource "citrix_daas_gcp_hypervisor" "example-gcp-hypervisor" { +resource "citrix_gcp_hypervisor" "example-gcp-hypervisor" { name = "example-gcp-hypervisor" zone = "" service_account_id = "" diff --git a/internal/examples/resources/citrix_gcp_hypervisor_resource_pool/import.sh b/internal/examples/resources/citrix_gcp_hypervisor_resource_pool/import.sh new file mode 100644 index 0000000..a8cce59 --- /dev/null +++ b/internal/examples/resources/citrix_gcp_hypervisor_resource_pool/import.sh @@ -0,0 +1,2 @@ +# Hypervisor Resource Pool can be imported with the format HypervisorId,HypervisorResourcePoolId +terraform import citrix_gcp_hypervisor_resource_pool.example-gcp-hypervisor-resource-pool sbf0dc45-5c42-45a0-a15d-a3df4ff5da8c,ce571dd9-1a46-4b85-891c-484423322c53 \ No newline at end of file diff --git a/internal/examples/resources/citrix_daas_gcp_hypervisor_resource_pool/resource.tf b/internal/examples/resources/citrix_gcp_hypervisor_resource_pool/resource.tf similarity index 57% rename from internal/examples/resources/citrix_daas_gcp_hypervisor_resource_pool/resource.tf rename to internal/examples/resources/citrix_gcp_hypervisor_resource_pool/resource.tf index 2f9ce5b..47c5068 100644 --- a/internal/examples/resources/citrix_daas_gcp_hypervisor_resource_pool/resource.tf +++ b/internal/examples/resources/citrix_gcp_hypervisor_resource_pool/resource.tf @@ -1,6 +1,6 @@ -resource "citrix_daas_gcp_hypervisor_resource_pool" "example-gcp-hypervisor-resource-pool" { +resource "citrix_gcp_hypervisor_resource_pool" "example-gcp-hypervisor-resource-pool" { name = "example-gcp-hypervisor-resource-pool" - hypervisor = citrix_daas_gcp_hypervisor.example-gcp-hypervisor.id + hypervisor = citrix_gcp_hypervisor.example-gcp-hypervisor.id project_name = "10000-example-gcp-project" region = "us-east1" subnets = [ diff --git a/internal/examples/resources/citrix_machine_catalog/import.sh b/internal/examples/resources/citrix_machine_catalog/import.sh new file mode 100644 index 0000000..459a465 --- /dev/null +++ b/internal/examples/resources/citrix_machine_catalog/import.sh @@ -0,0 +1,2 @@ +# Machine catalog can be imported by specifying the GUID +terraform import 
citrix_machine_catalog.example b2339edf-7b00-436e-9c3a-54c987c3526e \ No newline at end of file diff --git a/internal/examples/resources/citrix_daas_machine_catalog/resource.tf b/internal/examples/resources/citrix_machine_catalog/resource.tf similarity index 85% rename from internal/examples/resources/citrix_daas_machine_catalog/resource.tf rename to internal/examples/resources/citrix_machine_catalog/resource.tf index 527108f..65750f7 100644 --- a/internal/examples/resources/citrix_daas_machine_catalog/resource.tf +++ b/internal/examples/resources/citrix_machine_catalog/resource.tf @@ -1,4 +1,4 @@ -resource "citrix_daas_machine_catalog" "example-azure-mtsession" { +resource "citrix_machine_catalog" "example-azure-mtsession" { name = "example-azure-mtsession" description = "Example multi-session catalog on Azure hypervisor" zone = "" @@ -8,8 +8,8 @@ resource "citrix_daas_machine_catalog" "example-azure-mtsession" { is_remote_pc = false provisioning_type = "MCS" provisioning_scheme = { - hypervisor = citrix_daas_azure_hypervisor.example-azure-hypervisor.id - hypervisor_resource_pool = citrix_daas_hypervisor_resource_pool.example-azure-hypervisor-resource-pool.id + hypervisor = citrix_azure_hypervisor.example-azure-hypervisor.id + hypervisor_resource_pool = citrix_azure_hypervisor_resource_pool.example-azure-hypervisor-resource-pool.id identity_type = "ActiveDirectory" machine_domain_identity = { domain = "" @@ -47,7 +47,7 @@ resource "citrix_daas_machine_catalog" "example-azure-mtsession" { } } -resource "citrix_daas_machine_catalog" "example-gcp-mtsession" { +resource "citrix_machine_catalog" "example-gcp-mtsession" { name = "example-gcp-mtsession" description = "Example multi-session catalog on GCP hypervisor" zone = "" @@ -57,8 +57,8 @@ resource "citrix_daas_machine_catalog" "example-gcp-mtsession" { is_remote_pc = false provisioning_type = "MCS" provisioning_scheme = { - hypervisor = citrix_daas_gcp_hypervisor.example-gcp-hypervisor.id - hypervisor_resource_pool = citrix_daas_hypervisor_resource_pool.example-gcp-hypervisor-resource-pool.id + hypervisor = citrix_gcp_hypervisor.example-gcp-hypervisor.id + hypervisor_resource_pool = citrix_gcp_hypervisor_resource_pool.example-gcp-hypervisor-resource-pool.id identity_type = "ActiveDirectory" machine_domain_identity = { domain = "" @@ -88,7 +88,7 @@ resource "citrix_daas_machine_catalog" "example-gcp-mtsession" { } } -resource "citrix_daas_machine_catalog" "example-manual-power-managed-mtsession" { +resource "citrix_machine_catalog" "example-manual-power-managed-mtsession" { name = "example-manual-power-managed-mtsession" description = "Example manual power managed multi-session catalog" zone = "" @@ -99,7 +99,7 @@ resource "citrix_daas_machine_catalog" "example-manual-power-managed-mtsession" provisioning_type = "Manual" machine_accounts = [ { - hypervisor = citrix_daas_azure_hypervisor.example-azure-hypervisor.id + hypervisor = citrix_azure_hypervisor.example-azure-hypervisor.id machines = [ { region = "East US" @@ -111,7 +111,7 @@ resource "citrix_daas_machine_catalog" "example-manual-power-managed-mtsession" ] } -resource "citrix_daas_machine_catalog" "example-manual-non-power-managed-mtsession" { +resource "citrix_machine_catalog" "example-manual-non-power-managed-mtsession" { name = "example-manual-non-power-managed-mtsession" description = "Example manual non power managed multi-session catalog" zone = "" @@ -134,7 +134,7 @@ resource "citrix_daas_machine_catalog" "example-manual-non-power-managed-mtsessi ] } -resource 
"citrix_daas_machine_catalog" "example-remote-pc" { +resource "citrix_machine_catalog" "example-remote-pc" { name = "example-remote-pc-catalog" description = "Example Remote PC catalog" zone = "" diff --git a/internal/examples/resources/citrix_nutanix_hypervisor/import.sh b/internal/examples/resources/citrix_nutanix_hypervisor/import.sh new file mode 100644 index 0000000..e44dc00 --- /dev/null +++ b/internal/examples/resources/citrix_nutanix_hypervisor/import.sh @@ -0,0 +1,2 @@ +# Nutanix Hypervisor can be imported by specifying the GUID +terraform import citrix_nutanix_hypervisor.example-nutanix-hypervisor b2339edf-7b00-436e-9c3a-54c987c3526e \ No newline at end of file diff --git a/internal/examples/resources/citrix_nutanix_hypervisor/resource.tf b/internal/examples/resources/citrix_nutanix_hypervisor/resource.tf new file mode 100644 index 0000000..91760d8 --- /dev/null +++ b/internal/examples/resources/citrix_nutanix_hypervisor/resource.tf @@ -0,0 +1,10 @@ +# Nutanix Hypervisor +resource "citrix_nutanix_hypervisor" "example-nutanix-hypervisor" { + name = "example-nutanix-hypervisor" + zone = "" + username = "" + password = "" + password_format = "Plaintext" + addresses = ["10.122.36.26"] + max_absolute_active_actions = 20 +} \ No newline at end of file diff --git a/internal/examples/resources/citrix_policy_set/import.sh b/internal/examples/resources/citrix_policy_set/import.sh new file mode 100644 index 0000000..680ada3 --- /dev/null +++ b/internal/examples/resources/citrix_policy_set/import.sh @@ -0,0 +1,2 @@ +# Policy and Policy Set Association can be imported by specifying the Policy GUID +terraform import citrix_policy_set.example 00000000-0000-0000-0000-000000000000 \ No newline at end of file diff --git a/internal/examples/resources/citrix_policy_set/resource.tf b/internal/examples/resources/citrix_policy_set/resource.tf new file mode 100644 index 0000000..bf2b523 --- /dev/null +++ b/internal/examples/resources/citrix_policy_set/resource.tf @@ -0,0 +1,38 @@ +resource "citrix_policy_set" "example-policy-set" { + name = "example-policy-set" + description = "This is an example policy set description" + type = "DeliveryGroupPolicies" + scopes = [ "All", citrix_admin_scope.example-admin-scope.name ] + policies = [ + { + name = "test-policy-with-priority-0" + description = "Test policy in the example policy set with priority 0" + is_enabled = true + policy_settings = [ + { + name = "AdvanceWarningPeriod" + value = "13:00:00" + use_default = false + }, + ] + policy_filters = [ + { + type = "DesktopGroup" + data = jsonencode({ + "server" = "20.185.46.142" + "uuid" = citrix_policy_set.example-delivery-group.id + }) + is_enabled = true + is_allowed = true + }, + ] + }, + { + name = "test-policy-with-priority-1" + description = "Test policy in the example policy set with priority 1" + is_enabled = false + policy_settings = [] + policy_filters = [] + } + ] +} diff --git a/internal/examples/resources/citrix_vsphere_hypervisor/import.sh b/internal/examples/resources/citrix_vsphere_hypervisor/import.sh new file mode 100644 index 0000000..fcdca07 --- /dev/null +++ b/internal/examples/resources/citrix_vsphere_hypervisor/import.sh @@ -0,0 +1,2 @@ +# Vsphere Hypervisor can be imported by specifying the GUID +terraform import citrix_vsphere_hypervisor.example-vsphere-hypervisor b2339edf-7b00-436e-9c3a-54c987c3526e \ No newline at end of file diff --git a/internal/examples/resources/citrix_vsphere_hypervisor/resource.tf b/internal/examples/resources/citrix_vsphere_hypervisor/resource.tf new file mode 
100644 index 0000000..120ce42 --- /dev/null +++ b/internal/examples/resources/citrix_vsphere_hypervisor/resource.tf @@ -0,0 +1,10 @@ +# Vsphere Hypervisor +resource "citrix_vsphere_hypervisor" "example-vsphere-hypervisor" { + name = "example-vsphere-hypervisor" + zone = "" + username = "" + password = "" + password_format = "Plaintext" + addresses = ["https://10.36.122.45"] + max_absolute_active_actions = 20 +} \ No newline at end of file diff --git a/internal/examples/resources/citrix_xenserver_hypervisor/import.sh b/internal/examples/resources/citrix_xenserver_hypervisor/import.sh new file mode 100644 index 0000000..15540be --- /dev/null +++ b/internal/examples/resources/citrix_xenserver_hypervisor/import.sh @@ -0,0 +1,2 @@ +# Hypervisor can be imported by specifying the GUID +terraform import citrix_xenserver_hypervisor.example-xenserver-hypervisor b2339edf-7b00-436e-9c3a-54c987c3526e \ No newline at end of file diff --git a/internal/examples/resources/citrix_xenserver_hypervisor/resource.tf b/internal/examples/resources/citrix_xenserver_hypervisor/resource.tf new file mode 100644 index 0000000..e537ad0 --- /dev/null +++ b/internal/examples/resources/citrix_xenserver_hypervisor/resource.tf @@ -0,0 +1,14 @@ +# XenServer Hypervisor +resource "citrix_xenserver_hypervisor" "example-xenserver-hypervisor" { + name = "example-xenserver-hypervisor" + zone = "" + username = "" + password = "" + password_format = "PlainText" + addresses = [ + "http://" + ] + ssl_thumbprints = [ + "" + ] +} \ No newline at end of file diff --git a/internal/examples/resources/citrix_xenserver_hypervisor_resource_pool/import.sh b/internal/examples/resources/citrix_xenserver_hypervisor_resource_pool/import.sh new file mode 100644 index 0000000..ee42a97 --- /dev/null +++ b/internal/examples/resources/citrix_xenserver_hypervisor_resource_pool/import.sh @@ -0,0 +1,2 @@ +# Hypervisor Resource Pool can be imported with the format HypervisorId,HypervisorResourcePoolId +terraform import citrix_xenserver_hypervisor_resource_pool.example-xenserver-hypervisor-resource-pool sbf0dc45-5c42-45a0-a15d-a3df4ff5da8c,ce571dd9-1a46-4b85-891c-484423322c53 \ No newline at end of file diff --git a/internal/examples/resources/citrix_xenserver_hypervisor_resource_pool/resource.tf b/internal/examples/resources/citrix_xenserver_hypervisor_resource_pool/resource.tf new file mode 100644 index 0000000..4ddcef7 --- /dev/null +++ b/internal/examples/resources/citrix_xenserver_hypervisor_resource_pool/resource.tf @@ -0,0 +1,15 @@ +resource "citrix_xenserver_hypervisor_resource_pool" "example-xenserver-hypervisor-resource-pool" { + name = "example-xenserver-hypervisor-resource-pool" + hypervisor = citrix_xenserver_hypervisor.example-xenserver-hypervisor.id + networks = [ + "", + "" + ] + storage = [ + "" + ] + temporary_storage = [ + "" + ] + use_local_storage_caching = false +} \ No newline at end of file diff --git a/internal/examples/resources/citrix_zone/import.sh b/internal/examples/resources/citrix_zone/import.sh new file mode 100644 index 0000000..d798ef3 --- /dev/null +++ b/internal/examples/resources/citrix_zone/import.sh @@ -0,0 +1,2 @@ +# Zone can be imported by specifying the GUID +terraform import citrix_zone.example-zone 06e5981e-dbaf-48db-b134-245fca2dc672 \ No newline at end of file diff --git a/internal/examples/resources/citrix_daas_zone/resource.tf b/internal/examples/resources/citrix_zone/resource.tf similarity index 81% rename from internal/examples/resources/citrix_daas_zone/resource.tf rename to 
internal/examples/resources/citrix_zone/resource.tf index 4170daf..baaca3f 100644 --- a/internal/examples/resources/citrix_daas_zone/resource.tf +++ b/internal/examples/resources/citrix_zone/resource.tf @@ -1,4 +1,4 @@ -resource "citrix_daas_zone" "example-zone" { +resource "citrix_zone" "example-zone" { name = "example-zone" description = "zone example" metadata = [ diff --git a/internal/provider/provider.go b/internal/provider/provider.go index 1dbd4ef..8075795 100644 --- a/internal/provider/provider.go +++ b/internal/provider/provider.go @@ -19,19 +19,17 @@ import ( "time" citrixclient "github.com/citrix/citrix-daas-rest-go/client" - "github.com/citrix/terraform-provider-citrix/internal/daas/data_sources/application_folder_details" - "github.com/citrix/terraform-provider-citrix/internal/daas/data_sources/vda" - "github.com/citrix/terraform-provider-citrix/internal/daas/resources/admin_role" - "github.com/citrix/terraform-provider-citrix/internal/daas/resources/application" - "github.com/citrix/terraform-provider-citrix/internal/daas/resources/application_folder" - - admin_scope_data_source "github.com/citrix/terraform-provider-citrix/internal/daas/data_sources/admin_scope" - "github.com/citrix/terraform-provider-citrix/internal/daas/resources/admin_scope" - "github.com/citrix/terraform-provider-citrix/internal/daas/resources/delivery_group" - "github.com/citrix/terraform-provider-citrix/internal/daas/resources/hypervisor" - "github.com/citrix/terraform-provider-citrix/internal/daas/resources/hypervisor_resource_pool" - "github.com/citrix/terraform-provider-citrix/internal/daas/resources/machine_catalog" - "github.com/citrix/terraform-provider-citrix/internal/daas/resources/zone" + "github.com/citrix/terraform-provider-citrix/internal/daas/admin_role" + "github.com/citrix/terraform-provider-citrix/internal/daas/application" + "github.com/citrix/terraform-provider-citrix/internal/daas/vda" + + "github.com/citrix/terraform-provider-citrix/internal/daas/admin_scope" + "github.com/citrix/terraform-provider-citrix/internal/daas/delivery_group" + "github.com/citrix/terraform-provider-citrix/internal/daas/hypervisor" + "github.com/citrix/terraform-provider-citrix/internal/daas/hypervisor_resource_pool" + "github.com/citrix/terraform-provider-citrix/internal/daas/machine_catalog" + "github.com/citrix/terraform-provider-citrix/internal/daas/policies" + "github.com/citrix/terraform-provider-citrix/internal/daas/zone" "github.com/citrix/terraform-provider-citrix/internal/util" "github.com/google/uuid" @@ -542,8 +540,8 @@ func (p *citrixProvider) Configure(ctx context.Context, req provider.ConfigureRe func (p *citrixProvider) DataSources(_ context.Context) []func() datasource.DataSource { return []func() datasource.DataSource{ vda.NewVdaDataSource, - application_folder_details.NewApplicationDataSourceSource, - admin_scope_data_source.NewAdminScopeDataSource, + application.NewApplicationDataSourceSource, + admin_scope.NewAdminScopeDataSource, } } @@ -554,15 +552,19 @@ func (p *citrixProvider) Resources(_ context.Context) []func() resource.Resource hypervisor.NewAzureHypervisorResource, hypervisor.NewAwsHypervisorResource, hypervisor.NewGcpHypervisorResource, + hypervisor.NewVsphereHypervisorResource, + hypervisor.NewXenserverHypervisorResource, + hypervisor.NewNutanixHypervisorResource, hypervisor_resource_pool.NewAzureHypervisorResourcePoolResource, hypervisor_resource_pool.NewAwsHypervisorResourcePoolResource, hypervisor_resource_pool.NewGcpHypervisorResourcePoolResource, + 
hypervisor_resource_pool.NewXenserverHypervisorResourcePoolResource, machine_catalog.NewMachineCatalogResource, delivery_group.NewDeliveryGroupResource, application.NewApplicationResource, - application_folder.NewApplicationFolderResource, + application.NewApplicationFolderResource, admin_scope.NewAdminScopeResource, admin_role.NewAdminRoleResource, - //Add resource here + policies.NewPolicySetResource, } } diff --git a/internal/test/admin_role_resource_test.go b/internal/test/admin_role_resource_test.go index 8b40917..abb360c 100644 --- a/internal/test/admin_role_resource_test.go +++ b/internal/test/admin_role_resource_test.go @@ -31,22 +31,22 @@ func TestAdminRoleResource(t *testing.T) { Config: fmt.Sprintf(adminRoleTestResource, name), Check: resource.ComposeAggregateTestCheckFunc( // Verify the name of the admin role - resource.TestCheckResourceAttr("citrix_daas_admin_role.test_role", "name", name), + resource.TestCheckResourceAttr("citrix_admin_role.test_role", "name", name), // Verify the description of the admin role - resource.TestCheckResourceAttr("citrix_daas_admin_role.test_role", "description", "Test role created via terraform"), + resource.TestCheckResourceAttr("citrix_admin_role.test_role", "description", "Test role created via terraform"), // Verify the value of the can_launch_manage flag (Set to true by default) - resource.TestCheckResourceAttr("citrix_daas_admin_role.test_role", "can_launch_manage", "true"), + resource.TestCheckResourceAttr("citrix_admin_role.test_role", "can_launch_manage", "true"), // Verify the value of the can_launch_monitor flag (Set to true by default) - resource.TestCheckResourceAttr("citrix_daas_admin_role.test_role", "can_launch_monitor", "true"), + resource.TestCheckResourceAttr("citrix_admin_role.test_role", "can_launch_monitor", "true"), // Verify the permissions list - resource.TestCheckResourceAttr("citrix_daas_admin_role.test_role", "permissions.#", "2"), - resource.TestCheckResourceAttr("citrix_daas_admin_role.test_role", "permissions.0", "Director_DismissAlerts"), - resource.TestCheckResourceAttr("citrix_daas_admin_role.test_role", "permissions.1", "DesktopGroup_AddApplicationGroup"), + resource.TestCheckResourceAttr("citrix_admin_role.test_role", "permissions.#", "2"), + resource.TestCheckResourceAttr("citrix_admin_role.test_role", "permissions.0", "Director_DismissAlerts"), + resource.TestCheckResourceAttr("citrix_admin_role.test_role", "permissions.1", "DesktopGroup_AddApplicationGroup"), ), }, // ImportState testing { - ResourceName: "citrix_daas_admin_role.test_role", + ResourceName: "citrix_admin_role.test_role", ImportState: true, ImportStateVerify: true, // The last_updated attribute does not exist in the Orchestration @@ -58,18 +58,18 @@ func TestAdminRoleResource(t *testing.T) { Config: fmt.Sprintf(adminRoleTestResource_updated, name), Check: resource.ComposeAggregateTestCheckFunc( // Verify the name of the admin role - resource.TestCheckResourceAttr("citrix_daas_admin_role.test_role", "name", fmt.Sprintf("%s-updated", name)), + resource.TestCheckResourceAttr("citrix_admin_role.test_role", "name", fmt.Sprintf("%s-updated", name)), // Verify the description of the admin role - resource.TestCheckResourceAttr("citrix_daas_admin_role.test_role", "description", "Updated description for test role"), + resource.TestCheckResourceAttr("citrix_admin_role.test_role", "description", "Updated description for test role"), // Verify the value of the can_launch_manage flag - resource.TestCheckResourceAttr("citrix_daas_admin_role.test_role", 
"can_launch_manage", "true"), + resource.TestCheckResourceAttr("citrix_admin_role.test_role", "can_launch_manage", "true"), // Verify the value of the can_launch_monitor flag - resource.TestCheckResourceAttr("citrix_daas_admin_role.test_role", "can_launch_monitor", "true"), + resource.TestCheckResourceAttr("citrix_admin_role.test_role", "can_launch_monitor", "true"), // Verify the permissions list - resource.TestCheckResourceAttr("citrix_daas_admin_role.test_role", "permissions.#", "3"), - resource.TestCheckResourceAttr("citrix_daas_admin_role.test_role", "permissions.0", "Director_DismissAlerts"), - resource.TestCheckResourceAttr("citrix_daas_admin_role.test_role", "permissions.1", "ApplicationGroup_AddScope"), - resource.TestCheckResourceAttr("citrix_daas_admin_role.test_role", "permissions.2", "AppLib_AddPackage"), + resource.TestCheckResourceAttr("citrix_admin_role.test_role", "permissions.#", "3"), + resource.TestCheckResourceAttr("citrix_admin_role.test_role", "permissions.0", "Director_DismissAlerts"), + resource.TestCheckResourceAttr("citrix_admin_role.test_role", "permissions.1", "ApplicationGroup_AddScope"), + resource.TestCheckResourceAttr("citrix_admin_role.test_role", "permissions.2", "AppLib_AddPackage"), ), }, // Delete testing automatically occurs in TestCase @@ -79,14 +79,14 @@ func TestAdminRoleResource(t *testing.T) { var ( adminRoleTestResource = ` - resource "citrix_daas_admin_role" "test_role" { + resource "citrix_admin_role" "test_role" { name = "%s" description = "Test role created via terraform" permissions = ["Director_DismissAlerts", "DesktopGroup_AddApplicationGroup"] } ` adminRoleTestResource_updated = ` - resource "citrix_daas_admin_role" "test_role" { + resource "citrix_admin_role" "test_role" { name = "%s-updated" description = "Updated description for test role" can_launch_manage = true diff --git a/internal/test/admin_scope_data_source_test.go b/internal/test/admin_scope_data_source_test.go index 3241245..523ca16 100644 --- a/internal/test/admin_scope_data_source_test.go +++ b/internal/test/admin_scope_data_source_test.go @@ -18,19 +18,19 @@ func TestAdminScopeDataSource(t *testing.T) { Config: admin_scope_test_data_source_using_name, Check: resource.ComposeAggregateTestCheckFunc( // Verify the ID of the admin scope - resource.TestCheckResourceAttr("data.citrix_daas_admin_scope.test_scope_by_name", "id", "00000000-0000-0000-0000-000000000000"), + resource.TestCheckResourceAttr("data.citrix_admin_scope.test_scope_by_name", "id", "00000000-0000-0000-0000-000000000000"), // Verify the description of the admin scope - resource.TestCheckResourceAttr("data.citrix_daas_admin_scope.test_scope_by_name", "description", "All objects"), + resource.TestCheckResourceAttr("data.citrix_admin_scope.test_scope_by_name", "description", "All objects"), // Verify the is_built_in attribute of the admin scope (Value should be true for "All" scope) - resource.TestCheckResourceAttr("data.citrix_daas_admin_scope.test_scope_by_name", "is_built_in", "true"), + resource.TestCheckResourceAttr("data.citrix_admin_scope.test_scope_by_name", "is_built_in", "true"), // Verify the is_all_scope attribute of the admin scope (Value should be true for "All" scope) - resource.TestCheckResourceAttr("data.citrix_daas_admin_scope.test_scope_by_name", "is_all_scope", "true"), + resource.TestCheckResourceAttr("data.citrix_admin_scope.test_scope_by_name", "is_all_scope", "true"), // Verify the is_tenant_scope attribute of the admin scope (Value should be false for "All" scope) - 
resource.TestCheckResourceAttr("data.citrix_daas_admin_scope.test_scope_by_name", "is_tenant_scope", "false"), + resource.TestCheckResourceAttr("data.citrix_admin_scope.test_scope_by_name", "is_tenant_scope", "false"), // Verify the tenant_id attribute of the admin scope (Value should be empty for "All" scope) - resource.TestCheckResourceAttr("data.citrix_daas_admin_scope.test_scope_by_name", "tenant_id", ""), + resource.TestCheckResourceAttr("data.citrix_admin_scope.test_scope_by_name", "tenant_id", ""), // Verify the tenant_name attribute of the admin scope (Value should be empty for "All" scope) - resource.TestCheckResourceAttr("data.citrix_daas_admin_scope.test_scope_by_name", "tenant_name", ""), + resource.TestCheckResourceAttr("data.citrix_admin_scope.test_scope_by_name", "tenant_name", ""), ), }, // Read testing using Name @@ -38,19 +38,19 @@ func TestAdminScopeDataSource(t *testing.T) { Config: admin_scope_test_data_source_using_id, Check: resource.ComposeAggregateTestCheckFunc( // Verify the name of the admin scope - resource.TestCheckResourceAttr("data.citrix_daas_admin_scope.test_scope_by_id", "name", "All"), + resource.TestCheckResourceAttr("data.citrix_admin_scope.test_scope_by_id", "name", "All"), // Verify the description of the admin scope - resource.TestCheckResourceAttr("data.citrix_daas_admin_scope.test_scope_by_id", "description", "All objects"), + resource.TestCheckResourceAttr("data.citrix_admin_scope.test_scope_by_id", "description", "All objects"), // Verify the is_built_in attribute of the admin scope (Value should be true for "All" scope) - resource.TestCheckResourceAttr("data.citrix_daas_admin_scope.test_scope_by_id", "is_built_in", "true"), + resource.TestCheckResourceAttr("data.citrix_admin_scope.test_scope_by_id", "is_built_in", "true"), // Verify the is_all_scope attribute of the admin scope (Value should be true for "All" scope) - resource.TestCheckResourceAttr("data.citrix_daas_admin_scope.test_scope_by_id", "is_all_scope", "true"), + resource.TestCheckResourceAttr("data.citrix_admin_scope.test_scope_by_id", "is_all_scope", "true"), // Verify the is_tenant_scope attribute of the admin scope (Value should be false for "All" scope) - resource.TestCheckResourceAttr("data.citrix_daas_admin_scope.test_scope_by_id", "is_tenant_scope", "false"), + resource.TestCheckResourceAttr("data.citrix_admin_scope.test_scope_by_id", "is_tenant_scope", "false"), // Verify the tenant_id attribute of the admin scope (Value should be empty for "All" scope) - resource.TestCheckResourceAttr("data.citrix_daas_admin_scope.test_scope_by_id", "tenant_id", ""), + resource.TestCheckResourceAttr("data.citrix_admin_scope.test_scope_by_id", "tenant_id", ""), // Verify the tenant_name attribute of the admin scope (Value should be empty for "All" scope) - resource.TestCheckResourceAttr("data.citrix_daas_admin_scope.test_scope_by_id", "tenant_name", ""), + resource.TestCheckResourceAttr("data.citrix_admin_scope.test_scope_by_id", "tenant_name", ""), ), }, }, @@ -59,7 +59,7 @@ func TestAdminScopeDataSource(t *testing.T) { var ( admin_scope_test_data_source_using_name = ` - data "citrix_daas_admin_scope" "test_scope_by_name" { + data "citrix_admin_scope" "test_scope_by_name" { name = "All" } ` @@ -67,7 +67,7 @@ var ( var ( admin_scope_test_data_source_using_id = ` - data "citrix_daas_admin_scope" "test_scope_by_id" { + data "citrix_admin_scope" "test_scope_by_id" { id = "00000000-0000-0000-0000-000000000000" } ` diff --git a/internal/test/admin_scope_resource_test.go 
b/internal/test/admin_scope_resource_test.go index e204131..abfa1a6 100644 --- a/internal/test/admin_scope_resource_test.go +++ b/internal/test/admin_scope_resource_test.go @@ -26,7 +26,6 @@ func TestAdminScopeResource(t *testing.T) { ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, PreCheck: func() { TestProviderPreCheck(t) - TestZonePreCheck(t) TestHypervisorPreCheck_Azure(t) TestHypervisorResourcePoolPreCheck_Azure(t) TestMachineCatalogPreCheck_Azure(t) @@ -39,19 +38,19 @@ func TestAdminScopeResource(t *testing.T) { Config: BuildAdminScopeResource(t, adminScopeTestResource), Check: resource.ComposeAggregateTestCheckFunc( // Verify the name of the admin scope - resource.TestCheckResourceAttr("citrix_daas_admin_scope.test_scope", "name", name), + resource.TestCheckResourceAttr("citrix_admin_scope.test_scope", "name", name), // Verify the description of the admin scope - resource.TestCheckResourceAttr("citrix_daas_admin_scope.test_scope", "description", "test scope created via terraform"), + resource.TestCheckResourceAttr("citrix_admin_scope.test_scope", "description", "test scope created via terraform"), // Verify number of scoped objects - resource.TestCheckResourceAttr("citrix_daas_admin_scope.test_scope", "scoped_objects.#", "1"), + resource.TestCheckResourceAttr("citrix_admin_scope.test_scope", "scoped_objects.#", "1"), // Verify the scoped objects data - resource.TestCheckResourceAttr("citrix_daas_admin_scope.test_scope", "scoped_objects.0.object_type", "DeliveryGroup"), - resource.TestCheckResourceAttr("citrix_daas_admin_scope.test_scope", "scoped_objects.0.object", dgName), + resource.TestCheckResourceAttr("citrix_admin_scope.test_scope", "scoped_objects.0.object_type", "DeliveryGroup"), + resource.TestCheckResourceAttr("citrix_admin_scope.test_scope", "scoped_objects.0.object", dgName), ), }, // ImportState testing { - ResourceName: "citrix_daas_admin_scope.test_scope", + ResourceName: "citrix_admin_scope.test_scope", ImportState: true, ImportStateVerify: true, // The last_updated attribute does not exist in the Orchestration @@ -63,16 +62,16 @@ func TestAdminScopeResource(t *testing.T) { Config: BuildAdminScopeResource(t, adminScopeTestResource_updated), Check: resource.ComposeAggregateTestCheckFunc( // Verify the name of the admin scope - resource.TestCheckResourceAttr("citrix_daas_admin_scope.test_scope", "name", fmt.Sprintf("%s-updated", name)), + resource.TestCheckResourceAttr("citrix_admin_scope.test_scope", "name", fmt.Sprintf("%s-updated", name)), // Verify the description of the admin scope - resource.TestCheckResourceAttr("citrix_daas_admin_scope.test_scope", "description", "Updated description for test scope"), + resource.TestCheckResourceAttr("citrix_admin_scope.test_scope", "description", "Updated description for test scope"), // Verify number of scoped objects - resource.TestCheckResourceAttr("citrix_daas_admin_scope.test_scope", "scoped_objects.#", "2"), + resource.TestCheckResourceAttr("citrix_admin_scope.test_scope", "scoped_objects.#", "2"), // Verify the scoped objects data - resource.TestCheckResourceAttr("citrix_daas_admin_scope.test_scope", "scoped_objects.0.object_type", "DeliveryGroup"), - resource.TestCheckResourceAttr("citrix_daas_admin_scope.test_scope", "scoped_objects.0.object", dgName), - resource.TestCheckResourceAttr("citrix_daas_admin_scope.test_scope", "scoped_objects.1.object_type", "MachineCatalog"), - resource.TestCheckResourceAttr("citrix_daas_admin_scope.test_scope", "scoped_objects.1.object", catalogName), + 
resource.TestCheckResourceAttr("citrix_admin_scope.test_scope", "scoped_objects.0.object_type", "DeliveryGroup"), + resource.TestCheckResourceAttr("citrix_admin_scope.test_scope", "scoped_objects.0.object", dgName), + resource.TestCheckResourceAttr("citrix_admin_scope.test_scope", "scoped_objects.1.object_type", "MachineCatalog"), + resource.TestCheckResourceAttr("citrix_admin_scope.test_scope", "scoped_objects.1.object", catalogName), ), }, // Delete testing automatically occurs in TestCase @@ -82,29 +81,29 @@ func TestAdminScopeResource(t *testing.T) { var ( adminScopeTestResource = ` - resource "citrix_daas_admin_scope" "test_scope" { + resource "citrix_admin_scope" "test_scope" { name = "%s" description = "test scope created via terraform" scoped_objects = [ { object_type = "DeliveryGroup", - object = citrix_daas_delivery_group.testDeliveryGroup.name + object = citrix_delivery_group.testDeliveryGroup.name } ] } ` adminScopeTestResource_updated = ` - resource "citrix_daas_admin_scope" "test_scope" { + resource "citrix_admin_scope" "test_scope" { name = "%s-updated" description = "Updated description for test scope" scoped_objects = [ { object_type = "DeliveryGroup", - object = citrix_daas_delivery_group.testDeliveryGroup.name + object = citrix_delivery_group.testDeliveryGroup.name }, { object_type = "MachineCatalog", - object = citrix_daas_machine_catalog.testMachineCatalog.name + object = citrix_machine_catalog.testMachineCatalog.name } ] } diff --git a/internal/test/application_folder_resource_test.go b/internal/test/application_folder_resource_test.go index 90588a7..a003597 100644 --- a/internal/test/application_folder_resource_test.go +++ b/internal/test/application_folder_resource_test.go @@ -33,18 +33,18 @@ func TestApplicationFolderResource(t *testing.T) { Config: BuildApplicationFolderResource(t, testApplicationFolderResource), Check: resource.ComposeAggregateTestCheckFunc( // Verify name of application - resource.TestCheckResourceAttr("citrix_daas_application_folder.testApplicationFolder1", "name", folder_name_1), + resource.TestCheckResourceAttr("citrix_application_folder.testApplicationFolder1", "name", folder_name_1), // Verify name of application - resource.TestCheckResourceAttr("citrix_daas_application_folder.testApplicationFolder2", "name", folder_name_2), + resource.TestCheckResourceAttr("citrix_application_folder.testApplicationFolder2", "name", folder_name_2), // Verify parent path of application - resource.TestCheckResourceAttr("citrix_daas_application_folder.testApplicationFolder2", "parent_path", fmt.Sprintf("%s\\", folder_name_1)), + resource.TestCheckResourceAttr("citrix_application_folder.testApplicationFolder2", "parent_path", fmt.Sprintf("%s\\", folder_name_1)), // Verify path of application - resource.TestCheckResourceAttr("citrix_daas_application_folder.testApplicationFolder2", "path", fmt.Sprintf("%s\\%s\\", folder_name_1, folder_name_2)), + resource.TestCheckResourceAttr("citrix_application_folder.testApplicationFolder2", "path", fmt.Sprintf("%s\\%s\\", folder_name_1, folder_name_2)), ), }, // ImportState testing { - ResourceName: "citrix_daas_application_folder.testApplicationFolder2", + ResourceName: "citrix_application_folder.testApplicationFolder2", ImportState: true, ImportStateVerify: true, }, @@ -53,9 +53,9 @@ func TestApplicationFolderResource(t *testing.T) { Config: BuildApplicationFolderResource(t, testApplicationFolderResource_updated), Check: resource.ComposeAggregateTestCheckFunc( // Verify name of application - 
resource.TestCheckResourceAttr("citrix_daas_application_folder.testApplicationFolder1", "name", fmt.Sprintf("%s-updated", folder_name_1)), + resource.TestCheckResourceAttr("citrix_application_folder.testApplicationFolder1", "name", fmt.Sprintf("%s-updated", folder_name_1)), // Verify parent path of application - resource.TestCheckResourceAttr("citrix_daas_application_folder.testApplicationFolder2", "path", fmt.Sprintf("%s\\", folder_name_2)), + resource.TestCheckResourceAttr("citrix_application_folder.testApplicationFolder2", "path", fmt.Sprintf("%s\\", folder_name_2)), ), }, // Delete testing automatically occurs in TestCase @@ -65,21 +65,21 @@ func TestApplicationFolderResource(t *testing.T) { var ( testApplicationFolderResource = ` -resource "citrix_daas_application_folder" "testApplicationFolder1" { +resource "citrix_application_folder" "testApplicationFolder1" { name = "%s" } -resource "citrix_daas_application_folder" "testApplicationFolder2" { +resource "citrix_application_folder" "testApplicationFolder2" { name = "%s" - parent_path = citrix_daas_application_folder.testApplicationFolder1.path + parent_path = citrix_application_folder.testApplicationFolder1.path } ` testApplicationFolderResource_updated = ` -resource "citrix_daas_application_folder" "testApplicationFolder1" { +resource "citrix_application_folder" "testApplicationFolder1" { name = "%s-updated" } -resource "citrix_daas_application_folder" "testApplicationFolder2" { +resource "citrix_application_folder" "testApplicationFolder2" { name = "%s" } ` diff --git a/internal/test/application_resource_test.go b/internal/test/application_resource_test.go index 8dfa0a3..c130bc3 100644 --- a/internal/test/application_resource_test.go +++ b/internal/test/application_resource_test.go @@ -23,7 +23,6 @@ func TestApplicationResource(t *testing.T) { ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, PreCheck: func() { TestProviderPreCheck(t) - TestZonePreCheck(t) TestHypervisorPreCheck_Azure(t) TestHypervisorResourcePoolPreCheck_Azure(t) TestMachineCatalogPreCheck_Azure(t) @@ -37,18 +36,18 @@ func TestApplicationResource(t *testing.T) { Config: BuildApplicationResource(t, testApplicationResource), Check: resource.ComposeAggregateTestCheckFunc( // Verify name of application - resource.TestCheckResourceAttr("citrix_daas_application.testApplication", "name", name), + resource.TestCheckResourceAttr("citrix_application.testApplication", "name", name), // Verify description of application - resource.TestCheckResourceAttr("citrix_daas_application.testApplication", "description", "Application for testing"), + resource.TestCheckResourceAttr("citrix_application.testApplication", "description", "Application for testing"), // Verify the number of delivery groups - resource.TestCheckResourceAttr("citrix_daas_application.testApplication", "delivery_groups.#", "1"), + resource.TestCheckResourceAttr("citrix_application.testApplication", "delivery_groups.#", "1"), // Verify the command line executable - resource.TestCheckResourceAttr("citrix_daas_application.testApplication", "installed_app_properties.command_line_executable", "test.exe"), + resource.TestCheckResourceAttr("citrix_application.testApplication", "installed_app_properties.command_line_executable", "test.exe"), ), }, // ImportState testing { - ResourceName: "citrix_daas_application.testApplication", + ResourceName: "citrix_application.testApplication", ImportState: true, ImportStateVerify: true, // The last_updated attribute does not exist in the Orchestration @@ -60,15 +59,15 @@ func 
TestApplicationResource(t *testing.T) { Config: BuildApplicationResource(t, testApplicationResource_updated), Check: resource.ComposeAggregateTestCheckFunc( // Verify name of application - resource.TestCheckResourceAttr("citrix_daas_application.testApplication", "name", fmt.Sprintf("%s-updated", name)), + resource.TestCheckResourceAttr("citrix_application.testApplication", "name", fmt.Sprintf("%s-updated", name)), // Verify description of application - resource.TestCheckResourceAttr("citrix_daas_application.testApplication", "description", "Application for testing updated"), + resource.TestCheckResourceAttr("citrix_application.testApplication", "description", "Application for testing updated"), // Verify the command line arguments - resource.TestCheckResourceAttr("citrix_daas_application.testApplication", "installed_app_properties.command_line_arguments", "update test arguments"), + resource.TestCheckResourceAttr("citrix_application.testApplication", "installed_app_properties.command_line_arguments", "update test arguments"), // Verify the command line executable - resource.TestCheckResourceAttr("citrix_daas_application.testApplication", "installed_app_properties.command_line_executable", "updated_test.exe"), + resource.TestCheckResourceAttr("citrix_application.testApplication", "installed_app_properties.command_line_executable", "updated_test.exe"), // Verify the application folder path - resource.TestCheckResourceAttr("citrix_daas_application.testApplication", "application_folder_path", fmt.Sprintf("%s\\", updated_folder_name)), + resource.TestCheckResourceAttr("citrix_application.testApplication", "application_folder_path", fmt.Sprintf("%s\\", updated_folder_name)), ), }, // Delete testing @@ -78,7 +77,7 @@ func TestApplicationResource(t *testing.T) { var ( testApplicationResource = ` -resource "citrix_daas_application" "testApplication" { +resource "citrix_application" "testApplication" { name = "%s" description = "Application for testing" published_name = "TestApplication" @@ -86,10 +85,10 @@ resource "citrix_daas_application" "testApplication" { command_line_executable = "test.exe" working_directory = "test directory" } - delivery_groups = [citrix_daas_delivery_group.testDeliveryGroup.id] + delivery_groups = [citrix_delivery_group.testDeliveryGroup.id] }` testApplicationResource_updated = ` -resource "citrix_daas_application" "testApplication" { +resource "citrix_application" "testApplication" { name = "%s-updated" description = "Application for testing updated" published_name = "TestApplication" @@ -98,8 +97,8 @@ resource "citrix_daas_application" "testApplication" { command_line_executable = "updated_test.exe" working_directory = "test directory" } - delivery_groups = [citrix_daas_delivery_group.testDeliveryGroup.id] - application_folder_path = citrix_daas_application_folder.testApplicationFolder2.path + delivery_groups = [citrix_delivery_group.testDeliveryGroup.id] + application_folder_path = citrix_application_folder.testApplicationFolder2.path }` ) diff --git a/internal/test/delivery_group_test.go b/internal/test/delivery_group_test.go index efc2cf7..f87820b 100644 --- a/internal/test/delivery_group_test.go +++ b/internal/test/delivery_group_test.go @@ -16,6 +16,10 @@ func TestDeliveryGroupPreCheck(t *testing.T) { if v := os.Getenv("TEST_DG_NAME"); v == "" { t.Fatal("TEST_DG_NAME must be set for acceptance tests") } + + if v := os.Getenv("TEST_POLICY_SET_WITHOUT_DG_NAME"); v == "" { + t.Fatal("TEST_POLICY_SET_WITHOUT_DG_NAME must be set for acceptance tests") + } } func 
TestDeliveryGroupResourceAzureRM(t *testing.T) { @@ -25,7 +29,6 @@ func TestDeliveryGroupResourceAzureRM(t *testing.T) { ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, PreCheck: func() { TestProviderPreCheck(t) - TestZonePreCheck(t) TestHypervisorPreCheck_Azure(t) TestHypervisorResourcePoolPreCheck_Azure(t) TestMachineCatalogPreCheck_Azure(t) @@ -38,21 +41,23 @@ func TestDeliveryGroupResourceAzureRM(t *testing.T) { Check: resource.ComposeAggregateTestCheckFunc( // Verify name of delivery group - resource.TestCheckResourceAttr("citrix_daas_delivery_group.testDeliveryGroup", "name", name), + resource.TestCheckResourceAttr("citrix_delivery_group.testDeliveryGroup", "name", name), // Verify description of delivery group - resource.TestCheckResourceAttr("citrix_daas_delivery_group.testDeliveryGroup", "description", "Delivery Group for testing"), + resource.TestCheckResourceAttr("citrix_delivery_group.testDeliveryGroup", "description", "Delivery Group for testing"), // Verify number of desktops - resource.TestCheckResourceAttr("citrix_daas_delivery_group.testDeliveryGroup", "desktops.#", "2"), + resource.TestCheckResourceAttr("citrix_delivery_group.testDeliveryGroup", "desktops.#", "2"), // Verify number of reboot schedules - resource.TestCheckResourceAttr("citrix_daas_delivery_group.testDeliveryGroup", "reboot_schedules.#", "2"), + resource.TestCheckResourceAttr("citrix_delivery_group.testDeliveryGroup", "reboot_schedules.#", "2"), // Verify total number of machines in delivery group - resource.TestCheckResourceAttr("citrix_daas_delivery_group.testDeliveryGroup", "total_machines", "1"), + resource.TestCheckResourceAttr("citrix_delivery_group.testDeliveryGroup", "total_machines", "1"), + // Verify the policy set id assigned to the delivery group + resource.TestCheckNoResourceAttr("citrix_delivery_group.testDeliveryGroup", "policy_set_id"), ), }, // ImportState testing { - ResourceName: "citrix_daas_delivery_group.testDeliveryGroup", + ResourceName: "citrix_delivery_group.testDeliveryGroup", ImportState: true, ImportStateVerify: true, // The last_updated attribute does not exist in the Orchestration @@ -66,15 +71,17 @@ func TestDeliveryGroupResourceAzureRM(t *testing.T) { Check: resource.ComposeAggregateTestCheckFunc( // Verify name of delivery group - resource.TestCheckResourceAttr("citrix_daas_delivery_group.testDeliveryGroup", "name", fmt.Sprintf("%s-updated", name)), + resource.TestCheckResourceAttr("citrix_delivery_group.testDeliveryGroup", "name", fmt.Sprintf("%s-updated", name)), // Verify description of delivery group - resource.TestCheckResourceAttr("citrix_daas_delivery_group.testDeliveryGroup", "description", "Delivery Group for testing updated"), + resource.TestCheckResourceAttr("citrix_delivery_group.testDeliveryGroup", "description", "Delivery Group for testing updated"), // Verify number of desktops - resource.TestCheckResourceAttr("citrix_daas_delivery_group.testDeliveryGroup", "desktops.#", "1"), + resource.TestCheckResourceAttr("citrix_delivery_group.testDeliveryGroup", "desktops.#", "1"), // Verify number of reboot schedules - resource.TestCheckResourceAttr("citrix_daas_delivery_group.testDeliveryGroup", "reboot_schedules.#", "1"), + resource.TestCheckResourceAttr("citrix_delivery_group.testDeliveryGroup", "reboot_schedules.#", "1"), // Verify total number of machines in delivery group - resource.TestCheckResourceAttr("citrix_daas_delivery_group.testDeliveryGroup", "total_machines", "2"), + resource.TestCheckResourceAttr("citrix_delivery_group.testDeliveryGroup", 
"total_machines", "2"), + // Verify the policy set id assigned to the delivery group + resource.TestCheckNoResourceAttr("citrix_delivery_group.testDeliveryGroup", "policy_set_id"), ), }, @@ -84,7 +91,18 @@ func TestDeliveryGroupResourceAzureRM(t *testing.T) { Check: resource.ComposeAggregateTestCheckFunc( // Verify total number of machines in delivery group - resource.TestCheckResourceAttr("citrix_daas_delivery_group.testDeliveryGroup", "total_machines", "1"), + resource.TestCheckResourceAttr("citrix_delivery_group.testDeliveryGroup", "total_machines", "1"), + // Verify the policy set id assigned to the delivery group + resource.TestCheckNoResourceAttr("citrix_delivery_group.testDeliveryGroup", "policy_set_id"), + ), + }, + // Update policy set testing + { + Config: BuildDeliveryGroupResource(t, testDeliveryGroupResources_updatedWithPolicySetId), + + Check: resource.ComposeAggregateTestCheckFunc( + // Verify the policy set id assigned to the delivery group + resource.TestCheckResourceAttrSet("citrix_delivery_group.testDeliveryGroup", "policy_set_id"), ), }, }, @@ -93,12 +111,12 @@ func TestDeliveryGroupResourceAzureRM(t *testing.T) { var ( testDeliveryGroupResources = ` -resource "citrix_daas_delivery_group" "testDeliveryGroup" { +resource "citrix_delivery_group" "testDeliveryGroup" { name = "%s" description = "Delivery Group for testing" associated_machine_catalogs = [ { - machine_catalog = citrix_daas_machine_catalog.testMachineCatalog.id + machine_catalog = citrix_machine_catalog.testMachineCatalog.id machine_count = 1 } ] @@ -181,16 +199,80 @@ resource "citrix_daas_delivery_group" "testDeliveryGroup" { } } ] - } ` testDeliveryGroupResources_updated = ` -resource "citrix_daas_delivery_group" "testDeliveryGroup" { +resource "citrix_delivery_group" "testDeliveryGroup" { + name = "%s-updated" + description = "Delivery Group for testing updated" + associated_machine_catalogs = [ + { + machine_catalog = citrix_machine_catalog.testMachineCatalog.id + machine_count = 2 + } + ] + desktops = [ + { + published_name = "desktop-1" + enabled = true + enable_session_roaming = true + } + ] + autoscale_settings = { + autoscale_enabled = true + power_time_schemes = [ + { + "days_of_week" = [ + "Monday", + "Tuesday", + "Wednesday", + "Thursday", + "Friday" + ] + "name" = "weekdays test" + "display_name" = "weekdays schedule" + "peak_time_ranges" = [ + "09:00-17:00" + ] + "pool_size_schedules": [ + { + "time_range": "00:00-00:00", + "pool_size": 1 + } + ], + "pool_using_percentage": false + }, + ] + } + reboot_schedules = [ + { + name = "test_reboot_schedule" + reboot_schedule_enabled = true + frequency = "Weekly" + frequency_factor = 1 + days_in_week = [ + "Monday", + "Tuesday", + "Wednesday" + ] + start_time = "12:12" + start_date = "2024-05-25" + reboot_duration_minutes = 0 + ignore_maintenance_mode = true + natural_reboot_schedule = false + } + ] +} + +` + + testDeliveryGroupResources_updatedWithPolicySetId = ` +resource "citrix_delivery_group" "testDeliveryGroup" { name = "%s-updated" description = "Delivery Group for testing updated" associated_machine_catalogs = [ { - machine_catalog = citrix_daas_machine_catalog.testMachineCatalog.id + machine_catalog = citrix_machine_catalog.testMachineCatalog.id machine_count = 2 } ] @@ -245,14 +327,45 @@ resource "citrix_daas_delivery_group" "testDeliveryGroup" { natural_reboot_schedule = false } ] - + policy_set_id = citrix_policy_set.testPolicySetWithoutDG.id } +` + + policy_set_no_delivery_group_testResource = ` +resource "citrix_policy_set" 
"testPolicySetWithoutDG" { + name = "%s" + description = "Test policy set description updated" + scopes = [ "All" ] + type = "DeliveryGroupPolicies" + policies = [ + { + name = "first-test-policy" + description = "First test policy with priority 0" + is_enabled = true + policy_settings = [ + { + name = "AdvanceWarningPeriod" + value = "13:00:00" + use_default = false + }, + ] + policy_filters = [ + ] + } + ] +} ` ) func BuildDeliveryGroupResource(t *testing.T, deliveryGroup string) string { name := os.Getenv("TEST_DG_NAME") - return BuildMachineCatalogResourceAzure(t, machinecatalog_testResources_azure_updated) + fmt.Sprintf(deliveryGroup, name) + return BuildMachineCatalogResourceAzure(t, machinecatalog_testResources_azure_updated, "ActiveDirectory") + BuildPolicySetResourceWithoutDeliveryGroup(t) + fmt.Sprintf(deliveryGroup, name) +} + +func BuildPolicySetResourceWithoutDeliveryGroup(t *testing.T) string { + policySetName := os.Getenv("TEST_POLICY_SET_WITHOUT_DG_NAME") + + return fmt.Sprintf(policy_set_no_delivery_group_testResource, policySetName) } diff --git a/internal/test/hypervisor_resource_pool_test.go b/internal/test/hypervisor_resource_pool_test.go index 87f9412..d6ccf25 100644 --- a/internal/test/hypervisor_resource_pool_test.go +++ b/internal/test/hypervisor_resource_pool_test.go @@ -38,7 +38,6 @@ func TestHypervisorResourcePoolAzureRM(t *testing.T) { ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, PreCheck: func() { TestProviderPreCheck(t) - TestZonePreCheck(t) TestHypervisorPreCheck_Azure(t) TestHypervisorResourcePoolPreCheck_Azure(t) }, @@ -48,20 +47,20 @@ func TestHypervisorResourcePoolAzureRM(t *testing.T) { Config: BuildHypervisorResourcePoolResourceAzure(t, hypervisor_resource_pool_testResource_azure), Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr("citrix_daas_azure_hypervisor_resource_pool.testHypervisorResourcePool", "name", name), + resource.TestCheckResourceAttr("citrix_azure_hypervisor_resource_pool.testHypervisorResourcePool", "name", name), // Verify name of virtual network resource group name - resource.TestCheckResourceAttr("citrix_daas_azure_hypervisor_resource_pool.testHypervisorResourcePool", "virtual_network_resource_group", os.Getenv("TEST_HYPERV_RP_VIRTUAL_NETWORK_RESOURCE_GROUP")), + resource.TestCheckResourceAttr("citrix_azure_hypervisor_resource_pool.testHypervisorResourcePool", "virtual_network_resource_group", os.Getenv("TEST_HYPERV_RP_VIRTUAL_NETWORK_RESOURCE_GROUP")), // Verify name of virtual network - resource.TestCheckResourceAttr("citrix_daas_azure_hypervisor_resource_pool.testHypervisorResourcePool", "virtual_network", os.Getenv("TEST_HYPERV_RP_VIRTUAL_NETWORK")), + resource.TestCheckResourceAttr("citrix_azure_hypervisor_resource_pool.testHypervisorResourcePool", "virtual_network", os.Getenv("TEST_HYPERV_RP_VIRTUAL_NETWORK")), // Verify name of the region - resource.TestCheckResourceAttr("citrix_daas_azure_hypervisor_resource_pool.testHypervisorResourcePool", "region", os.Getenv("TEST_HYPERV_RP_REGION")), + resource.TestCheckResourceAttr("citrix_azure_hypervisor_resource_pool.testHypervisorResourcePool", "region", os.Getenv("TEST_HYPERV_RP_REGION")), // Verify subnets - resource.TestCheckResourceAttr("citrix_daas_azure_hypervisor_resource_pool.testHypervisorResourcePool", "subnets.#", strconv.Itoa(len(strings.Split(os.Getenv("Test_HYPERV_RP_SUBNETS"), ",")))), + resource.TestCheckResourceAttr("citrix_azure_hypervisor_resource_pool.testHypervisorResourcePool", "subnets.#", 
strconv.Itoa(len(strings.Split(os.Getenv("TEST_HYPERV_RP_SUBNETS"), ",")))), ), }, // ImportState testing { - ResourceName: "citrix_daas_azure_hypervisor_resource_pool.testHypervisorResourcePool", + ResourceName: "citrix_azure_hypervisor_resource_pool.testHypervisorResourcePool", ImportState: true, ImportStateIdFunc: generateImportStateId, ImportStateVerify: true, @@ -71,7 +70,7 @@ func TestHypervisorResourcePoolAzureRM(t *testing.T) { { Config: BuildHypervisorResourcePoolResourceAzure(t, hypervisor_resource_pool_updated_testResource_azure), Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr("citrix_daas_azure_hypervisor_resource_pool.testHypervisorResourcePool", "name", fmt.Sprintf("%s-updated", name)), + resource.TestCheckResourceAttr("citrix_azure_hypervisor_resource_pool.testHypervisorResourcePool", "name", fmt.Sprintf("%s-updated", name)), ), }, }, @@ -103,7 +102,6 @@ func TestHypervisorResourcePoolGCP(t *testing.T) { ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, PreCheck: func() { TestProviderPreCheck(t) - TestZonePreCheck(t) TestHypervisorPreCheck_GCP(t) TestHypervisorResourcePoolPreCheck_GCP(t) }, @@ -113,20 +111,20 @@ func TestHypervisorResourcePoolGCP(t *testing.T) { Config: BuildHypervisorResourcePoolResourceGCP(t, hypervisor_resource_pool_testResource_gcp), Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr("citrix_daas_gcp_hypervisor_resource_pool.testHypervisorResourcePool", "name", name), + resource.TestCheckResourceAttr("citrix_gcp_hypervisor_resource_pool.testHypervisorResourcePool", "name", name), // Verify name of the region - resource.TestCheckResourceAttr("citrix_daas_gcp_hypervisor_resource_pool.testHypervisorResourcePool", "region", os.Getenv("TEST_HYPERV_RP_REGION_GCP")), + resource.TestCheckResourceAttr("citrix_gcp_hypervisor_resource_pool.testHypervisorResourcePool", "region", os.Getenv("TEST_HYPERV_RP_REGION_GCP")), // Verify subnets - resource.TestCheckResourceAttr("citrix_daas_gcp_hypervisor_resource_pool.testHypervisorResourcePool", "subnets.#", strconv.Itoa(len(strings.Split(os.Getenv("Test_HYPERV_RP_SUBNETS_GCP"), ",")))), + resource.TestCheckResourceAttr("citrix_gcp_hypervisor_resource_pool.testHypervisorResourcePool", "subnets.#", strconv.Itoa(len(strings.Split(os.Getenv("TEST_HYPERV_RP_SUBNETS_GCP"), ",")))), // Verify name of the project - resource.TestCheckResourceAttr("citrix_daas_gcp_hypervisor_resource_pool.testHypervisorResourcePool", "project_name", os.Getenv("TEST_HYPERV_RP_PROJECT_NAME_GCP")), + resource.TestCheckResourceAttr("citrix_gcp_hypervisor_resource_pool.testHypervisorResourcePool", "project_name", os.Getenv("TEST_HYPERV_RP_PROJECT_NAME_GCP")), // Verify name of the vpc - resource.TestCheckResourceAttr("citrix_daas_gcp_hypervisor_resource_pool.testHypervisorResourcePool", "vpc", os.Getenv("TEST_HYPERV_RP_VPC_GCP")), + resource.TestCheckResourceAttr("citrix_gcp_hypervisor_resource_pool.testHypervisorResourcePool", "vpc", os.Getenv("TEST_HYPERV_RP_VPC_GCP")), ), }, // ImportState testing { - ResourceName: "citrix_daas_gcp_hypervisor_resource_pool.testHypervisorResourcePool", + ResourceName: "citrix_gcp_hypervisor_resource_pool.testHypervisorResourcePool", ImportState: true, ImportStateIdFunc: generateImportStateId_GCP, ImportStateVerify: true, @@ -135,7 +133,72 @@ { Config: BuildHypervisorResourcePoolResourceGCP(t, hypervisor_resource_pool_updated_testResource_gcp), Check: resource.ComposeAggregateTestCheckFunc( -
resource.TestCheckResourceAttr("citrix_daas_gcp_hypervisor_resource_pool.testHypervisorResourcePool", "name", fmt.Sprintf("%s-updated", name)), + resource.TestCheckResourceAttr("citrix_gcp_hypervisor_resource_pool.testHypervisorResourcePool", "name", fmt.Sprintf("%s-updated", name)), + ), + }, + }, + }) +} + +func TestHypervisorResourcePoolPreCheck_Xenserver(t *testing.T) { + if v := os.Getenv("TEST_HYPERV_RP_NAME_XENSERVER"); v == "" { + t.Fatal("TEST_HYPERV_RP_NAME_XENSERVER must be set for acceptance tests") + } + if v := os.Getenv("TEST_HYPERV_RP_NETWORK_1_XENSERVER"); v == "" { + t.Fatal("TEST_HYPERV_RP_NETWORK_1_XENSERVER must be set for acceptance tests") + } + if v := os.Getenv("TEST_HYPERV_RP_NETWORK_2_XENSERVER"); v == "" { + t.Fatal("TEST_HYPERV_RP_NETWORK_2_XENSERVER must be set for acceptance tests") + } + if v := os.Getenv("TEST_HYPERV_RP_STORAGE_XENSERVER"); v == "" { + t.Fatal("TEST_HYPERV_RP_STORAGE_XENSERVER must be set for acceptance tests") + } + if v := os.Getenv("TEST_HYPERV_RP_TEMP_STORAGE_XENSERVER"); v == "" { + t.Fatal("TEST_HYPERV_RP_TEMP_STORAGE_XENSERVER must be set for acceptance tests") + } +} + +func TestHypervisorResourcePoolXenserver(t *testing.T) { + name := os.Getenv("TEST_HYPERV_RP_NAME_XENSERVER") + + resource.Test(t, resource.TestCase{ + ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, + PreCheck: func() { + TestProviderPreCheck(t) + TestHypervisorPreCheck_Xenserver(t) + TestHypervisorResourcePoolPreCheck_Xenserver(t) + }, + Steps: []resource.TestStep{ + // Create and Read testing + { + Config: BuildHypervisorResourcePoolResourceXenServer(t), + + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr("citrix_xenserver_hypervisor_resource_pool.testHypervisorResourcePool", "name", name), + // Verify networks + resource.TestCheckResourceAttr("citrix_xenserver_hypervisor_resource_pool.testHypervisorResourcePool", "networks.#", "1"), + resource.TestCheckResourceAttr("citrix_xenserver_hypervisor_resource_pool.testHypervisorResourcePool", "networks.0", os.Getenv("TEST_HYPERV_RP_NETWORK_1_XENSERVER")), + // Verify storage + resource.TestCheckResourceAttr("citrix_xenserver_hypervisor_resource_pool.testHypervisorResourcePool", "storage.#", "1"), + resource.TestCheckResourceAttr("citrix_xenserver_hypervisor_resource_pool.testHypervisorResourcePool", "storage.0", os.Getenv("TEST_HYPERV_RP_STORAGE_XENSERVER")), + // Verify temporary storage + resource.TestCheckResourceAttr("citrix_xenserver_hypervisor_resource_pool.testHypervisorResourcePool", "temporary_storage.#", "1"), + resource.TestCheckResourceAttr("citrix_xenserver_hypervisor_resource_pool.testHypervisorResourcePool", "temporary_storage.0", os.Getenv("TEST_HYPERV_RP_TEMP_STORAGE_XENSERVER")), + ), + }, + // ImportState testing + { + ResourceName: "citrix_xenserver_hypervisor_resource_pool.testHypervisorResourcePool", + ImportState: true, + ImportStateIdFunc: generateImportStateId_XenServer, + ImportStateVerify: true, + }, + // Update and Read testing + { + Config: BuildHypervisorResourcePoolResourceXenServerUpdated(t), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr("citrix_xenserver_hypervisor_resource_pool.testHypervisorResourcePool", "name", fmt.Sprintf("%s-updated", name)), + resource.TestCheckResourceAttr("citrix_xenserver_hypervisor_resource_pool.testHypervisorResourcePool", "networks.#", "2"), ), }, }, @@ -143,7 +206,7 @@ func TestHypervisorResourcePoolGCP(t *testing.T) { } func generateImportStateId(state *terraform.State)
(string, error) { - resourceName := "citrix_daas_azure_hypervisor_resource_pool.testHypervisorResourcePool" + resourceName := "citrix_azure_hypervisor_resource_pool.testHypervisorResourcePool" var rawState map[string]string for _, m := range state.Modules { if len(m.Resources) > 0 { @@ -157,7 +220,21 @@ func generateImportStateId(state *terraform.State) (string, error) { } func generateImportStateId_GCP(state *terraform.State) (string, error) { - resourceName := "citrix_daas_gcp_hypervisor_resource_pool.testHypervisorResourcePool" + resourceName := "citrix_gcp_hypervisor_resource_pool.testHypervisorResourcePool" + var rawState map[string]string + for _, m := range state.Modules { + if len(m.Resources) > 0 { + if v, ok := m.Resources[resourceName]; ok { + rawState = v.Primary.Attributes + } + } + } + + return fmt.Sprintf("%s,%s", rawState["hypervisor"], rawState["id"]), nil +} + +func generateImportStateId_XenServer(state *terraform.State) (string, error) { + resourceName := "citrix_xenserver_hypervisor_resource_pool.testHypervisorResourcePool" var rawState map[string]string for _, m := range state.Modules { if len(m.Resources) > 0 { @@ -172,9 +249,9 @@ func generateImportStateId_GCP(state *terraform.State) (string, error) { var ( hypervisor_resource_pool_testResource_azure = ` -resource "citrix_daas_azure_hypervisor_resource_pool" "testHypervisorResourcePool" { +resource "citrix_azure_hypervisor_resource_pool" "testHypervisorResourcePool" { name = "%s" - hypervisor = citrix_daas_azure_hypervisor.testHypervisor.id + hypervisor = citrix_azure_hypervisor.testHypervisor.id region = "%s" virtual_network_resource_group = "%s" virtual_network = "%s" @@ -183,9 +260,9 @@ resource "citrix_daas_azure_hypervisor_resource_pool" "testHypervisorResourcePoo ` hypervisor_resource_pool_updated_testResource_azure = ` -resource "citrix_daas_azure_hypervisor_resource_pool" "testHypervisorResourcePool" { +resource "citrix_azure_hypervisor_resource_pool" "testHypervisorResourcePool" { name = "%s-updated" - hypervisor = citrix_daas_azure_hypervisor.testHypervisor.id + hypervisor = citrix_azure_hypervisor.testHypervisor.id region = "%s" virtual_network_resource_group = "%s" virtual_network = "%s" @@ -193,9 +270,9 @@ resource "citrix_daas_azure_hypervisor_resource_pool" "testHypervisorResourcePoo } ` hypervisor_resource_pool_testResource_gcp = ` -resource "citrix_daas_gcp_hypervisor_resource_pool" "testHypervisorResourcePool" { +resource "citrix_gcp_hypervisor_resource_pool" "testHypervisorResourcePool" { name = "%s" - hypervisor = citrix_daas_gcp_hypervisor.testHypervisor.id + hypervisor = citrix_gcp_hypervisor.testHypervisor.id project_name = "%s" region = "%s" subnets = %s @@ -203,14 +280,33 @@ resource "citrix_daas_gcp_hypervisor_resource_pool" "testHypervisorResourcePool" } ` hypervisor_resource_pool_updated_testResource_gcp = ` -resource "citrix_daas_gcp_hypervisor_resource_pool" "testHypervisorResourcePool" { +resource "citrix_gcp_hypervisor_resource_pool" "testHypervisorResourcePool" { name = "%s-updated" - hypervisor = citrix_daas_gcp_hypervisor.testHypervisor.id + hypervisor = citrix_gcp_hypervisor.testHypervisor.id project_name = "%s" region = "%s" subnets = %s vpc = "%s" } +` + + hypervisor_resource_pool_testResource_xenserver = ` +resource "citrix_xenserver_hypervisor_resource_pool" "testHypervisorResourcePool" { + name = "%s" + hypervisor = citrix_xenserver_hypervisor.testHypervisor.id + networks = ["%s"] + storage = ["%s"] + temporary_storage = ["%s"] +} +` + 
hypervisor_resource_pool_updated_testResource_xenserver = ` +resource "citrix_xenserver_hypervisor_resource_pool" "testHypervisorResourcePool" { + name = "%s-updated" + hypervisor = citrix_xenserver_hypervisor.testHypervisor.id + networks = ["%s", "%s"] + storage = ["%s"] + temporary_storage = ["%s"] +} ` ) @@ -233,3 +329,22 @@ func BuildHypervisorResourcePoolResourceGCP(t *testing.T, hypervisorRP string) s return BuildHypervisorResourceGCP(t, hypervisor_testResources_gcp) + fmt.Sprintf(hypervisorRP, name, projectName, region, subnet, vpc) } + +func BuildHypervisorResourcePoolResourceXenServer(t *testing.T) string { + name := os.Getenv("TEST_HYPERV_RP_NAME_XENSERVER") + network1 := os.Getenv("TEST_HYPERV_RP_NETWORK_1_XENSERVER") + storage := os.Getenv("TEST_HYPERV_RP_STORAGE_XENSERVER") + tempStorage := os.Getenv("TEST_HYPERV_RP_TEMP_STORAGE_XENSERVER") + + return BuildHypervisorResourceXenserver(t, hypervisor_testResources_xenserver) + fmt.Sprintf(hypervisor_resource_pool_testResource_xenserver, name, network1, storage, tempStorage) +} + +func BuildHypervisorResourcePoolResourceXenServerUpdated(t *testing.T) string { + name := os.Getenv("TEST_HYPERV_RP_NAME_XENSERVER") + network1 := os.Getenv("TEST_HYPERV_RP_NETWORK_1_XENSERVER") + network2 := os.Getenv("TEST_HYPERV_RP_NETWORK_2_XENSERVER") + storage := os.Getenv("TEST_HYPERV_RP_STORAGE_XENSERVER") + tempStorage := os.Getenv("TEST_HYPERV_RP_TEMP_STORAGE_XENSERVER") + + return BuildHypervisorResourceXenserver(t, hypervisor_testResources_xenserver) + fmt.Sprintf(hypervisor_resource_pool_updated_testResource_xenserver, name, network1, network2, storage, tempStorage) +} diff --git a/internal/test/hypervisor_resource_test.go b/internal/test/hypervisor_resource_test.go index dcc2fa7..c1a05e3 100644 --- a/internal/test/hypervisor_resource_test.go +++ b/internal/test/hypervisor_resource_test.go @@ -13,8 +13,11 @@ import ( // testHypervisorPreCheck validates the necessary env variable exist // in the testing environment func TestHypervisorPreCheck_Azure(t *testing.T) { - if v := os.Getenv("TEST_HYPERV_NAME"); v == "" { - t.Fatal("TEST_HYPERV_NAME must be set for acceptance tests") + if v := os.Getenv("TEST_ZONE_NAME_AZURE"); v == "" { + t.Fatal("TEST_ZONE_NAME_AZURE must be set for acceptance tests") + } + if v := os.Getenv("TEST_HYPERV_NAME_AZURE"); v == "" { + t.Fatal("TEST_HYPERV_NAME_AZURE must be set for acceptance tests") } if v := os.Getenv("TEST_HYPERV_AD_ID"); v == "" { t.Fatal("TEST_HYPERV_AD_ID must be set for acceptance tests") @@ -31,13 +34,12 @@ func TestHypervisorPreCheck_Azure(t *testing.T) { } func TestHypervisorResourceAzureRM(t *testing.T) { - name := os.Getenv("TEST_HYPERV_NAME") + name := os.Getenv("TEST_HYPERV_NAME_AZURE") resource.Test(t, resource.TestCase{ ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, PreCheck: func() { TestProviderPreCheck(t) - TestZonePreCheck(t) TestHypervisorPreCheck_Azure(t) }, Steps: []resource.TestStep{ @@ -48,13 +50,13 @@ func TestHypervisorResourceAzureRM(t *testing.T) { Check: resource.ComposeAggregateTestCheckFunc( // Verify name of hypervisor - resource.TestCheckResourceAttr("citrix_daas_azure_hypervisor.testHypervisor", "name", name), + resource.TestCheckResourceAttr("citrix_azure_hypervisor.testHypervisor", "name", name), ), }, // ImportState testing { - ResourceName: "citrix_daas_azure_hypervisor.testHypervisor", + ResourceName: "citrix_azure_hypervisor.testHypervisor", ImportState: true, ImportStateVerify: true, // The last_updated attribute does not exist in the Orchestration 
@@ -66,7 +68,7 @@ func TestHypervisorResourceAzureRM(t *testing.T) { Config: BuildHypervisorResourceAzure(t, hypervisor_testResources_updated), Check: resource.ComposeAggregateTestCheckFunc( // Verify name of hypervisor - resource.TestCheckResourceAttr("citrix_daas_azure_hypervisor.testHypervisor", "name", fmt.Sprintf("%s-updated", name)), + resource.TestCheckResourceAttr("citrix_azure_hypervisor.testHypervisor", "name", fmt.Sprintf("%s-updated", name)), ), }, }, @@ -74,6 +76,9 @@ func TestHypervisorResourceAzureRM(t *testing.T) { } func TestHypervisorPreCheck_GCP(t *testing.T) { + if v := os.Getenv("TEST_ZONE_NAME_GCP"); v == "" { + t.Fatal("TEST_ZONE_NAME_GCP must be set for acceptance tests") + } if v := os.Getenv("TEST_HYPERV_NAME_GCP"); v == "" { t.Fatal("TEST_HYPERV_NAME_GCP must be set for acceptance tests") } @@ -92,7 +97,6 @@ func TestHypervisorResourceGCP(t *testing.T) { ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, PreCheck: func() { TestProviderPreCheck(t) - TestZonePreCheck(t) TestHypervisorPreCheck_GCP(t) }, Steps: []resource.TestStep{ @@ -103,13 +107,13 @@ func TestHypervisorResourceGCP(t *testing.T) { Check: resource.ComposeAggregateTestCheckFunc( // Verify name of hypervisor - resource.TestCheckResourceAttr("citrix_daas_gcp_hypervisor.testHypervisor", "name", name), + resource.TestCheckResourceAttr("citrix_gcp_hypervisor.testHypervisor", "name", name), ), }, // ImportState testing { - ResourceName: "citrix_daas_gcp_hypervisor.testHypervisor", + ResourceName: "citrix_gcp_hypervisor.testHypervisor", ImportState: true, ImportStateVerify: true, // The last_updated attribute does not exist in the Orchestration @@ -121,7 +125,198 @@ func TestHypervisorResourceGCP(t *testing.T) { Config: BuildHypervisorResourceGCP(t, hypervisor_testResources_updated_gcp), Check: resource.ComposeAggregateTestCheckFunc( // Verify name of hypervisor - resource.TestCheckResourceAttr("citrix_daas_gcp_hypervisor.testHypervisor", "name", fmt.Sprintf("%s-updated", name)), + resource.TestCheckResourceAttr("citrix_gcp_hypervisor.testHypervisor", "name", fmt.Sprintf("%s-updated", name)), + ), + }, + }, + }) +} + +func TestHypervisorPreCheck_Vsphere(t *testing.T) { + if v := os.Getenv("TEST_ZONE_NAME_VSPHERE"); v == "" { + t.Fatal("TEST_ZONE_NAME_VSPHERE must be set for acceptance tests") + } + + if v := os.Getenv("TEST_HYPERV_NAME_VSPHERE"); v == "" { + t.Fatal("TEST_HYPERV_NAME_VSPHERE must be set for acceptance tests") + } + if v := os.Getenv("TEST_HYPERV_USERNAME_VSPHERE"); v == "" { + t.Fatal("TEST_HYPERV_USERNAME_VSPHERE must be set for acceptance tests") + } + if v := os.Getenv("TEST_HYPERV_PASSWORD_PLAINTEXT_VSPHERE"); v == "" { + t.Fatal("TEST_HYPERV_PASSWORD_PLAINTEXT_VSPHERE must be set for acceptance tests") + } + if v := os.Getenv("TEST_HYPERV_ADDRESS_VSPHERE"); v == "" { + t.Fatal("TEST_HYPERV_ADDRESS_VSPHERE must be set for acceptance tests") + } + if v := os.Getenv("TEST_HYPERV_SSL_THUMBPRINT_VSPHERE"); v == "" { + t.Fatal("TEST_HYPERV_SSL_THUMBPRINT_VSPHERE must be set for acceptance tests") + } +} + +func TestHypervisorResourceVsphere(t *testing.T) { + name := os.Getenv("TEST_HYPERV_NAME_VSPHERE") + username := os.Getenv("TEST_HYPERV_USERNAME_VSPHERE") + + resource.Test(t, resource.TestCase{ + ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, + PreCheck: func() { + TestProviderPreCheck(t) + TestHypervisorPreCheck_Vsphere(t) + }, + Steps: []resource.TestStep{ + // Create and Read testing + { + Config: BuildHypervisorResourceVsphere(t, hypervisor_testResources_vsphere), 
+ Check: resource.ComposeAggregateTestCheckFunc( + // Verify name of hypervisor + resource.TestCheckResourceAttr("citrix_vsphere_hypervisor.testHypervisor", "name", name), + resource.TestCheckResourceAttr("citrix_vsphere_hypervisor.testHypervisor", "username", username), + resource.TestCheckResourceAttr("citrix_vsphere_hypervisor.testHypervisor", "addresses.#", "1"), + resource.TestCheckResourceAttr("citrix_vsphere_hypervisor.testHypervisor", "ssl_thumbprints.#", "1"), + ), + }, + + // ImportState testing + { + ResourceName: "citrix_vsphere_hypervisor.testHypervisor", + ImportState: true, + ImportStateVerify: true, + // The password and password_format attributes are not returned by the + // Orchestration API, therefore there are no values for them during import. + ImportStateVerifyIgnore: []string{"password", "password_format"}, + }, + // Update and Read testing + { + Config: BuildHypervisorResourceVsphere(t, hypervisor_testResources_updated_vsphere), + Check: resource.ComposeAggregateTestCheckFunc( + // Verify name of hypervisor + resource.TestCheckResourceAttr("citrix_vsphere_hypervisor.testHypervisor", "name", fmt.Sprintf("%s-updated", name)), + ), + }, + }, + }) +} + +func TestHypervisorPreCheck_Xenserver(t *testing.T) { + if v := os.Getenv("TEST_ZONE_NAME_XENSERVER"); v == "" { + t.Fatal("TEST_ZONE_NAME_XENSERVER must be set for acceptance tests") + } + if v := os.Getenv("TEST_HYPERV_NAME_XENSERVER"); v == "" { + t.Fatal("TEST_HYPERV_NAME_XENSERVER must be set for acceptance tests") + } + if v := os.Getenv("TEST_HYPERV_USERNAME_XENSERVER"); v == "" { + t.Fatal("TEST_HYPERV_USERNAME_XENSERVER must be set for acceptance tests") + } + if v := os.Getenv("TEST_HYPERV_PASSWORD_PLAINTEXT_XENSERVER"); v == "" { + t.Fatal("TEST_HYPERV_PASSWORD_PLAINTEXT_XENSERVER must be set for acceptance tests") + } + if v := os.Getenv("TEST_HYPERV_ADDRESS_XENSERVER"); v == "" { + t.Fatal("TEST_HYPERV_ADDRESS_XENSERVER must be set for acceptance tests") + } + if v := os.Getenv("TEST_HYPERV_SSL_THUMBPRINT_XENSERVER"); v == "" { + t.Fatal("TEST_HYPERV_SSL_THUMBPRINT_XENSERVER must be set for acceptance tests") + } +} + +func TestHypervisorResourceXenserver(t *testing.T) { + name := os.Getenv("TEST_HYPERV_NAME_XENSERVER") + username := os.Getenv("TEST_HYPERV_USERNAME_XENSERVER") + + resource.Test(t, resource.TestCase{ + ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, + PreCheck: func() { + TestProviderPreCheck(t) + TestHypervisorPreCheck_Xenserver(t) + }, + Steps: []resource.TestStep{ + // Create and Read testing + { + Config: BuildHypervisorResourceXenserver(t, hypervisor_testResources_xenserver), + Check: resource.ComposeAggregateTestCheckFunc( + // Verify name of hypervisor + resource.TestCheckResourceAttr("citrix_xenserver_hypervisor.testHypervisor", "name", name), + resource.TestCheckResourceAttr("citrix_xenserver_hypervisor.testHypervisor", "username", username), + resource.TestCheckResourceAttr("citrix_xenserver_hypervisor.testHypervisor", "addresses.#", "1"), + resource.TestCheckResourceAttr("citrix_xenserver_hypervisor.testHypervisor", "ssl_thumbprints.#", "1"), + ), + }, + + // ImportState testing + { + ResourceName: "citrix_xenserver_hypervisor.testHypervisor", + ImportState: true, + ImportStateVerify: true, + // The password and password_format attributes are not returned by the + // Orchestration API, therefore there are no values for them during import.
+ ImportStateVerifyIgnore: []string{"password", "password_format"}, + }, + // Update and Read testing + { + Config: BuildHypervisorResourceXenserver(t, hypervisor_testResources_updated_xenserver), + Check: resource.ComposeAggregateTestCheckFunc( + // Verify name of hypervisor + resource.TestCheckResourceAttr("citrix_xenserver_hypervisor.testHypervisor", "name", fmt.Sprintf("%s-updated", name)), + ), + }, + }, + }) +} + +func TestHypervisorPreCheck_Nutanix(t *testing.T) { + if v := os.Getenv("TEST_ZONE_NAME_NUTANIX"); v == "" { + t.Fatal("TEST_ZONE_NAME_NUTANIX must be set for acceptance tests") + } + if v := os.Getenv("TEST_HYPERV_NAME_NUTANIX"); v == "" { + t.Fatal("TEST_HYPERV_NAME_NUTANIX must be set for acceptance tests") + } + if v := os.Getenv("TEST_HYPERV_USERNAME_NUTANIX"); v == "" { + t.Fatal("TEST_HYPERV_USERNAME_NUTANIX must be set for acceptance tests") + } + if v := os.Getenv("TEST_HYPERV_PASSWORD_PLAINTEXT_NUTANIX"); v == "" { + t.Fatal("TEST_HYPERV_PASSWORD_PLAINTEXT_NUTANIX must be set for acceptance tests") + } + if v := os.Getenv("TEST_HYPERV_ADDRESS_NUTANIX"); v == "" { + t.Fatal("TEST_HYPERV_ADDRESS_NUTANIX must be set for acceptance tests") + } +} + +func TestHypervisorResourceNutanix(t *testing.T) { + name := os.Getenv("TEST_HYPERV_NAME_NUTANIX") + username := os.Getenv("TEST_HYPERV_USERNAME_NUTANIX") + + resource.Test(t, resource.TestCase{ + ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, + PreCheck: func() { + TestProviderPreCheck(t) + TestHypervisorPreCheck_Nutanix(t) + }, + Steps: []resource.TestStep{ + // Create and Read testing + { + Config: BuildHypervisorResourceNutanix(t, hypervisor_testResources_nutanix), + Check: resource.ComposeAggregateTestCheckFunc( + // Verify name of hypervisor + resource.TestCheckResourceAttr("citrix_nutanix_hypervisor.testHypervisor", "name", name), + resource.TestCheckResourceAttr("citrix_nutanix_hypervisor.testHypervisor", "username", username), + resource.TestCheckResourceAttr("citrix_nutanix_hypervisor.testHypervisor", "addresses.#", "1"), + ), + }, + // ImportState testing + { + ResourceName: "citrix_nutanix_hypervisor.testHypervisor", + ImportState: true, + ImportStateVerify: true, + // The password and password_format attributes are not returned by the + // Orchestration API, therefore there are no values for them during import.
+ ImportStateVerifyIgnore: []string{"password", "password_format"}, + }, + // Update and Read testing + { + Config: BuildHypervisorResourceNutanix(t, hypervisor_testResources_updated_nutanix), + Check: resource.ComposeAggregateTestCheckFunc( + // Verify name of hypervisor + resource.TestCheckResourceAttr("citrix_nutanix_hypervisor.testHypervisor", "name", fmt.Sprintf("%s-updated", name)), ), }, }, @@ -131,7 +326,7 @@ func TestHypervisorResourceGCP(t *testing.T) { // test resources for AzureRM hypervisor var ( hypervisor_testResources = ` -resource "citrix_daas_azure_hypervisor" "testHypervisor" { +resource "citrix_azure_hypervisor" "testHypervisor" { name = "%s" zone = %s active_directory_id = "%s" @@ -142,7 +337,7 @@ resource "citrix_daas_azure_hypervisor" "testHypervisor" { ` hypervisor_testResources_updated = ` -resource "citrix_daas_azure_hypervisor" "testHypervisor" { +resource "citrix_azure_hypervisor" "testHypervisor" { name = "%s-updated" zone = %s active_directory_id = "%s" @@ -156,7 +351,7 @@ resource "citrix_daas_azure_hypervisor" "testHypervisor" { // test resources for GCP hypervisor var ( hypervisor_testResources_gcp = ` -resource "citrix_daas_gcp_hypervisor" "testHypervisor" { +resource "citrix_gcp_hypervisor" "testHypervisor" { name = "%s" zone = %s service_account_id = "%s" @@ -166,7 +361,7 @@ resource "citrix_daas_gcp_hypervisor" "testHypervisor" { ` hypervisor_testResources_updated_gcp = ` -resource "citrix_daas_gcp_hypervisor" "testHypervisor" { +resource "citrix_gcp_hypervisor" "testHypervisor" { name = "%s-updated" zone = %s service_account_id = "%s" @@ -176,23 +371,155 @@ resource "citrix_daas_gcp_hypervisor" "testHypervisor" { ` ) +// test resources for VSPHERE hypervisor +var ( + hypervisor_testResources_vsphere = ` + resource citrix_vsphere_hypervisor "testHypervisor" { + name = "%s" + zone = %s + username = "%s" + password = "%s" + password_format = "PlainText" + addresses = [ + "%s" + ] + ssl_thumbprints = [ + "%s" + ] + } + ` + + hypervisor_testResources_updated_vsphere = ` + resource citrix_vsphere_hypervisor "testHypervisor" { + name = "%s-updated" + zone = %s + username = "%s" + password = "%s" + password_format = "PlainText" + addresses = [ + "%s" + ] + ssl_thumbprints = [ + "%s" + ] + } + ` +) + +// test resources for XenServer hypervisor +var ( + hypervisor_testResources_xenserver = ` + resource citrix_xenserver_hypervisor "testHypervisor" { + name = "%s" + zone = %s + username = "%s" + password = "%s" + password_format = "PlainText" + addresses = [ + "%s" + ] + ssl_thumbprints = [ + "%s" + ] + } + ` + + hypervisor_testResources_updated_xenserver = ` + resource citrix_xenserver_hypervisor "testHypervisor" { + name = "%s-updated" + zone = %s + username = "%s" + password = "%s" + password_format = "PlainText" + addresses = [ + "%s" + ] + ssl_thumbprints = [ + "%s" + ] + } + ` +) + +// test resources for Nutanix hypervisor +var ( + hypervisor_testResources_nutanix = ` + resource citrix_nutanix_hypervisor "testHypervisor" { + name = "%s" + zone = %s + username = "%s" + password = "%s" + password_format = "PlainText" + addresses = [ + "%s" + ] + } + ` + + hypervisor_testResources_updated_nutanix = ` + resource citrix_nutanix_hypervisor "testHypervisor" { + name = "%s-updated" + zone = %s + username = "%s" + password = "%s" + password_format = "PlainText" + addresses = [ + "%s" + ] + } + ` +) + func BuildHypervisorResourceAzure(t *testing.T, hypervisor string) string { - name := os.Getenv("TEST_HYPERV_NAME") + name := os.Getenv("TEST_HYPERV_NAME_AZURE") 
tenantId := os.Getenv("TEST_HYPERV_AD_ID") subscriptionId := os.Getenv("TEST_HYPERV_SUBSCRIPTION_ID") applicationSecret := os.Getenv("TEST_HYPERV_APPLICATION_SECRET") applicationId := os.Getenv("TEST_HYPERV_APPLICATION_ID") + zoneValueForHypervisor := "citrix_zone.test.id" - zoneValueForHypervisor := "citrix_daas_zone.test.id" - - return BuildZoneResource(t, zone_testResource) + fmt.Sprintf(hypervisor, name, zoneValueForHypervisor, tenantId, subscriptionId, applicationSecret, applicationId) + zoneNameAzure := os.Getenv("TEST_ZONE_NAME_AZURE") + return BuildZoneResource(t, zone_testResource, zoneNameAzure) + fmt.Sprintf(hypervisor, name, zoneValueForHypervisor, tenantId, subscriptionId, applicationSecret, applicationId) } func BuildHypervisorResourceGCP(t *testing.T, hypervisor string) string { name := os.Getenv("TEST_HYPERV_NAME_GCP") serviceAccountId := os.Getenv("TEST_HYPERV_SERVICE_ACCOUNT_ID") serviceAccountCredential := os.Getenv("TEST_HYPERV_SERVICE_ACCOUNT_CREDENTIAL") - zoneValueForHypervisor := "citrix_daas_zone.test.id" - resource := BuildZoneResource(t, zone_testResource) + fmt.Sprintf(hypervisor, name, zoneValueForHypervisor, serviceAccountId, serviceAccountCredential) + zoneValueForHypervisor := "citrix_zone.test.id" + zoneNameGCP := os.Getenv("TEST_ZONE_NAME_GCP") + resource := BuildZoneResource(t, zone_testResource, zoneNameGCP) + fmt.Sprintf(hypervisor, name, zoneValueForHypervisor, serviceAccountId, serviceAccountCredential) return resource } + +func BuildHypervisorResourceVsphere(t *testing.T, hypervisor string) string { + name := os.Getenv("TEST_HYPERV_NAME_VSPHERE") + username := os.Getenv("TEST_HYPERV_USERNAME_VSPHERE") + password := os.Getenv("TEST_HYPERV_PASSWORD_PLAINTEXT_VSPHERE") + address := os.Getenv("TEST_HYPERV_ADDRESS_VSPHERE") + ssl_thumbprint := os.Getenv("TEST_HYPERV_SSL_THUMBPRINT_VSPHERE") + zoneValueForHypervisor := "citrix_zone.test.id" + zoneNameVsphere := os.Getenv("TEST_ZONE_NAME_VSPHERE") + return BuildZoneResource(t, zone_testResource, zoneNameVsphere) + fmt.Sprintf(hypervisor, name, zoneValueForHypervisor, username, password, address, ssl_thumbprint) +} + +func BuildHypervisorResourceXenserver(t *testing.T, hypervisor string) string { + name := os.Getenv("TEST_HYPERV_NAME_XENSERVER") + username := os.Getenv("TEST_HYPERV_USERNAME_XENSERVER") + password := os.Getenv("TEST_HYPERV_PASSWORD_PLAINTEXT_XENSERVER") + address := os.Getenv("TEST_HYPERV_ADDRESS_XENSERVER") + ssl_thumbprint := os.Getenv("TEST_HYPERV_SSL_THUMBPRINT_XENSERVER") + zoneValueForHypervisor := "citrix_zone.test.id" + zoneNameXenserver := os.Getenv("TEST_ZONE_NAME_XENSERVER") + return BuildZoneResource(t, zone_testResource, zoneNameXenserver) + fmt.Sprintf(hypervisor, name, zoneValueForHypervisor, username, password, address, ssl_thumbprint) +} + +func BuildHypervisorResourceNutanix(t *testing.T, hypervisor string) string { + name := os.Getenv("TEST_HYPERV_NAME_NUTANIX") + username := os.Getenv("TEST_HYPERV_USERNAME_NUTANIX") + password := os.Getenv("TEST_HYPERV_PASSWORD_PLAINTEXT_NUTANIX") + address := os.Getenv("TEST_HYPERV_ADDRESS_NUTANIX") + zoneValueForHypervisor := "citrix_zone.test.id" + zoneNameNutanix := os.Getenv("TEST_ZONE_NAME_NUTANIX") + return BuildZoneResource(t, zone_testResource, zoneNameNutanix) + fmt.Sprintf(hypervisor, name, zoneValueForHypervisor, username, password, address) +} diff --git a/internal/test/machine_catalog_resource_test.go b/internal/test/machine_catalog_resource_test.go index 8a1c052..2689dcd 100644 --- 
a/internal/test/machine_catalog_resource_test.go +++ b/internal/test/machine_catalog_resource_test.go @@ -43,14 +43,13 @@ func TestMachineCatalogPreCheck_Azure(t *testing.T) { } } -func TestMachineCatalogResourceAzure(t *testing.T) { +func TestActiveDirectoryMachineCatalogResourceAzure(t *testing.T) { name := os.Getenv("TEST_MC_NAME") resource.Test(t, resource.TestCase{ ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, PreCheck: func() { TestProviderPreCheck(t) - TestZonePreCheck(t) TestHypervisorPreCheck_Azure(t) TestHypervisorResourcePoolPreCheck_Azure(t) TestMachineCatalogPreCheck_Azure(t) @@ -58,21 +57,23 @@ func TestMachineCatalogResourceAzure(t *testing.T) { Steps: []resource.TestStep{ // Create and Read testing { - Config: BuildMachineCatalogResourceAzure(t, machinecatalog_testResources_azure), + Config: BuildMachineCatalogResourceAzure(t, machinecatalog_testResources_azure, "ActiveDirectory"), Check: resource.ComposeAggregateTestCheckFunc( // Verify name of catalog - resource.TestCheckResourceAttr("citrix_daas_machine_catalog.testMachineCatalog", "name", name), + resource.TestCheckResourceAttr("citrix_machine_catalog.testMachineCatalog", "name", name), // Verify domain FQDN - resource.TestCheckResourceAttr("citrix_daas_machine_catalog.testMachineCatalog", "session_support", "MultiSession"), + resource.TestCheckResourceAttr("citrix_machine_catalog.testMachineCatalog", "session_support", "MultiSession"), // Verify domain admin username - resource.TestCheckResourceAttr("citrix_daas_machine_catalog.testMachineCatalog", "provisioning_scheme.machine_domain_identity.service_account", os.Getenv("TEST_MC_SERVICE_ACCOUNT")), + resource.TestCheckResourceAttr("citrix_machine_catalog.testMachineCatalog", "provisioning_scheme.machine_domain_identity.service_account", os.Getenv("TEST_MC_SERVICE_ACCOUNT")), + // Verify machine catalog identity type + resource.TestCheckResourceAttr("citrix_machine_catalog.testMachineCatalog", "provisioning_scheme.identity_type", "ActiveDirectory"), // Verify nic network - resource.TestCheckResourceAttr("citrix_daas_machine_catalog.testMachineCatalog", "provisioning_scheme.network_mapping.network", os.Getenv("TEST_MC_SUBNET")), + resource.TestCheckResourceAttr("citrix_machine_catalog.testMachineCatalog", "provisioning_scheme.network_mapping.network", os.Getenv("TEST_MC_SUBNET")), ), }, // ImportState testing { - ResourceName: "citrix_daas_machine_catalog.testMachineCatalog", + ResourceName: "citrix_machine_catalog.testMachineCatalog", ImportState: true, ImportStateVerify: true, // The last_updated attribute does not exist in the Orchestration @@ -81,26 +82,88 @@ func TestMachineCatalogResourceAzure(t *testing.T) { }, //Update description, master image and add machine test { - Config: BuildMachineCatalogResourceAzure(t, machinecatalog_testResources_azure_updated), + Config: BuildMachineCatalogResourceAzure(t, machinecatalog_testResources_azure_updated, "ActiveDirectory"), Check: resource.ComposeAggregateTestCheckFunc( // Verify updated name of catalog - resource.TestCheckResourceAttr("citrix_daas_machine_catalog.testMachineCatalog", "name", name), + resource.TestCheckResourceAttr("citrix_machine_catalog.testMachineCatalog", "name", name), // Verify updated description - resource.TestCheckResourceAttr("citrix_daas_machine_catalog.testMachineCatalog", "description", "updatedCatalog"), + resource.TestCheckResourceAttr("citrix_machine_catalog.testMachineCatalog", "description", "updatedCatalog"), // Verify updated image - 
resource.TestCheckResourceAttr("citrix_daas_machine_catalog.testMachineCatalog", "provisioning_scheme.azure_machine_config.master_image", os.Getenv("TEST_MC_MASTER_IMAGE_UPDATED")), + resource.TestCheckResourceAttr("citrix_machine_catalog.testMachineCatalog", "provisioning_scheme.azure_machine_config.master_image", os.Getenv("TEST_MC_MASTER_IMAGE_UPDATED")), + // Verify machine catalog identity type + resource.TestCheckResourceAttr("citrix_machine_catalog.testMachineCatalog", "provisioning_scheme.identity_type", "ActiveDirectory"), // Verify total number of machines - resource.TestCheckResourceAttr("citrix_daas_machine_catalog.testMachineCatalog", "provisioning_scheme.number_of_total_machines", "2"), + resource.TestCheckResourceAttr("citrix_machine_catalog.testMachineCatalog", "provisioning_scheme.number_of_total_machines", "2"), ), }, // Delete machine test { - Config: BuildMachineCatalogResourceAzure(t, machinecatalog_testResources_azure_delete_machine), + Config: BuildMachineCatalogResourceAzure(t, machinecatalog_testResources_azure_delete_machine, "ActiveDirectory"), + Check: resource.ComposeAggregateTestCheckFunc( + // Verify updated name of catalog + resource.TestCheckResourceAttr("citrix_machine_catalog.testMachineCatalog", "name", name), + // Verify total number of machines + resource.TestCheckResourceAttr("citrix_machine_catalog.testMachineCatalog", "provisioning_scheme.number_of_total_machines", "1"), + // Verify machine catalog identity type + resource.TestCheckResourceAttr("citrix_machine_catalog.testMachineCatalog", "provisioning_scheme.identity_type", "ActiveDirectory"), + ), + }, + //Delete testing automatically occurs in TestCase + }, + }) +} + +func TestHybridAzureADMachineCatalogResourceAzure(t *testing.T) { + name := os.Getenv("TEST_MC_NAME") + "-HybAAD" + + resource.Test(t, resource.TestCase{ + ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, + PreCheck: func() { + TestProviderPreCheck(t) + TestHypervisorPreCheck_Azure(t) + TestHypervisorResourcePoolPreCheck_Azure(t) + TestMachineCatalogPreCheck_Azure(t) + }, + Steps: []resource.TestStep{ + // Create and Read testing + { + Config: BuildMachineCatalogResourceAzure(t, machinecatalog_testResources_azure, "HybridAzureAD"), + Check: resource.ComposeAggregateTestCheckFunc( + // Verify name of catalog + resource.TestCheckResourceAttr("citrix_machine_catalog.testMachineCatalog", "name", name), + // Verify session support + resource.TestCheckResourceAttr("citrix_machine_catalog.testMachineCatalog", "session_support", "MultiSession"), + // Verify machine catalog identity type + resource.TestCheckResourceAttr("citrix_machine_catalog.testMachineCatalog", "provisioning_scheme.identity_type", "HybridAzureAD"), + // Verify domain admin username + resource.TestCheckResourceAttr("citrix_machine_catalog.testMachineCatalog", "provisioning_scheme.machine_domain_identity.service_account", os.Getenv("TEST_MC_SERVICE_ACCOUNT")), + // Verify nic network + resource.TestCheckResourceAttr("citrix_machine_catalog.testMachineCatalog", "provisioning_scheme.network_mapping.network", os.Getenv("TEST_MC_SUBNET")), + ), + }, + // ImportState testing + { + ResourceName: "citrix_machine_catalog.testMachineCatalog", + ImportState: true, + ImportStateVerify: true, + // The following attributes are either not returned by the Orchestration + // API or differ from the configured values, therefore they are ignored during import.
+ ImportStateVerifyIgnore: []string{"provisioning_scheme.network_mapping", "provisioning_scheme.azure_machine_config.writeback_cache", "provisioning_scheme.machine_domain_identity.service_account", "provisioning_scheme.machine_config.service_account_password"}, + }, + // Update description, master image and add machine test + { + Config: BuildMachineCatalogResourceAzure(t, machinecatalog_testResources_azure_updated, "HybridAzureAD"), Check: resource.ComposeAggregateTestCheckFunc( // Verify updated name of catalog - resource.TestCheckResourceAttr("citrix_daas_machine_catalog.testMachineCatalog", "name", name), + resource.TestCheckResourceAttr("citrix_machine_catalog.testMachineCatalog", "name", name), + // Verify updated description + resource.TestCheckResourceAttr("citrix_machine_catalog.testMachineCatalog", "description", "updatedCatalog"), + // Verify updated image + resource.TestCheckResourceAttr("citrix_machine_catalog.testMachineCatalog", "provisioning_scheme.azure_machine_config.master_image", os.Getenv("TEST_MC_MASTER_IMAGE_UPDATED")), + // Verify machine catalog identity type + resource.TestCheckResourceAttr("citrix_machine_catalog.testMachineCatalog", "provisioning_scheme.identity_type", "HybridAzureAD"), // Verify total number of machines - resource.TestCheckResourceAttr("citrix_daas_machine_catalog.testMachineCatalog", "provisioning_scheme.number_of_total_machines", "1"), + resource.TestCheckResourceAttr("citrix_machine_catalog.testMachineCatalog", "provisioning_scheme.number_of_total_machines", "2"), ), }, //Delete testing automatically occurs in TestCase @@ -149,7 +212,6 @@ func TestMachineCatalogResourceGCP(t *testing.T) { ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, PreCheck: func() { TestProviderPreCheck(t) - TestZonePreCheck(t) TestHypervisorPreCheck_GCP(t) TestHypervisorResourcePoolPreCheck_GCP(t) TestMachineCatalogPreCheck_GCP(t) @@ -160,18 +222,18 @@ func TestMachineCatalogResourceGCP(t *testing.T) { Config: BuildMachineCatalogResourceGCP(t, machinecatalog_testResources_gcp), Check: resource.ComposeAggregateTestCheckFunc( // Verify name of catalog - resource.TestCheckResourceAttr("citrix_daas_machine_catalog.testMachineCatalog", "name", name), + resource.TestCheckResourceAttr("citrix_machine_catalog.testMachineCatalog", "name", name), // Verify domain FQDN - resource.TestCheckResourceAttr("citrix_daas_machine_catalog.testMachineCatalog", "session_support", "MultiSession"), + resource.TestCheckResourceAttr("citrix_machine_catalog.testMachineCatalog", "session_support", "MultiSession"), // Verify domain admin username - resource.TestCheckResourceAttr("citrix_daas_machine_catalog.testMachineCatalog", "provisioning_scheme.machine_domain_identity.service_account", os.Getenv("TEST_MC_SERVICE_ACCOUNT_GCP")), + resource.TestCheckResourceAttr("citrix_machine_catalog.testMachineCatalog", "provisioning_scheme.machine_domain_identity.service_account", os.Getenv("TEST_MC_SERVICE_ACCOUNT_GCP")), // Verify total number of machines - resource.TestCheckResourceAttr("citrix_daas_machine_catalog.testMachineCatalog", "provisioning_scheme.number_of_total_machines", "1"), + resource.TestCheckResourceAttr("citrix_machine_catalog.testMachineCatalog", "provisioning_scheme.number_of_total_machines", "1"), ), }, // ImportState testing { - ResourceName: "citrix_daas_machine_catalog.testMachineCatalog", + ResourceName: "citrix_machine_catalog.testMachineCatalog", ImportState: true, ImportStateVerify: true, // The last_updated attribute does not exist in the Orchestration @@ -183,11 
+245,11 @@ func TestMachineCatalogResourceGCP(t *testing.T) { Config: BuildMachineCatalogResourceGCP(t, machinecatalog_testResources_gcp_updated), Check: resource.ComposeAggregateTestCheckFunc( // Verify updated name of catalog - resource.TestCheckResourceAttr("citrix_daas_machine_catalog.testMachineCatalog", "name", name), + resource.TestCheckResourceAttr("citrix_machine_catalog.testMachineCatalog", "name", name), // Verify updated description - resource.TestCheckResourceAttr("citrix_daas_machine_catalog.testMachineCatalog", "description", "updatedCatalog"), + resource.TestCheckResourceAttr("citrix_machine_catalog.testMachineCatalog", "description", "updatedCatalog"), // Verify total number of machines - resource.TestCheckResourceAttr("citrix_daas_machine_catalog.testMachineCatalog", "provisioning_scheme.number_of_total_machines", "2"), + resource.TestCheckResourceAttr("citrix_machine_catalog.testMachineCatalog", "provisioning_scheme.number_of_total_machines", "2"), ), }, //Delete testing automatically occurs in TestCase @@ -195,7 +257,7 @@ func TestMachineCatalogResourceGCP(t *testing.T) { }) } -func TestMachineCatalogPreCheck_Mannual_Power_Managed_Azure(t *testing.T) { +func TestMachineCatalogPreCheck_Manual_Power_Managed_Azure(t *testing.T) { if v := os.Getenv("TEST_MC_NAME_MANUAL"); v == "" { t.Fatal("TEST_MC_NAME_MANUAL must be set for acceptance tests") } @@ -211,8 +273,11 @@ func TestMachineCatalogPreCheck_Mannual_Power_Managed_Azure(t *testing.T) { if v := os.Getenv("TEST_MC_SESSION_SUPPORT_MANUAL_POWER_MANAGED"); v == "" { t.Fatal("TEST_MC_SESSION_SUPPORT_MANUAL_POWER_MANAGED must be set for acceptance tests") } - if v := os.Getenv("TEST_MC_MACHINE_NAME_MANUAL_Azure"); v == "" { - t.Fatal("TEST_MC_MACHINE_NAME_MANUAL_Azure must be set for acceptance tests") + if v := os.Getenv("TEST_MC_MACHINE_NAME_MANUAL_AZURE"); v == "" { + t.Fatal("TEST_MC_MACHINE_NAME_MANUAL_AZURE must be set for acceptance tests") + } + if v := os.Getenv("TEST_MC_MACHINE_ACCOUNT_MANUAL_AZURE"); v == "" { + t.Fatal("TEST_MC_MACHINE_ACCOUNT_MANUAL_AZURE must be set for acceptance tests") } } @@ -223,9 +288,8 @@ func TestMachineCatalogResource_Manual_Power_Managed_Azure(t *testing.T) { ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, PreCheck: func() { TestProviderPreCheck(t) - TestZonePreCheck(t) TestHypervisorPreCheck_Azure(t) - TestMachineCatalogPreCheck_Mannual_Power_Managed_Azure(t) + TestMachineCatalogPreCheck_Manual_Power_Managed_Azure(t) }, Steps: []resource.TestStep{ // Create and Read testing @@ -233,16 +297,16 @@ func TestMachineCatalogResource_Manual_Power_Managed_Azure(t *testing.T) { Config: BuildMachineCatalogResourceManualPowerManagedAzure(t, machinecatalog_testResources_manual_power_managed_azure), Check: resource.ComposeAggregateTestCheckFunc( // Verify name of catalog - resource.TestCheckResourceAttr("citrix_daas_machine_catalog.testMachineCatalogManualPowerManaged", "name", name), + resource.TestCheckResourceAttr("citrix_machine_catalog.testMachineCatalogManualPowerManaged", "name", name), // Verify session support - resource.TestCheckResourceAttr("citrix_daas_machine_catalog.testMachineCatalogManualPowerManaged", "session_support", os.Getenv("TEST_MC_SESSION_SUPPORT_MANUAL_POWER_MANAGED")), + resource.TestCheckResourceAttr("citrix_machine_catalog.testMachineCatalogManualPowerManaged", "session_support", os.Getenv("TEST_MC_SESSION_SUPPORT_MANUAL_POWER_MANAGED")), // Verify total number of machines - 
resource.TestCheckResourceAttr("citrix_daas_machine_catalog.testMachineCatalogManualPowerManaged", "machine_accounts.#", "1"), + resource.TestCheckResourceAttr("citrix_machine_catalog.testMachineCatalogManualPowerManaged", "machine_accounts.#", "1"), ), }, // ImportState testing { - ResourceName: "citrix_daas_machine_catalog.testMachineCatalogManualPowerManaged", + ResourceName: "citrix_machine_catalog.testMachineCatalogManualPowerManaged", ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{"machine_accounts", "is_remote_pc", "is_power_managed"}, @@ -271,6 +335,9 @@ func TestMachineCatalogPreCheck_Manual_Power_Managed_GCP(t *testing.T) { if v := os.Getenv("TEST_MC_MACHINE_NAME_MANUAL_GCP"); v == "" { t.Fatal("TEST_MC_MACHINE_NAME_MANUAL_GCP must be set for acceptance tests") } + if v := os.Getenv("TEST_MC_MACHINE_ACCOUNT_MANUAL_GCP"); v == "" { + t.Fatal("TEST_MC_MACHINE_ACCOUNT_MANUAL_GCP must be set for acceptance tests") + } } func TestMachineCatalogResource_Manual_Power_Managed_GCP(t *testing.T) { @@ -280,7 +347,6 @@ func TestMachineCatalogResource_Manual_Power_Managed_GCP(t *testing.T) { ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, PreCheck: func() { TestProviderPreCheck(t) - TestZonePreCheck(t) TestHypervisorPreCheck_GCP(t) TestMachineCatalogPreCheck_Manual_Power_Managed_GCP(t) }, @@ -290,16 +356,16 @@ func TestMachineCatalogResource_Manual_Power_Managed_GCP(t *testing.T) { Config: BuildMachineCatalogResourceManualPowerManagedGCP(t, machinecatalog_testResources_manual_power_managed_gcp), Check: resource.ComposeAggregateTestCheckFunc( // Verify name of catalog - resource.TestCheckResourceAttr("citrix_daas_machine_catalog.testMachineCatalogManualPowerManaged", "name", name), + resource.TestCheckResourceAttr("citrix_machine_catalog.testMachineCatalogManualPowerManaged", "name", name), // Verify session support - resource.TestCheckResourceAttr("citrix_daas_machine_catalog.testMachineCatalogManualPowerManaged", "session_support", os.Getenv("TEST_MC_SESSION_SUPPORT_MANUAL_POWER_MANAGED")), + resource.TestCheckResourceAttr("citrix_machine_catalog.testMachineCatalogManualPowerManaged", "session_support", os.Getenv("TEST_MC_SESSION_SUPPORT_MANUAL_POWER_MANAGED")), // Verify total number of machines - resource.TestCheckResourceAttr("citrix_daas_machine_catalog.testMachineCatalogManualPowerManaged", "machine_accounts.#", "1"), + resource.TestCheckResourceAttr("citrix_machine_catalog.testMachineCatalogManualPowerManaged", "machine_accounts.#", "1"), ), }, // ImportState testing { - ResourceName: "citrix_daas_machine_catalog.testMachineCatalogManualPowerManaged", + ResourceName: "citrix_machine_catalog.testMachineCatalogManualPowerManaged", ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{"machine_accounts", "is_remote_pc", "is_power_managed"}, @@ -309,12 +375,177 @@ func TestMachineCatalogResource_Manual_Power_Managed_GCP(t *testing.T) { }) } -func TestMachineCatalogPreCheck_Mannual_Non_Power_Managed(t *testing.T) { +func TestMachineCatalogPreCheck_Manual_Power_Managed_Vsphere(t *testing.T) { if v := os.Getenv("TEST_MC_NAME_MANUAL"); v == "" { t.Fatal("TEST_MC_NAME_MANUAL must be set for acceptance tests") } - if v := os.Getenv("TEST_MC_MACHINE_NAME_MANUAL_NON_POWER_MANAGED"); v == "" { - t.Fatal("TEST_MC_MACHINE_NAME_MANUAL_NON_POWER_MANAGED must be set for acceptance tests") + if v := os.Getenv("TEST_MC_DATACENTER_VSPHERE"); v == "" { + t.Fatal("TEST_MC_DATACENTER_VSPHERE must be set for acceptance tests") + } + if v := 
os.Getenv("TEST_MC_HOST_VSPHERE"); v == "" { + t.Fatal("TEST_MC_HOST_VSPHERE must be set for acceptance tests") + } + if v := os.Getenv("TEST_MC_MACHINE_NAME_MANUAL_VSPHERE"); v == "" { + t.Fatal("TEST_MC_MACHINE_NAME_MANUAL_VSPHERE must be set for acceptance tests") + } + if v := os.Getenv("TEST_MC_MACHINE_ACCOUNT_MANUAL_VSPHERE"); v == "" { + t.Fatal("TEST_MC_ALLOCATION_TYPE_MANUAL_POWER_MANAGED must be set for acceptance tests") + } + if v := os.Getenv("TEST_MC_ALLOCATION_TYPE_MANUAL_POWER_MANAGED"); v == "" { + t.Fatal("TEST_MC_ALLOCATION_TYPE_MANUAL_POWER_MANAGED must be set for acceptance tests") + } + if v := os.Getenv("TEST_MC_SESSION_SUPPORT_MANUAL_POWER_MANAGED"); v == "" { + t.Fatal("TEST_MC_SESSION_SUPPORT_MANUAL_POWER_MANAGED must be set for acceptance tests") + } +} + +func TestMachineCatalogResource_Manual_Power_Managed_Vsphere(t *testing.T) { + name := os.Getenv("TEST_MC_NAME_MANUAL") + + resource.Test(t, resource.TestCase{ + ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, + PreCheck: func() { + TestProviderPreCheck(t) + TestHypervisorPreCheck_Vsphere(t) + TestMachineCatalogPreCheck_Manual_Power_Managed_Vsphere(t) + }, + Steps: []resource.TestStep{ + // Create and Read testing + { + Config: BuildMachineCatalogResourceManualPowerManagedVsphere(t, machinecatalog_testResources_manual_power_managed_vsphere), + Check: resource.ComposeAggregateTestCheckFunc( + // Verify name of catalog + resource.TestCheckResourceAttr("citrix_machine_catalog.testMachineCatalogManualPowerManaged", "name", name), + // Verify session support + resource.TestCheckResourceAttr("citrix_machine_catalog.testMachineCatalogManualPowerManaged", "session_support", os.Getenv("TEST_MC_SESSION_SUPPORT_MANUAL_POWER_MANAGED")), + // Verify total number of machines + resource.TestCheckResourceAttr("citrix_machine_catalog.testMachineCatalogManualPowerManaged", "machine_accounts.#", "1"), + ), + }, + // ImportState testing + { + ResourceName: "citrix_machine_catalog.testMachineCatalogManualPowerManaged", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"machine_accounts", "is_remote_pc", "is_power_managed"}, + }, + //Delete testing automatically occurs in TestCase + }, + }) +} + +func TestMachineCatalogPreCheck_Manual_Power_Managed_Xenserver(t *testing.T) { + if v := os.Getenv("TEST_MC_NAME_MANUAL"); v == "" { + t.Fatal("TEST_MC_NAME_MANUAL must be set for acceptance tests") + } + if v := os.Getenv("TEST_MC_MACHINE_NAME_MANUAL_XENSERVER"); v == "" { + t.Fatal("TEST_MC_MACHINE_NAME_MANUAL_XENSERVER must be set for acceptance tests") + } + if v := os.Getenv("TEST_MC_MACHINE_ACCOUNT_MANUAL_XENSERVER"); v == "" { + t.Fatal("TEST_MC_MACHINE_ACCOUNT_MANUAL_XENSERVER must be set for acceptance tests") + } + if v := os.Getenv("TEST_MC_ALLOCATION_TYPE_MANUAL_POWER_MANAGED"); v == "" { + t.Fatal("TEST_MC_ALLOCATION_TYPE_MANUAL_POWER_MANAGED must be set for acceptance tests") + } + if v := os.Getenv("TEST_MC_SESSION_SUPPORT_MANUAL_POWER_MANAGED"); v == "" { + t.Fatal("TEST_MC_SESSION_SUPPORT_MANUAL_POWER_MANAGED must be set for acceptance tests") + } +} + +func TestMachineCatalogResource_Manual_Power_Managed_Xenserver(t *testing.T) { + name := os.Getenv("TEST_MC_NAME_MANUAL") + + resource.Test(t, resource.TestCase{ + ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, + PreCheck: func() { + TestProviderPreCheck(t) + TestHypervisorPreCheck_Xenserver(t) + TestMachineCatalogPreCheck_Manual_Power_Managed_Xenserver(t) + }, + Steps: []resource.TestStep{ + // Create and Read testing 
+ { + Config: BuildMachineCatalogResourceManualPowerManagedXenserver(t, machinecatalog_testResources_manual_power_managed_xenserver), + Check: resource.ComposeAggregateTestCheckFunc( + // Verify name of catalog + resource.TestCheckResourceAttr("citrix_machine_catalog.testMachineCatalogManualPowerManaged", "name", name), + // Verify session support + resource.TestCheckResourceAttr("citrix_machine_catalog.testMachineCatalogManualPowerManaged", "session_support", os.Getenv("TEST_MC_SESSION_SUPPORT_MANUAL_POWER_MANAGED")), + // Verify total number of machines + resource.TestCheckResourceAttr("citrix_machine_catalog.testMachineCatalogManualPowerManaged", "machine_accounts.#", "1"), + ), + }, + // ImportState testing + { + ResourceName: "citrix_machine_catalog.testMachineCatalogManualPowerManaged", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"machine_accounts", "is_remote_pc", "is_power_managed"}, + }, + //Delete testing automatically occurs in TestCase + }, + }) +} + +func TestMachineCatalogPreCheck_Manual_Power_Managed_Nutanix(t *testing.T) { + if v := os.Getenv("TEST_MC_NAME_MANUAL"); v == "" { + t.Fatal("TEST_MC_NAME_MANUAL must be set for acceptance tests") + } + if v := os.Getenv("TEST_MC_MACHINE_NAME_MANUAL_NUTANIX"); v == "" { + t.Fatal("TEST_MC_MACHINE_NAME_MANUAL_NUTANIX must be set for acceptance tests") + } + if v := os.Getenv("TEST_MC_MACHINE_ACCOUNT_MANUAL_NUTANIX"); v == "" { + t.Fatal("TEST_MC_MACHINE_ACCOUNT_MANUAL_NUTANIX must be set for acceptance tests") + } + if v := os.Getenv("TEST_MC_ALLOCATION_TYPE_MANUAL_POWER_MANAGED"); v == "" { + t.Fatal("TEST_MC_ALLOCATION_TYPE_MANUAL_POWER_MANAGED must be set for acceptance tests") + } + if v := os.Getenv("TEST_MC_SESSION_SUPPORT_MANUAL_POWER_MANAGED"); v == "" { + t.Fatal("TEST_MC_SESSION_SUPPORT_MANUAL_POWER_MANAGED must be set for acceptance tests") + } +} + +func TestMachineCatalogResource_Manual_Power_Managed_Nutanix(t *testing.T) { + name := os.Getenv("TEST_MC_NAME_MANUAL") + + resource.Test(t, resource.TestCase{ + ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, + PreCheck: func() { + TestProviderPreCheck(t) + TestHypervisorPreCheck_Nutanix(t) + TestMachineCatalogPreCheck_Manual_Power_Managed_Nutanix(t) + }, + Steps: []resource.TestStep{ + // Create and Read testing + { + Config: BuildMachineCatalogResourceManualPowerManagedNutanix(t, machinecatalog_testResources_manual_power_managed_nutanix), + Check: resource.ComposeAggregateTestCheckFunc( + // Verify name of catalog + resource.TestCheckResourceAttr("citrix_machine_catalog.testMachineCatalogManualPowerManaged", "name", name), + // Verify session support + resource.TestCheckResourceAttr("citrix_machine_catalog.testMachineCatalogManualPowerManaged", "session_support", os.Getenv("TEST_MC_SESSION_SUPPORT_MANUAL_POWER_MANAGED")), + // Verify total number of machines + resource.TestCheckResourceAttr("citrix_machine_catalog.testMachineCatalogManualPowerManaged", "machine_accounts.#", "1"), + ), + }, + // ImportState testing + { + ResourceName: "citrix_machine_catalog.testMachineCatalogManualPowerManaged", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"machine_accounts", "is_remote_pc", "is_power_managed"}, + }, + //Delete testing automatically occurs in TestCase + }, + }) +} + +func TestMachineCatalogPreCheck_Manual_Non_Power_Managed(t *testing.T) { + if v := os.Getenv("TEST_MC_NAME_MANUAL"); v == "" { + t.Fatal("TEST_MC_NAME_MANUAL must be set for acceptance tests") + } + if v := 
os.Getenv("TEST_MC_MACHINE_ACCOUNT_MANUAL_NON_POWER_MANAGED"); v == "" { + t.Fatal("TEST_MC_MACHINE_ACCOUNT_MANUAL_NON_POWER_MANAGED must be set for acceptance tests") } if v := os.Getenv("TEST_MC_ALLOCATION_TYPE_MANUAL_NON_POWER_MANAGED"); v == "" { t.Fatal("TEST_MC_ALLOCATION_TYPE_MANUAL_NON_POWER_MANAGED must be set for acceptance tests") @@ -332,8 +563,7 @@ func TestMachineCatalogResource_Manual_Non_Power_Managed(t *testing.T) { PreCheck: func() { TestProviderPreCheck(t) TestZonePreCheck(t) - TestHypervisorPreCheck_Azure(t) - TestMachineCatalogPreCheck_Mannual_Non_Power_Managed(t) + TestMachineCatalogPreCheck_Manual_Non_Power_Managed(t) }, Steps: []resource.TestStep{ // Create and Read testing @@ -341,16 +571,16 @@ func TestMachineCatalogResource_Manual_Non_Power_Managed(t *testing.T) { Config: BuildMachineCatalogResourceManualNonPowerManaged(t, machinecatalog_testResources_manual_non_power_managed), Check: resource.ComposeAggregateTestCheckFunc( // Verify name of catalog - resource.TestCheckResourceAttr("citrix_daas_machine_catalog.testMachineCatalogNonManualPowerManaged", "name", name), + resource.TestCheckResourceAttr("citrix_machine_catalog.testMachineCatalogNonManualPowerManaged", "name", name), // Verify session support - resource.TestCheckResourceAttr("citrix_daas_machine_catalog.testMachineCatalogNonManualPowerManaged", "session_support", os.Getenv("TEST_MC_SESSION_SUPPORT_MANUAL_NON_POWER_MANAGED")), + resource.TestCheckResourceAttr("citrix_machine_catalog.testMachineCatalogNonManualPowerManaged", "session_support", os.Getenv("TEST_MC_SESSION_SUPPORT_MANUAL_NON_POWER_MANAGED")), // Verify total number of machines - resource.TestCheckResourceAttr("citrix_daas_machine_catalog.testMachineCatalogNonManualPowerManaged", "machine_accounts.#", "1"), + resource.TestCheckResourceAttr("citrix_machine_catalog.testMachineCatalogNonManualPowerManaged", "machine_accounts.#", "1"), ), }, // ImportState testing { - ResourceName: "citrix_daas_machine_catalog.testMachineCatalogNonManualPowerManaged", + ResourceName: "citrix_machine_catalog.testMachineCatalogNonManualPowerManaged", ImportState: true, ImportStateVerify: true, // The last_updated attribute does not exist in the Orchestration @@ -366,8 +596,8 @@ func TestMachineCatalogPreCheck_RemotePC(t *testing.T) { if v := os.Getenv("TEST_MC_NAME_REMOTE_PC"); v == "" { t.Fatal("TEST_MC_NAME_REMOTE_PC must be set for acceptance tests") } - if v := os.Getenv("TEST_MC_MACHINE_NAME_REMOTE_PC"); v == "" { - t.Fatal("TEST_MC_MACHINE_NAME_REMOTE_PC must be set for acceptance tests") + if v := os.Getenv("TEST_MC_MACHINE_ACCOUNT_REMOTE_PC"); v == "" { + t.Fatal("TEST_MC_MACHINE_ACCOUNT_REMOTE_PC must be set for acceptance tests") } if v := os.Getenv("TEST_MC_ALLOCATION_TYPE_REMOTE_PC"); v == "" { t.Fatal("TEST_MC_ALLOCATION_TYPE_REMOTE_PC must be set for acceptance tests") @@ -396,14 +626,14 @@ func TestMachineCatalogResource_RemotePC(t *testing.T) { Config: BuildMachineCatalogResourceRemotePC(t, machinecatalog_testResources_remote_pc), Check: resource.ComposeAggregateTestCheckFunc( // Verify name of catalog - resource.TestCheckResourceAttr("citrix_daas_machine_catalog.testMachineCatalog", "name", name), + resource.TestCheckResourceAttr("citrix_machine_catalog.testMachineCatalog", "name", name), // Verify total number of machines - resource.TestCheckResourceAttr("citrix_daas_machine_catalog.testMachineCatalog", "machine_accounts.#", "1"), + resource.TestCheckResourceAttr("citrix_machine_catalog.testMachineCatalog", "machine_accounts.#", "1"), ), }, // 
ImportState testing { - ResourceName: "citrix_daas_machine_catalog.testMachineCatalog", + ResourceName: "citrix_machine_catalog.testMachineCatalog", ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{"machine_accounts", "is_remote_pc", "is_power_managed"}, @@ -415,16 +645,16 @@ func TestMachineCatalogResource_RemotePC(t *testing.T) { var ( machinecatalog_testResources_azure = ` -resource "citrix_daas_machine_catalog" "testMachineCatalog" { +resource "citrix_machine_catalog" "testMachineCatalog" { name = "%s" description = "on prem catalog for import testing" allocation_type = "Random" session_support = "MultiSession" provisioning_type = "MCS" provisioning_scheme = { - hypervisor = citrix_daas_azure_hypervisor.testHypervisor.id - hypervisor_resource_pool = citrix_daas_azure_hypervisor_resource_pool.testHypervisorResourcePool.id - identity_type = "ActiveDirectory" + hypervisor = citrix_azure_hypervisor.testHypervisor.id + hypervisor_resource_pool = citrix_azure_hypervisor_resource_pool.testHypervisorResourcePool.id + identity_type = "%s" machine_domain_identity = { domain = "%s" service_account = "%s" @@ -458,20 +688,20 @@ resource "citrix_daas_machine_catalog" "testMachineCatalog" { } } - zone = citrix_daas_zone.test.id + zone = citrix_zone.test.id } ` machinecatalog_testResources_azure_updated = ` - resource "citrix_daas_machine_catalog" "testMachineCatalog" { + resource "citrix_machine_catalog" "testMachineCatalog" { name = "%s" description = "updatedCatalog" allocation_type = "Random" session_support = "MultiSession" provisioning_type = "MCS" provisioning_scheme = { - hypervisor = citrix_daas_azure_hypervisor.testHypervisor.id - hypervisor_resource_pool = citrix_daas_azure_hypervisor_resource_pool.testHypervisorResourcePool.id - identity_type = "ActiveDirectory" + hypervisor = citrix_azure_hypervisor.testHypervisor.id + hypervisor_resource_pool = citrix_azure_hypervisor_resource_pool.testHypervisorResourcePool.id + identity_type = "%s" machine_domain_identity = { domain = "%s" service_account = "%s" @@ -505,21 +735,21 @@ resource "citrix_daas_machine_catalog" "testMachineCatalog" { naming_scheme_type ="Numeric" } } - zone = citrix_daas_zone.test.id + zone = citrix_zone.test.id } ` machinecatalog_testResources_azure_delete_machine = ` - resource "citrix_daas_machine_catalog" "testMachineCatalog" { + resource "citrix_machine_catalog" "testMachineCatalog" { name = "%s" description = "updatedCatalog" allocation_type = "Random" session_support = "MultiSession" provisioning_type = "MCS" provisioning_scheme = { - hypervisor = citrix_daas_azure_hypervisor.testHypervisor.id - hypervisor_resource_pool = citrix_daas_azure_hypervisor_resource_pool.testHypervisorResourcePool.id - identity_type = "ActiveDirectory" + hypervisor = citrix_azure_hypervisor.testHypervisor.id + hypervisor_resource_pool = citrix_azure_hypervisor_resource_pool.testHypervisorResourcePool.id + identity_type = "%s" machine_domain_identity = { domain = "%s" service_account = "%s" @@ -554,21 +784,21 @@ resource "citrix_daas_machine_catalog" "testMachineCatalog" { naming_scheme_type ="Numeric" } } - zone = citrix_daas_zone.test.id + zone = citrix_zone.test.id } ` machinecatalog_testResources_gcp = ` - resource "citrix_daas_machine_catalog" "testMachineCatalog" { + resource "citrix_machine_catalog" "testMachineCatalog" { name = "%s" description = "on prem catalog for import testing" allocation_type = "Random" session_support = "MultiSession" provisioning_type = "MCS" provisioning_scheme = { - hypervisor = 
citrix_daas_gcp_hypervisor.testHypervisor.id - hypervisor_resource_pool = citrix_daas_gcp_hypervisor_resource_pool.testHypervisorResourcePool.id - identity_type = "ActiveDirectory" + hypervisor = citrix_gcp_hypervisor.testHypervisor.id + hypervisor_resource_pool = citrix_gcp_hypervisor_resource_pool.testHypervisorResourcePool.id + identity_type = "%s" machine_domain_identity = { domain = "%s" service_account = "%s" @@ -587,21 +817,21 @@ resource "citrix_daas_machine_catalog" "testMachineCatalog" { naming_scheme_type ="Numeric" } } - zone = citrix_daas_zone.test.id + zone = citrix_zone.test.id } ` machinecatalog_testResources_gcp_updated = ` - resource "citrix_daas_machine_catalog" "testMachineCatalog" { + resource "citrix_machine_catalog" "testMachineCatalog" { name = "%s" description = "updatedCatalog" allocation_type = "Random" session_support = "MultiSession" provisioning_type = "MCS" provisioning_scheme = { - hypervisor = citrix_daas_gcp_hypervisor.testHypervisor.id - hypervisor_resource_pool = citrix_daas_gcp_hypervisor_resource_pool.testHypervisorResourcePool.id - identity_type = "ActiveDirectory" + hypervisor = citrix_gcp_hypervisor.testHypervisor.id + hypervisor_resource_pool = citrix_gcp_hypervisor_resource_pool.testHypervisorResourcePool.id + identity_type = "%s" machine_domain_identity = { domain = "%s" service_account = "%s" @@ -620,15 +850,15 @@ resource "citrix_daas_machine_catalog" "testMachineCatalog" { naming_scheme_type ="Numeric" } } - zone = citrix_daas_zone.test.id + zone = citrix_zone.test.id } ` machinecatalog_testResources_manual_power_managed_azure = ` - resource "citrix_daas_machine_catalog" "testMachineCatalogManualPowerManaged" { + resource "citrix_machine_catalog" "testMachineCatalogManualPowerManaged" { name = "%s" description = "manual power managed multi-session catalog testing for Azure Hypervisor" - zone = citrix_daas_zone.test.id + zone = citrix_zone.test.id allocation_type = "%s" session_support = "%s" is_power_managed = true @@ -636,12 +866,13 @@ resource "citrix_daas_machine_catalog" "testMachineCatalog" { provisioning_type = "Manual" machine_accounts = [ { - hypervisor = citrix_daas_azure_hypervisor.testHypervisor.id + hypervisor = citrix_azure_hypervisor.testHypervisor.id machines = [ { region = "%s" resource_group_name = "%s" machine_name = "%s" + machine_account = "%s" } ] } @@ -650,10 +881,10 @@ resource "citrix_daas_machine_catalog" "testMachineCatalog" { ` machinecatalog_testResources_manual_power_managed_gcp = ` - resource "citrix_daas_machine_catalog" "testMachineCatalogManualPowerManaged" { + resource "citrix_machine_catalog" "testMachineCatalogManualPowerManaged" { name = "%s" description = "manual power managed multi-session catalog testing" - zone = citrix_daas_zone.test.id + zone = citrix_zone.test.id allocation_type = "%s" session_support = "%s" is_power_managed = true @@ -661,12 +892,87 @@ resource "citrix_daas_machine_catalog" "testMachineCatalog" { provisioning_type = "Manual" machine_accounts = [ { - hypervisor = citrix_daas_gcp_hypervisor.testHypervisor.id + hypervisor = citrix_gcp_hypervisor.testHypervisor.id machines = [ { region = "%s" project_name = "%s" machine_name = "%s" + machine_account = "%s" + } + ] + } + ] + } + ` + + machinecatalog_testResources_manual_power_managed_vsphere = ` + resource "citrix_machine_catalog" "testMachineCatalogManualPowerManaged" { + name = "%s" + description = "manual power managed multi-session catalog testing" + is_power_managed = true + is_remote_pc = false + provisioning_type = "Manual" + 
allocation_type = "%s" + session_support = "%s" + zone = citrix_zone.test.id + machine_accounts = [ + { + hypervisor = citrix_vsphere_hypervisor.testHypervisor.id + machines = [ + { + datacenter = "%s" + host = "%s" + machine_name = "%s" + machine_account = "%s" + } + ] + } + ] + } + ` + + machinecatalog_testResources_manual_power_managed_xenserver = ` + resource "citrix_machine_catalog" "testMachineCatalogManualPowerManaged" { + name = "%s" + description = "manual power managed multi-session catalog testing" + is_power_managed = true + is_remote_pc = false + provisioning_type = "Manual" + allocation_type = "%s" + session_support = "%s" + zone = citrix_zone.test.id + machine_accounts = [ + { + hypervisor = citrix_xenserver_hypervisor.testHypervisor.id + machines = [ + { + machine_name = "%s" + machine_account = "%s" + } + ] + } + ] + } + ` + + machinecatalog_testResources_manual_power_managed_nutanix = ` + resource "citrix_machine_catalog" "testMachineCatalogManualPowerManaged" { + name = "%s" + description = "manual power managed multi-session catalog testing" + is_power_managed = true + is_remote_pc = false + provisioning_type = "Manual" + allocation_type = "%s" + session_support = "%s" + zone = citrix_zone.test.id + machine_accounts = [ + { + hypervisor = citrix_nutanix_hypervisor.testHypervisor.id + machines = [ + { + machine_name = "%s" + machine_account = "%s" } ] } @@ -675,10 +981,10 @@ resource "citrix_daas_machine_catalog" "testMachineCatalog" { ` machinecatalog_testResources_manual_non_power_managed = ` - resource "citrix_daas_machine_catalog" "testMachineCatalogNonManualPowerManaged" { + resource "citrix_machine_catalog" "testMachineCatalogNonManualPowerManaged" { name = "%s" description = "manual non power managed multi-session catalog testing" - zone = citrix_daas_zone.test.id + zone = citrix_zone.test.id allocation_type = "%s" session_support = "%s" is_power_managed = false @@ -688,7 +994,7 @@ resource "citrix_daas_machine_catalog" "testMachineCatalog" { { machines = [ { - machine_name = "%s" + machine_account = "%s" } ] } @@ -696,7 +1002,7 @@ resource "citrix_daas_machine_catalog" "testMachineCatalog" { } ` machinecatalog_testResources_remote_pc = ` - resource "citrix_daas_machine_catalog" "testMachineCatalog" { + resource "citrix_machine_catalog" "testMachineCatalog" { name = "%s" description = "on prem catalog for import testing remotePC" allocation_type = "%s" @@ -708,7 +1014,7 @@ resource "citrix_daas_machine_catalog" "testMachineCatalog" { { machines = [ { - machine_name = "%s" + machine_account = "%s" } ] } @@ -719,13 +1025,16 @@ resource "citrix_daas_machine_catalog" "testMachineCatalog" { ou_name = "%s" } ] - zone = citrix_daas_zone.test.id + zone = citrix_zone.test.id } ` ) -func BuildMachineCatalogResourceAzure(t *testing.T, machineResource string) string { +func BuildMachineCatalogResourceAzure(t *testing.T, machineResource string, identityType string) string { name := os.Getenv("TEST_MC_NAME") + if identityType == "HybridAzureAD" { + name += "-HybAAD" + } service_account := os.Getenv("TEST_MC_SERVICE_ACCOUNT") service_account_pass := os.Getenv("TEST_MC_SERVICE_ACCOUNT_PASS") service_offering := os.Getenv("TEST_MC_SERVICE_OFFERING") @@ -741,11 +1050,12 @@ func BuildMachineCatalogResourceAzure(t *testing.T, machineResource string) stri //machine account domain := os.Getenv("TEST_MC_DOMAIN") - return BuildHypervisorResourcePoolResourceAzure(t, hypervisor_resource_pool_testResource_azure) + fmt.Sprintf(machineResource, name, domain, service_account, 
service_account_pass, service_offering, resource_group, storage_account, container, master_image, subnet) + return BuildHypervisorResourcePoolResourceAzure(t, hypervisor_resource_pool_testResource_azure) + fmt.Sprintf(machineResource, name, identityType, domain, service_account, service_account_pass, service_offering, resource_group, storage_account, container, master_image, subnet) } func BuildMachineCatalogResourceGCP(t *testing.T, machineResource string) string { name := os.Getenv("TEST_MC_NAME_GCP") + identityType := "ActiveDirectory" service_account := os.Getenv("TEST_MC_SERVICE_ACCOUNT_GCP") service_account_pass := os.Getenv("TEST_MC_SERVICE_ACCOUNT_PASS_GCP") storage_type := os.Getenv("TEST_MC_STORAGE_TYPE_GCP") @@ -757,46 +1067,84 @@ func BuildMachineCatalogResourceGCP(t *testing.T, machineResource string) string //machine account domain := os.Getenv("TEST_MC_DOMAIN_GCP") - return BuildHypervisorResourcePoolResourceGCP(t, hypervisor_resource_pool_testResource_gcp) + fmt.Sprintf(machineResource, name, domain, service_account, service_account_pass, storage_type, machine_profile, master_image, machine_snapshot, availability_zones) + return BuildHypervisorResourcePoolResourceGCP(t, hypervisor_resource_pool_testResource_gcp) + fmt.Sprintf(machineResource, name, identityType, domain, service_account, service_account_pass, storage_type, machine_profile, master_image, machine_snapshot, availability_zones) } func BuildMachineCatalogResourceManualPowerManagedAzure(t *testing.T, machineResource string) string { name := os.Getenv("TEST_MC_NAME_MANUAL") - machine_name := os.Getenv("TEST_MC_MACHINE_NAME_MANUAL_Azure") + machine_name := os.Getenv("TEST_MC_MACHINE_NAME_MANUAL_AZURE") + machine_account := os.Getenv("TEST_MC_MACHINE_ACCOUNT_MANUAL_AZURE") region := os.Getenv("TEST_MC_REGION_MANUAL_POWER_MANAGED") resource_group := os.Getenv("TEST_MC_RESOURCE_GROUP_MANUAL_POWER_MANAGED") allocation_type := os.Getenv("TEST_MC_ALLOCATION_TYPE_MANUAL_POWER_MANAGED") session_support := os.Getenv("TEST_MC_SESSION_SUPPORT_MANUAL_POWER_MANAGED") - return BuildHypervisorResourceAzure(t, hypervisor_testResources) + fmt.Sprintf(machineResource, name, allocation_type, session_support, region, resource_group, machine_name) + return BuildHypervisorResourceAzure(t, hypervisor_testResources) + fmt.Sprintf(machineResource, name, allocation_type, session_support, region, resource_group, machine_name, machine_account) } func BuildMachineCatalogResourceManualPowerManagedGCP(t *testing.T, machineResource string) string { name := os.Getenv("TEST_MC_NAME_MANUAL") machine_name := os.Getenv("TEST_MC_MACHINE_NAME_MANUAL_GCP") + machine_account := os.Getenv("TEST_MC_MACHINE_ACCOUNT_MANUAL_GCP") region := os.Getenv("TEST_MC_REGION_MANUAL_POWER_MANAGED_GCP") project_name := os.Getenv("TEST_MC_PROJECT_NAME_MANUAL_POWER_MANAGED") allocation_type := os.Getenv("TEST_MC_ALLOCATION_TYPE_MANUAL_POWER_MANAGED") session_support := os.Getenv("TEST_MC_SESSION_SUPPORT_MANUAL_POWER_MANAGED") - return BuildHypervisorResourcePoolResourceGCP(t, hypervisor_resource_pool_testResource_gcp) + fmt.Sprintf(machineResource, name, allocation_type, session_support, region, project_name, machine_name) + return BuildHypervisorResourceGCP(t, hypervisor_testResources_gcp) + fmt.Sprintf(machineResource, name, allocation_type, session_support, region, project_name, machine_name, machine_account) +} + +func BuildMachineCatalogResourceManualPowerManagedVsphere(t *testing.T, machineResource string) string { + name := os.Getenv("TEST_MC_NAME_MANUAL") + 
datacenter := os.Getenv("TEST_MC_DATACENTER_VSPHERE") + host := os.Getenv("TEST_MC_HOST_VSPHERE") + machine_name := os.Getenv("TEST_MC_MACHINE_NAME_MANUAL_VSPHERE") + machine_account := os.Getenv("TEST_MC_MACHINE_ACCOUNT_MANUAL_VSPHERE") + allocation_type := os.Getenv("TEST_MC_ALLOCATION_TYPE_MANUAL_POWER_MANAGED") + session_support := os.Getenv("TEST_MC_SESSION_SUPPORT_MANUAL_POWER_MANAGED") + + return BuildHypervisorResourceVsphere(t, hypervisor_testResources_vsphere) + fmt.Sprintf(machineResource, name, allocation_type, session_support, datacenter, host, machine_name, machine_account) +} + +func BuildMachineCatalogResourceManualPowerManagedXenserver(t *testing.T, machineResource string) string { + name := os.Getenv("TEST_MC_NAME_MANUAL") + machine_name := os.Getenv("TEST_MC_MACHINE_NAME_MANUAL_XENSERVER") + machine_account := os.Getenv("TEST_MC_MACHINE_ACCOUNT_MANUAL_XENSERVER") + allocation_type := os.Getenv("TEST_MC_ALLOCATION_TYPE_MANUAL_POWER_MANAGED") + session_support := os.Getenv("TEST_MC_SESSION_SUPPORT_MANUAL_POWER_MANAGED") + + return BuildHypervisorResourceXenserver(t, hypervisor_testResources_xenserver) + fmt.Sprintf(machineResource, name, allocation_type, session_support, machine_name, machine_account) +} + +func BuildMachineCatalogResourceManualPowerManagedNutanix(t *testing.T, machineResource string) string { + name := os.Getenv("TEST_MC_NAME_MANUAL") + machine_name := os.Getenv("TEST_MC_MACHINE_NAME_MANUAL_NUTANIX") + machine_account := os.Getenv("TEST_MC_MACHINE_ACCOUNT_MANUAL_NUTANIX") + allocation_type := os.Getenv("TEST_MC_ALLOCATION_TYPE_MANUAL_POWER_MANAGED") + session_support := os.Getenv("TEST_MC_SESSION_SUPPORT_MANUAL_POWER_MANAGED") + + return BuildHypervisorResourceNutanix(t, hypervisor_testResources_nutanix) + fmt.Sprintf(machineResource, name, allocation_type, session_support, machine_name, machine_account) } func BuildMachineCatalogResourceManualNonPowerManaged(t *testing.T, machineResource string) string { name := os.Getenv("TEST_MC_NAME_MANUAL") - machine_name := os.Getenv("TEST_MC_MACHINE_NAME_MANUAL_NON_POWER_MANAGED") + machine_account := os.Getenv("TEST_MC_MACHINE_ACCOUNT_MANUAL_NON_POWER_MANAGED") allocation_type := os.Getenv("TEST_MC_ALLOCATION_TYPE_MANUAL_NON_POWER_MANAGED") session_support := os.Getenv("TEST_MC_SESSION_SUPPORT_MANUAL_NON_POWER_MANAGED") - return BuildZoneResource(t, zone_testResource) + fmt.Sprintf(machineResource, name, allocation_type, session_support, machine_name) + zoneName := os.Getenv("TEST_ZONE_NAME") + + return BuildZoneResource(t, zone_testResource, zoneName) + fmt.Sprintf(machineResource, name, allocation_type, session_support, machine_account) } func BuildMachineCatalogResourceRemotePC(t *testing.T, machineResource string) string { name := os.Getenv("TEST_MC_NAME_REMOTE_PC") - machine_name := os.Getenv("TEST_MC_MACHINE_NAME_REMOTE_PC") + machine_account := os.Getenv("TEST_MC_MACHINE_ACCOUNT_REMOTE_PC") allocation_type := os.Getenv("TEST_MC_ALLOCATION_TYPE_REMOTE_PC") ou := os.Getenv("TEST_MC_OU_REMOTE_PC") include_subfolders := os.Getenv("TEST_MC_INCLUDE_SUBFOLDERS_REMOTE_PC") - return BuildZoneResource(t, zone_testResource) + fmt.Sprintf(machineResource, name, allocation_type, machine_name, include_subfolders, ou) + zoneName := os.Getenv("TEST_ZONE_NAME") + + return BuildZoneResource(t, zone_testResource, zoneName) + fmt.Sprintf(machineResource, name, allocation_type, machine_account, include_subfolders, ou) } diff --git a/internal/test/policy_set_resource_test.go b/internal/test/policy_set_resource_test.go new file 
mode 100644 index 0000000..c2ba391 --- /dev/null +++ b/internal/test/policy_set_resource_test.go @@ -0,0 +1,247 @@ +// Copyright © 2023. Citrix Systems, Inc. + +package test + +import ( + "fmt" + "os" + "testing" + + "github.com/hashicorp/terraform-plugin-testing/helper/resource" +) + +var ( + policy_set_testResource = ` +resource "citrix_policy_set" "testPolicySet" { + name = "%s" + description = "Test policy set description" + scopes = [ "All" ] + type = "DeliveryGroupPolicies" + policies = [ + { + name = "first-test-policy" + description = "First test policy with priority 0" + is_enabled = true + policy_settings = [ + { + name = "AdvanceWarningPeriod" + value = "13:00:00" + use_default = false + }, + ] + policy_filters = [ + { + type = "DesktopGroup" + data = jsonencode({ + "server" = "%s" + "uuid" = citrix_delivery_group.testDeliveryGroup.id + }) + is_enabled = true + is_allowed = true + }, + ] + }, + { + name = "second-test-policy" + description = "Second test policy with priority 1" + is_enabled = false + policy_settings = [ + { + name = "AdvanceWarningPeriod" + value = "17:00:00" + use_default = false + }, + ] + policy_filters = [] + } + ] +} +` + + policy_set_reordered_testResource = ` +resource "citrix_policy_set" "testPolicySet" { + name = "%s" + description = "Test policy set description" + scopes = [ "All" ] + type = "DeliveryGroupPolicies" + policies = [ + { + name = "second-test-policy" + description = "Second test policy with priority 1" + is_enabled = false + policy_settings = [ + { + name = "AdvanceWarningPeriod" + value = "17:00:00" + use_default = false + }, + ] + policy_filters = [] + }, + { + name = "first-test-policy" + description = "First test policy with priority 0" + is_enabled = true + policy_settings = [ + { + name = "AdvanceWarningPeriod" + value = "13:00:00" + use_default = false + }, + ] + policy_filters = [ + { + type = "DesktopGroup" + data = jsonencode({ + "server" = "%s" + "uuid" = citrix_delivery_group.testDeliveryGroup.id + }) + is_enabled = true + is_allowed = true + }, + ] + } + ] +} +` + + policy_set_updated_testResource = ` +resource "citrix_policy_set" "testPolicySet" { + name = "%s" + description = "Test policy set description updated" + scopes = [ "All" ] + type = "DeliveryGroupPolicies" + policies = [ + { + name = "first-test-policy" + description = "First test policy with priority 0" + is_enabled = true + policy_settings = [ + { + name = "AdvanceWarningPeriod" + value = "13:00:00" + use_default = false + }, + ] + policy_filters = [ + { + type = "DesktopGroup" + data = jsonencode({ + "server" = "%s" + "uuid" = citrix_delivery_group.testDeliveryGroup.id + }) + is_enabled = true + is_allowed = true + }, + ] + } + ] +} +` +) + +func TestPolicySetResourcePreCheck(t *testing.T) { + if v := os.Getenv("TEST_POLICY_SET_NAME"); v == "" { + t.Fatal("TEST_POLICY_SET_NAME must be set for acceptance tests") + } + + if v := os.Getenv("CITRIX_DDC_HOST_NAME"); v == "" { + t.Fatal("CITRIX_DDC_HOST_NAME must be set for acceptance tests") + } +} + +func BuildPolicySetResource(t *testing.T, policySet string) string { + policySetName := os.Getenv("TEST_POLICY_SET_NAME") + ddcServerHostName := os.Getenv("CITRIX_DDC_HOST_NAME") + + return BuildDeliveryGroupResource(t, testDeliveryGroupResources_updated) + fmt.Sprintf(policySet, policySetName, ddcServerHostName) +} + +func TestPolicySetResource(t *testing.T) { + resource.Test(t, resource.TestCase{ + ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, + PreCheck: func() { + TestProviderPreCheck(t) + 
TestHypervisorPreCheck_Azure(t) + TestHypervisorResourcePoolPreCheck_Azure(t) + TestMachineCatalogPreCheck_Azure(t) + TestDeliveryGroupPreCheck(t) + TestPolicySetResourcePreCheck(t) + }, + Steps: []resource.TestStep{ + // Create and Read testing + { + Config: BuildPolicySetResource(t, policy_set_testResource), + Check: resource.ComposeAggregateTestCheckFunc( + // Verify name of the policy set + resource.TestCheckResourceAttr("citrix_policy_set.testPolicySet", "name", os.Getenv("TEST_POLICY_SET_NAME")), + // Verify description of the policy set + resource.TestCheckResourceAttr("citrix_policy_set.testPolicySet", "description", "Test policy set description"), + // Verify type of the policy set + resource.TestCheckResourceAttr("citrix_policy_set.testPolicySet", "type", "DeliveryGroupPolicies"), + // Verify the number of scopes of the policy set + resource.TestCheckResourceAttr("citrix_policy_set.testPolicySet", "scopes.#", "1"), + // Verify the scopes of the policy set + resource.TestCheckResourceAttr("citrix_policy_set.testPolicySet", "scopes.0", "All"), + // Verify the number of policies in the policy set + resource.TestCheckResourceAttr("citrix_policy_set.testPolicySet", "policies.#", "2"), + // Verify name of the first policy in the policy set + resource.TestCheckResourceAttr("citrix_policy_set.testPolicySet", "policies.0.name", "first-test-policy"), + // Verify name of the second policy in the policy set + resource.TestCheckResourceAttr("citrix_policy_set.testPolicySet", "policies.1.name", "second-test-policy"), + ), + }, + // Reorder and Read testing + { + Config: BuildPolicySetResource(t, policy_set_reordered_testResource), + Check: resource.ComposeAggregateTestCheckFunc( + // Verify name of the policy set + resource.TestCheckResourceAttr("citrix_policy_set.testPolicySet", "name", os.Getenv("TEST_POLICY_SET_NAME")), + // Verify description of the policy set + resource.TestCheckResourceAttr("citrix_policy_set.testPolicySet", "description", "Test policy set description"), + // Verify type of the policy set + resource.TestCheckResourceAttr("citrix_policy_set.testPolicySet", "type", "DeliveryGroupPolicies"), + // Verify the number of scopes of the policy set + resource.TestCheckResourceAttr("citrix_policy_set.testPolicySet", "scopes.#", "1"), + // Verify the scopes of the policy set + resource.TestCheckResourceAttr("citrix_policy_set.testPolicySet", "scopes.0", "All"), + // Verify the number of policies in the policy set + resource.TestCheckResourceAttr("citrix_policy_set.testPolicySet", "policies.#", "2"), + // Verify name of the first policy in the policy set + resource.TestCheckResourceAttr("citrix_policy_set.testPolicySet", "policies.0.name", "second-test-policy"), + // Verify name of the second policy in the policy set + resource.TestCheckResourceAttr("citrix_policy_set.testPolicySet", "policies.1.name", "first-test-policy"), + ), + }, + // ImportState testing + { + ResourceName: "citrix_policy_set.testPolicySet", + ImportState: true, + ImportStateVerify: true, + // The last_updated attribute does not exist in the Orchestration + // API, therefore there is no value for it during import. 
+ ImportStateVerifyIgnore: []string{"last_updated"}, + }, + // Update and Read testing + { + Config: BuildPolicySetResource(t, policy_set_updated_testResource), + Check: resource.ComposeAggregateTestCheckFunc( + // Verify name of the policy set + resource.TestCheckResourceAttr("citrix_policy_set.testPolicySet", "name", os.Getenv("TEST_POLICY_SET_NAME")), + // Verify description of the policy set + resource.TestCheckResourceAttr("citrix_policy_set.testPolicySet", "description", "Test policy set description updated"), + // Verify type of the policy set + resource.TestCheckResourceAttr("citrix_policy_set.testPolicySet", "type", "DeliveryGroupPolicies"), + // Verify the number of scopes of the policy set + resource.TestCheckResourceAttr("citrix_policy_set.testPolicySet", "scopes.#", "1"), + // Verify the scopes of the policy set + resource.TestCheckResourceAttr("citrix_policy_set.testPolicySet", "scopes.0", "All"), + // Verify the number of policies in the policy set + resource.TestCheckResourceAttr("citrix_policy_set.testPolicySet", "policies.#", "1"), + // Verify name of the second policy in the policy set + resource.TestCheckResourceAttr("citrix_policy_set.testPolicySet", "policies.0.name", "first-test-policy"), + ), + }, + }, + }) +} diff --git a/internal/test/zone_resource_test.go b/internal/test/zone_resource_test.go index 3194c44..3ea38ca 100644 --- a/internal/test/zone_resource_test.go +++ b/internal/test/zone_resource_test.go @@ -12,6 +12,7 @@ import ( func TestZonePreCheck(t *testing.T) { if v := os.Getenv("CITRIX_CUSTOMER_ID"); v != "" && v != "CitrixOnPremises" { + zoneName := os.Getenv("TEST_ZONE_NAME") zoneDescription := os.Getenv("TEST_ZONE_DESCRIPTION") @@ -31,6 +32,7 @@ func TestZoneResource(t *testing.T) { } zoneName := os.Getenv("TEST_ZONE_NAME") + zoneDescription := os.Getenv("TEST_ZONE_DESCRIPTION") if zoneName == "" { zoneName = "second zone" @@ -43,12 +45,12 @@ func TestZoneResource(t *testing.T) { Steps: []resource.TestStep{ // Create and Read testing { - Config: BuildZoneResource(t, zone_testResource), + Config: BuildZoneResource(t, zone_testResource, zoneName), Check: getAggregateTestFunc(isOnPremises, zoneName, zoneDescription), }, // ImportState testing { - ResourceName: "citrix_daas_zone.test", + ResourceName: "citrix_zone.test", ImportState: true, ImportStateVerify: true, // The last_updated attribute does not exist in the Orchestration @@ -57,17 +59,17 @@ func TestZoneResource(t *testing.T) { }, // Update and Read testing { - Config: BuildZoneResource(t, zone_testResource_updated), + Config: BuildZoneResource(t, zone_testResource_updated, zoneName), Check: resource.ComposeAggregateTestCheckFunc( // Verify name of zone - resource.TestCheckResourceAttr("citrix_daas_zone.test", "name", fmt.Sprintf("%s-updated", zoneName)), + resource.TestCheckResourceAttr("citrix_zone.test", "name", fmt.Sprintf("%s-updated", zoneName)), // Verify description of zone - resource.TestCheckResourceAttr("citrix_daas_zone.test", "description", fmt.Sprintf("updated %s", zoneDescription)), + resource.TestCheckResourceAttr("citrix_zone.test", "description", fmt.Sprintf("updated %s", zoneDescription)), // Verify number of meta data of zone - resource.TestCheckResourceAttr("citrix_daas_zone.test", "metadata.#", "4"), + resource.TestCheckResourceAttr("citrix_zone.test", "metadata.#", "4"), // Verify first meta data value - resource.TestCheckResourceAttr("citrix_daas_zone.test", "metadata.3.name", "key4"), - resource.TestCheckResourceAttr("citrix_daas_zone.test", "metadata.3.value", "value4"), + 
resource.TestCheckResourceAttr("citrix_zone.test", "metadata.3.name", "key4"), + resource.TestCheckResourceAttr("citrix_zone.test", "metadata.3.value", "value4"), ), SkipFunc: getSkipFunc(isOnPremises), }, @@ -78,7 +80,7 @@ func TestZoneResource(t *testing.T) { var ( zone_testResource = ` -resource "citrix_daas_zone" "test" { +resource "citrix_zone" "test" { name = "%s" description = "%s" metadata = [ @@ -99,7 +101,7 @@ resource "citrix_daas_zone" "test" { ` zone_testResource_updated = ` -resource "citrix_daas_zone" "test" { +resource "citrix_zone" "test" { name = "%s-updated" description = "updated %s" metadata = [ @@ -124,8 +126,7 @@ resource "citrix_daas_zone" "test" { ` ) -func BuildZoneResource(t *testing.T, zone string) string { - zoneName := os.Getenv("TEST_ZONE_NAME") +func BuildZoneResource(t *testing.T, zone string, zoneName string) string { zoneDescription := os.Getenv("TEST_ZONE_DESCRIPTION") if zoneName == "" { @@ -148,16 +149,16 @@ func getSkipFunc(isOnPremises bool) func() (bool, error) { func getAggregateTestFunc(isOnPremises bool, zoneName string, zoneDescription string) resource.TestCheckFunc { if isOnPremises { return resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr("citrix_daas_zone.test", "name", zoneName), - resource.TestCheckResourceAttr("citrix_daas_zone.test", "description", zoneDescription), - resource.TestCheckResourceAttr("citrix_daas_zone.test", "metadata.#", "3"), - resource.TestCheckResourceAttr("citrix_daas_zone.test", "metadata.0.name", "key1"), - resource.TestCheckResourceAttr("citrix_daas_zone.test", "metadata.0.value", "value1"), + resource.TestCheckResourceAttr("citrix_zone.test", "name", zoneName), + resource.TestCheckResourceAttr("citrix_zone.test", "description", zoneDescription), + resource.TestCheckResourceAttr("citrix_zone.test", "metadata.#", "3"), + resource.TestCheckResourceAttr("citrix_zone.test", "metadata.0.name", "key1"), + resource.TestCheckResourceAttr("citrix_zone.test", "metadata.0.value", "value1"), ) } return resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr("citrix_daas_zone.test", "name", zoneName), - resource.TestCheckResourceAttr("citrix_daas_zone.test", "description", zoneDescription), + resource.TestCheckResourceAttr("citrix_zone.test", "name", zoneName), + resource.TestCheckResourceAttr("citrix_zone.test", "description", zoneDescription), ) } diff --git a/internal/util/common.go b/internal/util/common.go index 0d6d809..eb1c159 100644 --- a/internal/util/common.go +++ b/internal/util/common.go @@ -6,6 +6,7 @@ import ( "context" "encoding/json" "fmt" + "io" "io/ioutil" "net/http" "reflect" @@ -35,12 +36,27 @@ const SamRegex string = `^[a-zA-Z][a-zA-Z0-9\- ]{0,61}[a-zA-Z]\\\w[\w\.\- ]+$` // GUID const GuidRegex string = `^[0-9a-fA-F]{8}-([0-9a-fA-F]{4}-){3}[0-9a-fA-F]{12}[}]?$` +// IPv4 +const IPv4Regex string = `^((25[0-5]|(2[0-4]|1\d|[1-9]|)\d)\.?\b){4}$` + +// IPv4 with https +const IPv4RegexWithProtocol string = `^(http|https)://((25[0-5]|(2[0-4]|1\d|[1-9]|)\d)\.?\b){4}$` + // Date YYYY-MM-DD const DateRegex string = `^\d{4}-\d{2}-\d{2}$` // Time HH:MM const TimeRegex string = `^([0-1][0-9]|2[0-3]):[0-5][0-9]$` +// Name of the Default Site Policy Set +const DefaultSitePolicySetName string = "DefaultSitePolicies" + +// ID of the Default Site Policy Set +const DefaultSitePolicySetId string = "00000000-0000-0000-0000-000000000000" + +// SSL Thumbprint +const SslThumbprintRegex string = `^([0-9a-fA-F]{40}|[0-9a-fA-F]{64})$` + // Terraform model for name value string pair type 
NameValueStringPairModel struct { Name types.String `tfsdk:"name"` @@ -217,6 +233,10 @@ func GetValidatorFromEnum[V ~string, T []V](enum T) validator.String { ) } +type HttpErrorBody struct { + Detail string `json:"detail"` +} + // // Wrapper function for reading specific resource from remote with retries // @@ -230,12 +250,23 @@ func GetValidatorFromEnum[V ~string, T []V](enum T) validator.String { func ReadResource[ResponseType any](request any, ctx context.Context, client *citrixdaasclient.CitrixDaasClient, resp *resource.ReadResponse, resourceType, resourceIdOrName string) (ResponseType, *http.Response, error) { response, httpResp, err := citrixdaasclient.ExecuteWithRetry[ResponseType](request, client) if err != nil && resp != nil { + body, _ := io.ReadAll(httpResp.Body) + httpErrorBody := HttpErrorBody{} + json.Unmarshal(body, &httpErrorBody) if httpResp.StatusCode == http.StatusNotFound { resp.Diagnostics.AddWarning( fmt.Sprintf("%s not found", resourceType), fmt.Sprintf("%s %s was not found and will be removed from the state file. An apply action will result in the creation of a new resource.", resourceType, resourceIdOrName), ) + resp.State.RemoveResource(ctx) + } else if httpResp.StatusCode == http.StatusInternalServerError && httpErrorBody.Detail == "Object does not exist." { + + resp.Diagnostics.AddWarning( + fmt.Sprintf("%s not found", resourceType), + fmt.Sprintf("%s %s was not found and will be removed from the state file. An apply action will result in the creation of a new resource.", resourceType, resourceIdOrName), + ) + resp.State.RemoveResource(ctx) } else { resp.Diagnostics.AddError( @@ -259,7 +290,7 @@ func ReadResource[ResponseType any](request any, ctx context.Context, client *ci // Terraform diagnostics from context // Maximum timeout threshold for job status polling // Error if job polling failed or job itself ended in failed state -func ProcessAsyncJobResponse(ctx context.Context, client *citrixdaasclient.CitrixDaasClient, jobResp *http.Response, errContext string, diagnostics *diag.Diagnostics, maxTimeout int) (err error) { +func ProcessAsyncJobResponse(ctx context.Context, client *citrixdaasclient.CitrixDaasClient, jobResp *http.Response, errContext string, diagnostics *diag.Diagnostics, maxTimeout int, returnJobError bool) (err error) { txId := citrixdaasclient.GetTransactionIdFromHttpResponse(jobResp) jobId := citrixdaasclient.GetJobIdFromHttpResponse(*jobResp) @@ -287,6 +318,10 @@ func ProcessAsyncJobResponse(ctx context.Context, client *citrixdaasclient.Citri errContext, errorDetail, ) + + if returnJobError { + return fmt.Errorf("%s", errorDetail) + } } return nil diff --git a/internal/util/resource.go b/internal/util/resource.go index b3d8198..482ecea 100644 --- a/internal/util/resource.go +++ b/internal/util/resource.go @@ -12,6 +12,8 @@ import ( "github.com/hashicorp/terraform-plugin-framework/diag" ) +const NUTANIX_PLUGIN_ID string = "AcropolisFactory" + // Gets the hypervisor and logs any errors func GetHypervisor(ctx context.Context, client *citrixdaasclient.CitrixDaasClient, diagnostics *diag.Diagnostics, hypervisorId string) (*citrixorchestration.HypervisorDetailResponseModel, error) { // Resolve resource path for service offering and master image @@ -106,7 +108,7 @@ func GetSingleResourcePathFromHypervisor(ctx context.Context, client *citrixdaas return "", fmt.Errorf("could not find resource") } -func GetSingleHypervisorResource(ctx context.Context, client *citrixdaasclient.CitrixDaasClient, hypervisorId, folderPath, resourceName, resourceType,
resourceGroupName string, connectionType citrixorchestration.HypervisorConnectionType) (*citrixorchestration.HypervisorResourceResponseModel, error) { +func GetSingleHypervisorResource(ctx context.Context, client *citrixdaasclient.CitrixDaasClient, hypervisorId, folderPath, resourceName, resourceType, resourceGroupName string, hypervisor *citrixorchestration.HypervisorDetailResponseModel) (*citrixorchestration.HypervisorResourceResponseModel, error) { req := client.ApiClient.HypervisorsAPIsDAAS.HypervisorsGetHypervisorAllResources(ctx, hypervisorId) req = req.Children(1) if folderPath != "" { @@ -122,39 +124,41 @@ func GetSingleHypervisorResource(ctx context.Context, client *citrixdaasclient.C } for _, child := range resources.Children { - if resourceType == "Vm" { - switch connectionType { - case citrixorchestration.HYPERVISORCONNECTIONTYPE_AZURE_RM: - if strings.EqualFold(child.GetName(), resourceName) { - resourceGroupAndVmName := strings.Split(child.GetId(), "/") - if resourceGroupAndVmName[0] == resourceGroupName { - return &child, nil - } - } - case citrixorchestration.HYPERVISORCONNECTIONTYPE_AWS: - if strings.EqualFold(strings.Split(child.GetName(), " ")[0], resourceName) { - return &child, nil - } - case citrixorchestration.HYPERVISORCONNECTIONTYPE_GOOGLE_CLOUD_PLATFORM: - if strings.EqualFold(child.GetName(), resourceName) { + switch hypervisor.GetConnectionType() { + case citrixorchestration.HYPERVISORCONNECTIONTYPE_AZURE_RM: + if (resourceType == "Vm" || resourceType == "VirtualPrivateCloud") && strings.EqualFold(child.GetName(), resourceName) { + resourceGroupAndVmName := strings.Split(child.GetId(), "/") + if strings.EqualFold(resourceGroupAndVmName[0], resourceGroupName) { return &child, nil } } - } - if strings.EqualFold(child.GetName(), resourceName) || - (child.GetResourceType() == "Region" && strings.EqualFold(child.GetId(), resourceName)) { // support both Azure region name or id ("East US" and "eastus") - if resourceType == "VirtualPrivateCloud" { - // For vnet, ID is resourceGroup/vnetName. 
Match the resourceGroup as well - resourceGroupAndVnetName := strings.Split(child.GetId(), "/") - if resourceGroupAndVnetName[0] == resourceGroupName { - return &child, nil - } else { - continue - } + if resourceType == "Region" && (strings.EqualFold(child.GetName(), resourceName) || strings.EqualFold(child.GetId(), resourceName)) { // support both Azure region name or id ("East US" and "eastus") + return &child, nil + } + case citrixorchestration.HYPERVISORCONNECTIONTYPE_AWS: + if resourceType == "Vm" && strings.EqualFold(strings.Split(child.GetName(), " ")[0], resourceName) { + return &child, nil + } + if strings.EqualFold(child.GetName(), resourceName) { + return &child, nil + } + case citrixorchestration.HYPERVISORCONNECTIONTYPE_GOOGLE_CLOUD_PLATFORM: + if strings.EqualFold(child.GetName(), resourceName) { + return &child, nil + } + case citrixorchestration.HYPERVISORCONNECTIONTYPE_V_CENTER: + if strings.EqualFold(child.GetName(), resourceName) { + return &child, nil + } + case citrixorchestration.HYPERVISORCONNECTIONTYPE_XEN_SERVER: + if strings.EqualFold(child.GetName(), resourceName) { + return &child, nil + } + case citrixorchestration.HYPERVISORCONNECTIONTYPE_CUSTOM: + if hypervisor.GetPluginId() == NUTANIX_PLUGIN_ID && strings.EqualFold(child.GetName(), resourceName) { + return &child, nil } - - return &child, nil } } diff --git a/settings.cloud.example.json b/settings.cloud.example.json index ce0bd2c..bd89cc8 100644 --- a/settings.cloud.example.json +++ b/settings.cloud.example.json @@ -1,7 +1,4 @@ { - "markdown.extension.toc.updateOnSave": false, - "diffEditor.ignoreTrimWhitespace": false, - "github.copilot.advanced": {}, "go.testEnvVars": { "TF_ACC": "1", @@ -10,24 +7,71 @@ "CITRIX_CLIENT_SECRET": "{client_secret}", // Zone Go Tests - "TEST_ZONE_NAME" :"test-zone", + "TEST_ZONE_NAME": "zone-name", // for zone tests + + // "TEST_ZONE_NAME_AZURE" :"test-zone", // Use the appropriate zone based on hypervisor for hypervisor and other tests + // "TEST_ZONE_NAME_GCP": "test-zone", + // "TEST_ZONE_NAME_VSPHERE": "test-zone", + // "TEST_ZONE_NAME_XENSERVER": "test-zone", "TEST_ZONE_DESCRIPTION" :"test zone description", - // Hypervisor Go Tests - "TEST_HYPERV_NAME" :"{hypervisor_name}", - "TEST_HYPERV_AD_ID" :"{hypervisor_id}", + // Hypervisor Go Tests - Azure + "TEST_HYPERV_NAME_AZURE" :"{hypervisor_name}", + "TEST_HYPERV_AD_ID" :"{hypervisor_ad_id}", "TEST_HYPERV_SUBSCRIPTION_ID" :"{hypervisor_subscription_id}", "TEST_HYPERV_APPLICATION_ID" :"{hyperviso_application_id}", "TEST_HYPERV_APPLICATION_SECRET":"{hypervisor_application_secret}", "TEST_HYPERV_APPLICATION_SECRET_EXPIRATION_DATE": "2030-12-31", + + // Hypervisor Go Tests - GCP + "TEST_HYPERV_NAME_GCP" :"{hypervisor_name}", + "TEST_HYPERV_SERVICE_ACCOUNT_ID" :"{gcp_service_account_id}", + "TEST_HYPERV_SERVICE_ACCOUNT_CREDENTIAL":"{gcp_service_account_credential}", + + // Hypervisor Go Tests - Vsphere + "TEST_HYPERV_NAME_VSPHERE": "{hypervisor_name}", + "TEST_HYPERV_USERNAME_VSPHERE": "{vsphere_hypervisor_user_name}", + "TEST_HYPERV_PASSWORD_PLAINTEXT_VSPHERE": "{vsphere_hypervisor_password}", + "TEST_HYPERV_ADDRESS_VSPHERE": "http://{vsphere_hypervisor_management_ip}", + "TEST_HYPERV_SSL_THUMBPRINT_VSPHERE": "{vsphere_hypervisor_ssl_thumbprint}", + + // Hypervisor Go Tests - XenServer + "TEST_HYPERV_NAME_XENSERVER": "{hypervisor_name}", + "TEST_HYPERV_USERNAME_XENSERVER": "{xenserver_hypervisor_user_name}", + "TEST_HYPERV_PASSWORD_PLAINTEXT_XENSERVER": "{xenserver_hypervisor_password}", + "TEST_HYPERV_ADDRESS_XENSERVER": 
"http://{xenserver_hypervisor_management_ip}", + "TEST_HYPERV_SSL_THUMBPRINT_XENSERVER": "{xenserver_hypervisor_ssl_thumbprint}", + + // Hypervisor Go Tests - Nutanix + "TEST_HYPERV_NAME_NUTANIX": "{hypervisor_name}", + "TEST_HYPERV_USERNAME_NUTANIX": "{vsphere_hypervisor_user_name}", + "TEST_HYPERV_PASSWORD_PLAINTEXT_NUTANIX": "{vsphere_hypervisor_password}", + "TEST_HYPERV_ADDRESS_NUTANIX": "{xenserver_hypervisor_management_ip}", + + // Hypervisor Resource Pool Go Tests - Azure "TEST_HYPERV_RP_NAME" : "{hypervisor_resource_pool_name}", "TEST_HYPERV_RP_REGION" :"East US", "TEST_HYPERV_RP_VIRTUAL_NETWORK_RESOURCE_GROUP" :"{hypervisor_resource_pool_network_resource_group_name}", "TEST_HYPERV_RP_VIRTUAL_NETWORK" :"{hypervisor_resource_pool_vnet_name}", "Test_HYPERV_RP_SUBNETS" :"[\"{hypervisor_resource_pool_subnet_name}\"]", + // Hypervisor Resource Pool GCP Testing Env Variables + "TEST_HYPERV_RP_NAME_GCP" :"{hypervisor_resource_pool_name}", + "TEST_HYPERV_RP_PROJECT_NAME_GCP":"{gcp_project_name}", + "TEST_HYPERV_RP_REGION_GCP" :"{gcp_region}", + "TEST_HYPERV_RP_VPC_GCP" :"{vpc_name}", + "Test_HYPERV_RP_SUBNETS_GCP" : "[\"{subnet_name}\"]", + + // Hypervisor Resource Pool XenServer Testing Env Variables + "TEST_HYPERV_RP_NAME_XENSERVER" :"{hypervisor_resource_pool_name}", + "TEST_HYPERV_RP_NETWORK_1_XENSERVER" :"Network 0", + "TEST_HYPERV_RP_NETWORK_2_XENSERVER" :"Network 1", + "TEST_HYPERV_RP_STORAGE_XENSERVER" :"{local_or_shared_storage}", + "TEST_HYPERV_RP_TEMP_STORAGE_XENSERVER" :"{local_or_shared_storage}", + + // Machine Catalog Go Tests - Azure - "TEST_MC_NAME" :"azure-onprem-catalog-1", + "TEST_MC_NAME" :"{catalog_name}", "TEST_MC_SERVICE_ACCOUNT" :"{admin_username}", "TEST_MC_SERVICE_ACCOUNT_PASS" :"{admin_password}", "TEST_MC_SERVICE_OFFERING" :"Standard_D2_v2", @@ -39,7 +83,7 @@ "TEST_MC_SUBNET" :"ctx-cvad-subnet", // Machine Catalog Go Tests - GCP - "TEST_MC_NAME_GCP" :"gcp-onprem-catalog-1", + "TEST_MC_NAME_GCP" :"{catalog_name}", "TEST_MC_SERVICE_ACCOUNT_GCP" :"{admin_username}", "TEST_MC_SERVICE_ACCOUNT_PASS_GCP" :"{admin_password}", "TEST_MC_STORAGE_TYPE_GCP" :"pd-standard", @@ -50,39 +94,64 @@ "TEST_MC_DOMAIN_GCP" :"ctx-ad.local", "TEST_MC_Subnet_GCP" :"us-east1", - //Machine Catalog Go Tests - Azure Manual Power Managed - "TEST_MC_NAME_MANUAL" :"azure-manual-catalog-1", + // Machine Catalog Go Tests - Azure Manual Power Managed + "TEST_MC_NAME_MANUAL" :"{catalog_name}", "TEST_MC_ALLOCATION_TYPE_MANUAL_POWER_MANAGED" :"Static", "TEST_MC_SESSION_SUPPORT_MANUAL_POWER_MANAGED" :"SingleSession", "TEST_MC_REGION_MANUAL_POWER_MANAGED" :"East US", "TEST_MC_RESOURCE_GROUP_MANUAL_POWER_MANAGED" :"{resource_group_name}", - "TEST_MC_MACHINE_NAME_MANUAL_Azure" :"{machine_domain_name}\\\\manual-01", + "TEST_MC_MACHINE_NAME_MANUAL_AZURE" :"manual-01", + "TEST_MC_MACHINE_ACCOUNT_MANUAL_AZURE" :"{machine_domain_name}\\\\manual-01", - //Machine Catalog Go Tests - GCP Manual Power Managed - "TEST_MC_MACHINE_NAME_MANUAL_GCP" :"{machine_domain_name}\\\\instance-1", + // Machine Catalog Go Tests - GCP Manual Power Managed + "TEST_MC_MACHINE_ACCOUNT_MANUAL_GCP" : "{machine_domain_name}\\\\instance-1", + "TEST_MC_MACHINE_NAME_MANUAL_GCP" :"instance-1", "TEST_MC_PROJECT_NAME_MANUAL_POWER_MANAGED" :"{GCP_Project_Name}", + "TEST_MC_REGION_MANUAL_POWER_MANAGED_GCP" :"us-east1", + + // Machine Catalog Go Tests - VSPHERE Manual Power Managed + "TEST_MC_DATACENTER_VSPHERE": "{datacenter_name}", + "TEST_MC_HOST_VSPHERE" :"{host_ip_address}", + "TEST_MC_MACHINE_NAME_MANUAL_VSPHERE": "machine-vm-name", + 
"TEST_MC_MACHINE_ACCOUNT_MANUAL_VSPHERE": "{machine_domain_name}\\machine_name", - //Machine Catalog Go Tests - Non Power Managed - "TEST_MC_MACHINE_NAME_MANUAL_NON_POWER_MANAGED" : "{machine_domain_name}\\\\manual-02", + // Machine Catalog Go Tests - Nutanix Manual Power Managed + "TEST_MC_MACHINE_NAME_MANUAL_NUTANIX": "machine-vm-name", + "TEST_MC_MACHINE_ACCOUNT_MANUAL_NUTANIX": "{machine_domain_name}}\\\\machine-name", + + // Machine Catalog Go Tests - Non Power Managed + "TEST_MC_MACHINE_ACCOUNT_MANUAL_NON_POWER_MANAGED" : "{machine_domain_name}\\\\manual-02", "TEST_MC_ALLOCATION_TYPE_MANUAL_NON_POWER_MANAGED" : "Static", "TEST_MC_SESSION_SUPPORT_MANUAL_NON_POWER_MANAGED" : "SingleSession", - //Machine Catalog Go Tests - Remote PC - "TEST_MC_NAME_REMOTE_PC" :"azure-onprem-remote-pc", - "TEST_MC_MACHINE_NAME_REMOTE_PC" :"{machine_domain_name}\\\\manual-01", + // Machine Catalog Go Tests - Remote PC + "TEST_MC_NAME_REMOTE_PC" :"{catalog_name}", + "TEST_MC_MACHINE_ACCOUNT_REMOTE_PC" :"{machine_domain_name}\\\\manual-01", "TEST_MC_ALLOCATION_TYPE_REMOTE_PC" :"Static", "TEST_MC_INCLUDE_SUBFOLDERS_REMOTE_PC" :"false", "TEST_MC_OU_REMOTE_PC" :"{OU}", - //machine account env variable + // Machine account env variable "TEST_MC_DOMAIN" :"ctx-ad.local", "TEST_MC_DOMAIN_OU" :"", - //Delivery Group Testing Env Variables + // Delivery Group Testing Env Variables "TEST_DG_NAME" :"ctx-test-delivery-group", + "TEST_POLICY_SET_WITHOUT_DG_NAME" : "test-policy-set-without-dg", + + // Application Go Tests + "TEST_APP_NAME" :"app-test", + // Application Folder Go Tests + "TEST_APP_FOLDER_NAME" :"app-folder", // Admin role env variable - "TEST_ROLE_NAME" :"ctx-test-role" + "TEST_ROLE_NAME" :"ctx-test-role", + // Admin Scope Go Tests + "TEST_ADMIN_SCOPE_NAME" :"ctx-test-scope", + + // Policy Set env variable + "TEST_POLICY_SET_NAME" :"ctx-test-policy-set", + "CITRIX_DDC_HOST_NAME" : "{customerId}.xendesktop.net" }, "go.testTimeout": "30m" } \ No newline at end of file diff --git a/settings.onprem.example.json b/settings.onprem.example.json index 79da437..6900010 100644 --- a/settings.onprem.example.json +++ b/settings.onprem.example.json @@ -8,24 +8,70 @@ "CITRIX_DISABLE_SSL_VERIFICATION": "true", // Zone Go Tests - "TEST_ZONE_NAME" :"test-zone", + "TEST_ZONE_NAME": "zone-name", // for zone tests + + // "TEST_ZONE_NAME_AZURE" :"test-zone", // Use the appropriate zone based on hypervisor for hypervisor and other tests + // "TEST_ZONE_NAME_GCP": "test-zone", + // "TEST_ZONE_NAME_VSPHERE": "test-zone", + // "TEST_ZONE_NAME_XENSERVER": "test-zone", "TEST_ZONE_DESCRIPTION" :"test zone description", - // Hypervisor Go Tests - "TEST_HYPERV_NAME" :"{hypervisor_name}", - "TEST_HYPERV_AD_ID" :"{hypervisor_id}", + // Hypervisor Go Tests - Azure + "TEST_HYPERV_NAME_AZURE" :"{hypervisor_name}", + "TEST_HYPERV_AD_ID" :"{hypervisor_ad_id}", "TEST_HYPERV_SUBSCRIPTION_ID" :"{hypervisor_subscription_id}", "TEST_HYPERV_APPLICATION_ID" :"{hyperviso_application_id}", "TEST_HYPERV_APPLICATION_SECRET":"{hypervisor_application_secret}", "TEST_HYPERV_APPLICATION_SECRET_EXPIRATION_DATE": "2030-12-31", + + // Hypervisor Go Tests - GCP + "TEST_HYPERV_NAME_GCP" :"{hypervisor_name}", + "TEST_HYPERV_SERVICE_ACCOUNT_ID" :"{gcp_service_account_id}", + "TEST_HYPERV_SERVICE_ACCOUNT_CREDENTIAL":"{gcp_service_account_credential}", + + // Hypervisor Go Tests - Vsphere + "TEST_HYPERV_NAME_VSPHERE": "{hypervisor_name}", + "TEST_HYPERV_USERNAME_VSPHERE": "{vsphere_hypervisor_user_name}", + "TEST_HYPERV_PASSWORD_PLAINTEXT_VSPHERE": 
"{vsphere_hypervisor_password}", + "TEST_HYPERV_ADDRESS_VSPHERE": "http://{vsphere_hypervisor_management_ip}", + "TEST_HYPERV_SSL_THUMBPRINT_VSPHERE": "{vsphere_hypervisor_ssl_thumbprint}", + + // Hypervisor Go Tests - XenServer + "TEST_HYPERV_NAME_XENSERVER": "{hypervisor_name}", + "TEST_HYPERV_USERNAME_XENSERVER": "{xenserver_hypervisor_user_name}", + "TEST_HYPERV_PASSWORD_PLAINTEXT_XENSERVER": "{xenserver_hypervisor_password}", + "TEST_HYPERV_ADDRESS_XENSERVER": "http://{xenserver_hypervisor_management_ip}", + "TEST_HYPERV_SSL_THUMBPRINT_XENSERVER": "{xenserver_hypervisor_ssl_thumbprint}", + + // Hypervisor Go Tests - Nutanix + "TEST_HYPERV_NAME_NUTANIX": "{hypervisor_name}", + "TEST_HYPERV_USERNAME_NUTANIX": "{vsphere_hypervisor_user_name}", + "TEST_HYPERV_PASSWORD_PLAINTEXT_NUTANIX": "{vsphere_hypervisor_password}", + "TEST_HYPERV_ADDRESS_NUTANIX": "{xenserver_hypervisor_management_ip}", + + // Hypervisor Resource Pool Go Tests - Azure "TEST_HYPERV_RP_NAME" : "{hypervisor_resource_pool_name}", "TEST_HYPERV_RP_REGION" :"East US", "TEST_HYPERV_RP_VIRTUAL_NETWORK_RESOURCE_GROUP" :"{hypervisor_resource_pool_network_resource_group_name}", "TEST_HYPERV_RP_VIRTUAL_NETWORK" :"{hypervisor_resource_pool_vnet_name}", "Test_HYPERV_RP_SUBNETS" :"[\"{hypervisor_resource_pool_subnet_name}\"]", + + // Hypervisor Resource Pool GCP Testing Env Variables + "TEST_HYPERV_RP_NAME_GCP" :"{hypervisor_resource_pool_name}", + "TEST_HYPERV_RP_PROJECT_NAME_GCP":"{gcp_project_name}", + "TEST_HYPERV_RP_REGION_GCP" :"{gcp_region}", + "TEST_HYPERV_RP_VPC_GCP" :"{vpc_name}", + "Test_HYPERV_RP_SUBNETS_GCP" : "[\"{subnet_name}\"]", + + // Hypervisor Resource Pool XenServer Testing Env Variables + "TEST_HYPERV_RP_NAME_XENSERVER" :"{hypervisor_resource_pool_name}", + "TEST_HYPERV_RP_NETWORK_1_XENSERVER" :"Network 0", + "TEST_HYPERV_RP_NETWORK_2_XENSERVER" :"Network 1", + "TEST_HYPERV_RP_STORAGE_XENSERVER" :"{local_or_shared_storage}", + "TEST_HYPERV_RP_TEMP_STORAGE_XENSERVER" :"{local_or_shared_storage}", // Machine Catalog Go Tests - Azure - "TEST_MC_NAME" :"azure-onprem-catalog-1", + "TEST_MC_NAME" :"{catalog_name}", "TEST_MC_SERVICE_ACCOUNT" :"{admin_username}", "TEST_MC_SERVICE_ACCOUNT_PASS" :"{admin_password}", "TEST_MC_SERVICE_OFFERING" :"Standard_D2_v2", @@ -37,7 +83,7 @@ "TEST_MC_SUBNET" :"ctx-cvad-subnet", // Machine Catalog Go Tests - GCP - "TEST_MC_NAME_GCP" :"gcp-onprem-catalog-1", + "TEST_MC_NAME_GCP" :"{catalog_name}", "TEST_MC_SERVICE_ACCOUNT_GCP" :"{admin_username}", "TEST_MC_SERVICE_ACCOUNT_PASS_GCP" :"{admin_password}", "TEST_MC_STORAGE_TYPE_GCP" :"pd-standard", @@ -48,38 +94,64 @@ "TEST_MC_DOMAIN_GCP" :"ctx-ad.local", "TEST_MC_Subnet_GCP" :"us-east1", - //Machine Catalog Go Tests - Azure Manual Power Managed - "TEST_MC_NAME_MANUAL" :"azure-manual-catalog-1", + // Machine Catalog Go Tests - Azure Manual Power Managed + "TEST_MC_NAME_MANUAL" :"{catalog_name}", "TEST_MC_ALLOCATION_TYPE_MANUAL_POWER_MANAGED" :"Static", "TEST_MC_SESSION_SUPPORT_MANUAL_POWER_MANAGED" :"SingleSession", "TEST_MC_REGION_MANUAL_POWER_MANAGED" :"East US", "TEST_MC_RESOURCE_GROUP_MANUAL_POWER_MANAGED" :"{resource_group_name}", - "TEST_MC_MACHINE_NAME_MANUAL_Azure" :"{machine_domain_name}\\\\manual-01", + "TEST_MC_MACHINE_NAME_MANUAL_AZURE" :"manual-01", + "TEST_MC_MACHINE_ACCOUNT_MANUAL_AZURE" :"{machine_domain_name}\\\\manual-01", - //Machine Catalog Go Tests - GCP Manual Power Managed - "TEST_MC_MACHINE_NAME_MANUAL_GCP" :"{machine_domain_name}\\\\instance-1", + // Machine Catalog Go Tests - GCP Manual Power Managed + 
"TEST_MC_MACHINE_ACCOUNT_MANUAL_GCP" :"{machine_domain_name}\\\\instance-1", + "TEST_MC_MACHINE_NAME_MANUAL_GCP" :"instance-1", "TEST_MC_PROJECT_NAME_MANUAL_POWER_MANAGED" :"{GCP_Project_Name}", + "TEST_MC_REGION_MANUAL_POWER_MANAGED_GCP" :"us-east1", + + // Machine Catalog Go Tests - VSPHERE Manual Power Managed + "TEST_MC_DATACENTER_VSPHERE": "{datacenter_name}", + "TEST_MC_HOST_VSPHERE" :"{host_ip_address}", + "TEST_MC_MACHINE_NAME_MANUAL_VSPHERE": "machine-vm-name", + "TEST_MC_MACHINE_ACCOUNT_MANUAL_VSPHERE": "{machine_domain_name}\\machine_name", + + // Machine Catalog Go Tests - Nutanix Manual Power Managed + "TEST_MC_MACHINE_NAME_MANUAL_NUTANIX": "machine-vm-name", + "TEST_MC_MACHINE_ACCOUNT_MANUAL_NUTANIX": "{machine_domain_name}}\\\\machine-name", - //Machine Catalog Go Tests - Non Power Managed - "TEST_MC_MACHINE_NAME_MANUAL_NON_POWER_MANAGED" : "{machine_domain_name}\\\\manual-02", + // Machine Catalog Go Tests - Non Power Managed + "TEST_MC_MACHINE_ACCOUNT_MANUAL_NON_POWER_MANAGED" : "{machine_domain_name}\\\\manual-02", "TEST_MC_ALLOCATION_TYPE_MANUAL_NON_POWER_MANAGED" : "Static", "TEST_MC_SESSION_SUPPORT_MANUAL_NON_POWER_MANAGED" : "SingleSession", - //Machine Catalog Go Tests - Remote PC - "TEST_MC_NAME_REMOTE_PC" :"azure-onprem-remote-pc", - "TEST_MC_MACHINE_NAME_REMOTE_PC" :"{machine_domain_name}\\\\manual-01", + // Machine Catalog Go Tests - Remote PC + "TEST_MC_NAME_REMOTE_PC" :"{catalog_name}", + "TEST_MC_MACHINE_ACCOUNT_REMOTE_PC" :"{machine_domain_name}\\\\manual-01", "TEST_MC_ALLOCATION_TYPE_REMOTE_PC" :"Static", "TEST_MC_INCLUDE_SUBFOLDERS_REMOTE_PC" :"false", "TEST_MC_OU_REMOTE_PC" :"{OU}", - //machine account env variable + // Machine account env variable "TEST_MC_DOMAIN" :"cmdlab.net", "TEST_MC_DOMAIN_OU" :"", - //Delivery Group Testing Env Variables - "TEST_DG_NAME" :"ak-test-delivery-group" + // Delivery Group Testing Env Variables + "TEST_DG_NAME" :"ak-test-delivery-group", + "TEST_POLICY_SET_WITHOUT_DG_NAME" : "test-policy-set-without-dg", + + // Application Go Tests + "TEST_APP_NAME" :"app-test", + // Application Folder Go Tests + "TEST_APP_FOLDER_NAME" :"app-folder", + + // Admin role env variable + "TEST_ROLE_NAME" :"ctx-test-role", + // Admin Scope Go Tests + "TEST_ADMIN_SCOPE_NAME" :"ctx-test-scope", + + // Policy Set env variable + "TEST_POLICY_SET_NAME" :"ctx-test-policy-set", + "CITRIX_DDC_HOST_NAME" : "{ddcHostname}" }, - "go.testTimeout": "30m", - "git.openRepositoryInParentFolders": "never", - "go.gopath": "" + "go.testTimeout": "30m" } \ No newline at end of file